llvm/test/CodeGen/X86/i128-add.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64

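; Scalar i128 addition: the IR computes (x + 1) + y, i.e. x + y + 1. Per the
; checks below, 32-bit targets lower this to an addl plus a chain of adcl
; instructions, and 64-bit targets to addq/adcq; in both cases the +1 is
; applied as a separate add/adc carry chain rather than folded into the first.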
define i128 @add_i128(i128 %x, i128 %y) nounwind {
; X86-LABEL: add_i128:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    addl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    addl $1, %esi
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    movl %edi, 4(%eax)
; X86-NEXT:    movl %esi, (%eax)
; X86-NEXT:    movl %ecx, 8(%eax)
; X86-NEXT:    movl %edx, 12(%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
;
; X64-LABEL: add_i128:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    addq %rdx, %rax
; X64-NEXT:    adcq %rcx, %rsi
; X64-NEXT:    addq $1, %rax
; X64-NEXT:    adcq $0, %rsi
; X64-NEXT:    movq %rsi, %rdx
; X64-NEXT:    retq
  %t0 = add i128 %x, 1
  %t1 = add i128 %y, %t0
  ret i128 %t1
}

; PR42486
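; Same computation through a <1 x i128> vector. Per the checks below, the
; 32-bit lowering matches the scalar case, while on 64-bit the two i64 halves
; currently round-trip through XMM registers (punpcklqdq/pshufd) before the
; +1 carry chain is applied.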
define <1 x i128> @add_v1i128(<1 x i128> %x, <1 x i128> %y) nounwind {
; X86-LABEL: add_v1i128:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    addl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    addl $1, %esi
; X86-NEXT:    adcl $0, %edi
; X86-NEXT:    adcl $0, %ecx
; X86-NEXT:    adcl $0, %edx
; X86-NEXT:    movl %edi, 4(%eax)
; X86-NEXT:    movl %esi, (%eax)
; X86-NEXT:    movl %ecx, 8(%eax)
; X86-NEXT:    movl %edx, 12(%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
;
; X64-LABEL: add_v1i128:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    addq %rdx, %rax
; X64-NEXT:    adcq %rcx, %rsi
; X64-NEXT:    movq %rax, %xmm0
; X64-NEXT:    movq %rsi, %xmm1
; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X64-NEXT:    movq %xmm0, %rdx
; X64-NEXT:    addq $1, %rax
; X64-NEXT:    adcq $0, %rdx
; X64-NEXT:    retq
  %t0 = add <1 x i128> %x, <i128 1>
  %t1 = add <1 x i128> %y, %t0
  ret <1 x i128> %t1
}