llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple x86_64-pc-linux < %s | FileCheck %s

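; Under-aligned (align 4) <2 x half> fadd: as the CHECK lines below show, each
; half element is widened and narrowed through the __extendhfsf2/__truncsfhf2
; libcalls, and the updated pair is committed with a 32-bit lock cmpxchg
; compare-exchange loop on the packed value.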
define <2 x half> @test_atomicrmw_fadd_v2f16_align4(ptr addrspace(1) %ptr, <2 x half> %value) #0 {
; CHECK-LABEL: test_atomicrmw_fadd_v2f16_align4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pushq %rbp
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $88, %rsp
; CHECK-NEXT:    movq %rdi, %rbx
; CHECK-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    psrld $16, %xmm0
; CHECK-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    pinsrw $0, 2(%rdi), %xmm1
; CHECK-NEXT:    pinsrw $0, (%rdi), %xmm0
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB0_1: # %atomicrmw.start
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    pextrw $0, %xmm0, %eax
; CHECK-NEXT:    movzwl %ax, %ebp
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    callq __extendhfsf2@PLT
; CHECK-NEXT:    addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; CHECK-NEXT:    callq __truncsfhf2@PLT
; CHECK-NEXT:    pextrw $0, %xmm0, %ecx
; CHECK-NEXT:    shll $16, %ecx
; CHECK-NEXT:    orl %ebp, %ecx
; CHECK-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    pextrw $0, %xmm0, %edx
; CHECK-NEXT:    shll $16, %edx
; CHECK-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT:    pextrw $0, %xmm0, %eax
; CHECK-NEXT:    movzwl %ax, %eax
; CHECK-NEXT:    orl %edx, %eax
; CHECK-NEXT:    lock cmpxchgl %ecx, (%rbx)
; CHECK-NEXT:    setne %cl
; CHECK-NEXT:    pinsrw $0, %eax, %xmm0
; CHECK-NEXT:    shrl $16, %eax
; CHECK-NEXT:    pinsrw $0, %eax, %xmm1
; CHECK-NEXT:    testb %cl, %cl
; CHECK-NEXT:    jne .LBB0_1
; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT:    addq $88, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    popq %rbp
; CHECK-NEXT:    retq
  %res = atomicrmw fadd ptr addrspace(1) %ptr, <2 x half> %value seq_cst, align 4
  ret <2 x half> %res
}

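; Naturally aligned (align 8) <2 x float> fadd: the addition is performed as a
; vector addps, and the result is committed with a 64-bit lock cmpxchgq
; compare-exchange loop, as reflected in the CHECK lines below.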
define <2 x float> @test_atomicrmw_fadd_v2f32_align8(ptr addrspace(1) %ptr, <2 x float> %value) #0 {
; CHECK-LABEL: test_atomicrmw_fadd_v2f32_align8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB1_1: # %atomicrmw.start
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    movq %xmm1, %rax
; CHECK-NEXT:    addps %xmm0, %xmm1
; CHECK-NEXT:    movq %xmm1, %rcx
; CHECK-NEXT:    lock cmpxchgq %rcx, (%rdi)
; CHECK-NEXT:    movq %rax, %xmm1
; CHECK-NEXT:    jne .LBB1_1
; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
; CHECK-NEXT:    movdqa %xmm1, %xmm0
; CHECK-NEXT:    retq
  %res = atomicrmw fadd ptr addrspace(1) %ptr, <2 x float> %value seq_cst, align 8
  ret <2 x float> %res
}

attributes #0 = { nounwind }