llvm/test/CodeGen/X86/fp16-libcalls.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu -mattr=+f16c | FileCheck %s --check-prefix=F16C
; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu -mattr=+avx512fp16 | FileCheck %s --check-prefix=FP16
; RUN: llc < %s -enable-legalize-types-checking -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
; RUN: llc < %s -enable-legalize-types-checking -mtriple=i686-linux-gnu -mattr=sse2 | FileCheck %s --check-prefix=X86

; Check lowering of half (fp16) math intrinsics: soft floating point library
; calls where no native support exists, and native instructions where it does.

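; ceil lowers to a native rounding instruction where available. With f16c the
; input is extended via vcvtph2ps, rounded with vroundss imm 10 (round toward
; +infinity, precision exceptions suppressed), and truncated back with
; vcvtps2ph; with avx512fp16 a single vrndscalesh suffices. Without either
; feature the value round-trips through __extendhfsf2/__truncsfhf2 around a
; ceilf libcall.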
define void @test_half_ceil(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_ceil:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vroundss $10, %xmm0, %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_ceil:
; FP16:       # %bb.0:
; FP16-NEXT:    vrndscalesh $10, %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_ceil:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq ceilf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_ceil:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll ceilf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.ceil.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

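; copysign is pure bit manipulation, so no libcall is needed on any target:
; bit 15 (0x8000) carries the sign of a half and 0x7FFF masks the magnitude.
; The avx512fp16 path uses a vpternlogd bit-select with a broadcast 0x7FFF
; mask (printed as NaN, since 0x7FFF is a half NaN pattern).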
define void @test_half_copysign(half %a0, half %a1, ptr %p0) nounwind {
; F16C-LABEL: test_half_copysign:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm1, %eax
; F16C-NEXT:    andl $32768, %eax # imm = 0x8000
; F16C-NEXT:    vpextrw $0, %xmm0, %ecx
; F16C-NEXT:    andl $32767, %ecx # imm = 0x7FFF
; F16C-NEXT:    orl %eax, %ecx
; F16C-NEXT:    movw %cx, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_copysign:
; FP16:       # %bb.0:
; FP16-NEXT:    vpbroadcastw {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
; FP16-NEXT:    vpternlogd $202, %xmm1, %xmm0, %xmm2
; FP16-NEXT:    vmovsh %xmm2, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_copysign:
; X64:       # %bb.0:
; X64-NEXT:    pextrw $0, %xmm1, %eax
; X64-NEXT:    andl $32768, %eax # imm = 0x8000
; X64-NEXT:    pextrw $0, %xmm0, %ecx
; X64-NEXT:    andl $32767, %ecx # imm = 0x7FFF
; X64-NEXT:    orl %eax, %ecx
; X64-NEXT:    movw %cx, (%rdi)
; X64-NEXT:    retq
;
; X86-LABEL: test_half_copysign:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    andl $32768, %ecx # imm = 0x8000
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    andl $32767, %edx # imm = 0x7FFF
; X86-NEXT:    orl %ecx, %edx
; X86-NEXT:    movw %dx, (%eax)
; X86-NEXT:    retl
  %res = call half @llvm.copysign.half(half %a0, half %a1)
  store half %res, ptr %p0, align 2
  ret void
}

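; cos has no native instruction, so every configuration ends in a cosf
; libcall; f16c/avx512fp16 only supply the half/float conversions around it.
; The exp, exp2, exp10, log, log2, log10, sin and tan tests below follow the
; same pattern with their respective float libcalls.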
define void @test_half_cos(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_cos:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq cosf@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_cos:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq cosf@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_cos:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq cosf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_cos:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll cosf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.cos.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_exp(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_exp:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq expf@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_exp:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq expf@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_exp:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq expf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_exp:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll expf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.exp.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_exp2(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_exp2:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq exp2f@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_exp2:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq exp2f@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_exp2:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq exp2f@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_exp2:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll exp2f
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.exp2.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_exp10(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_exp10:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq exp10f@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_exp10:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq exp10f@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_exp10:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq exp10f@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_exp10:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll exp10f
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.exp10.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

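; fabs only needs to clear the sign bit. The f16c and soft-float paths apply
; a constant-pool mask to the extended float (the soft-float paths still pay
; for the conversion libcalls); avx512fp16 can AND the half directly with a
; broadcast 0x7FFF mask.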
define void @test_half_fabs(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_fabs:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_fabs:
; FP16:       # %bb.0:
; FP16-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
; FP16-NEXT:    vpand %xmm1, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_fabs:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_fabs:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movd %xmm0, (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.fabs.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_floor(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_floor:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vroundss $9, %xmm0, %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_floor:
; FP16:       # %bb.0:
; FP16-NEXT:    vrndscalesh $9, %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_floor:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq floorf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_floor:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll floorf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.floor.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

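; fma takes three half arguments. avx512fp16 maps it to vfmadd213sh directly.
; f16c converts all three operands inline before calling fmaf (the RUN line
; does not enable the fma feature, so no float FMA instruction is used). The
; soft-float paths must extend each operand with a separate __extendhfsf2
; call, spilling the not-yet-converted values across those calls.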
define void @test_half_fma(half %a0, half %a1, half %a2, ptr %p0) nounwind {
; F16C-LABEL: test_half_fma:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm2, %eax
; F16C-NEXT:    vpextrw $0, %xmm1, %ecx
; F16C-NEXT:    vpextrw $0, %xmm0, %edx
; F16C-NEXT:    vmovd %edx, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vmovd %ecx, %xmm1
; F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
; F16C-NEXT:    vmovd %eax, %xmm2
; F16C-NEXT:    vcvtph2ps %xmm2, %xmm2
; F16C-NEXT:    callq fmaf@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_fma:
; FP16:       # %bb.0:
; FP16-NEXT:    vfmadd213sh %xmm2, %xmm1, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_fma:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $16, %rsp
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    movss %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; X64-NEXT:    # xmm0 = mem[0],zero,zero,zero
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; X64-NEXT:    # xmm0 = mem[0],zero,zero,zero
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; X64-NEXT:    # xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Reload
; X64-NEXT:    # xmm2 = mem[0],zero,zero,zero
; X64-NEXT:    callq fmaf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    addq $16, %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_fma:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $72, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll fmaf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $72, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.fma.half(half %a0, half %a1, half %a2)
  store half %res, ptr %p0, align 2
  ret void
}

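; fneg is an XOR of the sign bit rather than a math libcall. avx512fp16 flips
; the half directly with a broadcast 0x8000 (printed as -0.0E+0); the other
; configurations flip the sign on the extended float.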
define void @test_half_fneg(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_fneg:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_fneg:
; FP16:       # %bb.0:
; FP16-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; FP16-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_fneg:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_fneg:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    movd %xmm0, (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = fneg half %a0
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_log(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_log:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq logf@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_log:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq logf@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_log:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq logf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_log:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll logf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.log.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_log2(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_log2:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq log2f@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_log2:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq log2f@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_log2:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq log2f@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_log2:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll log2f
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.log2.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_log10(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_log10:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq log10f@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_log10:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq log10f@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_log10:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq log10f@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_log10:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll log10f
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.log10.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

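; nearbyint rounds using the current rounding mode without raising the
; inexact exception, hence vroundss/vrndscalesh imm 12 (MXCSR rounding,
; precision exceptions suppressed). Compare with rint below, which uses
; imm 4 and may raise inexact.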
define void @test_half_nearbyint(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_nearbyint:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vroundss $12, %xmm0, %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_nearbyint:
; FP16:       # %bb.0:
; FP16-NEXT:    vrndscalesh $12, %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_nearbyint:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq nearbyintf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_nearbyint:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll nearbyintf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.nearbyint.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

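; pow is a two-operand libcall, so both halves are extended to float before
; calling powf; the soft-float paths need spills to keep one operand live
; across the other's __extendhfsf2 call.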
define void @test_half_pow(half %a0, half %a1, ptr %p0) nounwind {
; F16C-LABEL: test_half_pow:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm1, %eax
; F16C-NEXT:    vpextrw $0, %xmm0, %ecx
; F16C-NEXT:    vmovd %ecx, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vmovd %eax, %xmm1
; F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
; F16C-NEXT:    callq powf@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_pow:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vcvtsh2ss %xmm1, %xmm1, %xmm1
; FP16-NEXT:    callq powf@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_pow:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    subq $16, %rsp
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; X64-NEXT:    # xmm0 = mem[0],zero,zero,zero
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Reload
; X64-NEXT:    # xmm1 = mem[0],zero,zero,zero
; X64-NEXT:    callq powf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    addq $16, %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_pow:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $56, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT:    movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll powf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $56, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.pow.half(half %a0, half %a1)
  store half %res, ptr %p0, align 2
  ret void
}

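; powi takes an i32 exponent that is passed to __powisf2 unchanged; only the
; half base is extended to float. The exponent must be preserved in a
; register (or on the stack) across the __extendhfsf2 call.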
define void @test_half_powi(half %a0, i32 %a1, ptr %p0) nounwind {
; F16C-LABEL: test_half_powi:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rsi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq __powisf2@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_powi:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rsi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq __powisf2@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_powi:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbp
; X64-NEXT:    pushq %rbx
; X64-NEXT:    pushq %rax
; X64-NEXT:    movq %rsi, %rbx
; X64-NEXT:    movl %edi, %ebp
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movl %ebp, %edi
; X64-NEXT:    callq __powisf2@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    addq $8, %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    popq %rbp
; X64-NEXT:    retq
;
; X86-LABEL: test_half_powi:
; X86:       # %bb.0:
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __powisf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl
  %res = call half @llvm.powi.half(half %a0, i32 %a1)
  store half %res, ptr %p0, align 2
  ret void
}

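; rint rounds using the current rounding mode and may raise the inexact
; exception, hence vroundss/vrndscalesh imm 4 (MXCSR rounding, exceptions
; not suppressed).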
define void @test_half_rint(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_rint:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_rint:
; FP16:       # %bb.0:
; FP16-NEXT:    vrndscalesh $4, %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_rint:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq rintf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_rint:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll rintf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.rint.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_sin(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_sin:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq sinf@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_sin:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq sinf@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_sin:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq sinf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_sin:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll sinf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.sin.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

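; sqrt has a hardware instruction on every configuration tested (SSE2 is the
; baseline), so no sqrtf libcall is emitted; only the half/float conversions
; differ between the paths, and avx512fp16 uses vsqrtsh on the half directly.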
define void @test_half_sqrt(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_sqrt:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_sqrt:
; FP16:       # %bb.0:
; FP16-NEXT:    vsqrtsh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_sqrt:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    sqrtss %xmm0, %xmm0
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_sqrt:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    sqrtss %xmm0, %xmm0
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.sqrt.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

define void @test_half_tan(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_tan:
; F16C:       # %bb.0:
; F16C-NEXT:    pushq %rbx
; F16C-NEXT:    movq %rdi, %rbx
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    callq tanf@PLT
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rbx)
; F16C-NEXT:    popq %rbx
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_tan:
; FP16:       # %bb.0:
; FP16-NEXT:    pushq %rbx
; FP16-NEXT:    movq %rdi, %rbx
; FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
; FP16-NEXT:    callq tanf@PLT
; FP16-NEXT:    vcvtss2sh %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rbx)
; FP16-NEXT:    popq %rbx
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_tan:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq tanf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_tan:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll tanf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.tan.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}

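; trunc rounds toward zero, imm 11, on the f16c and avx512fp16 paths; the
; soft-float paths call truncf.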
define void @test_half_trunc(half %a0, ptr %p0) nounwind {
; F16C-LABEL: test_half_trunc:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vmovd %eax, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    vroundss $11, %xmm0, %xmm0, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    movw %ax, (%rdi)
; F16C-NEXT:    retq
;
; FP16-LABEL: test_half_trunc:
; FP16:       # %bb.0:
; FP16-NEXT:    vrndscalesh $11, %xmm0, %xmm0, %xmm0
; FP16-NEXT:    vmovsh %xmm0, (%rdi)
; FP16-NEXT:    retq
;
; X64-LABEL: test_half_trunc:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rbx
; X64-NEXT:    movq %rdi, %rbx
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    callq truncf@PLT
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    pextrw $0, %xmm0, %eax
; X64-NEXT:    movw %ax, (%rbx)
; X64-NEXT:    popq %rbx
; X64-NEXT:    retq
;
; X86-LABEL: test_half_trunc:
; X86:       # %bb.0:
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    pinsrw $0, {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll truncf
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esi)
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
  %res = call half @llvm.trunc.half(half %a0)
  store half %res, ptr %p0, align 2
  ret void
}