; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+f16c -O3 | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=X86-FP16
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=X64-FP16
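;
; Strict (constrained) half-precision scalar compares, covering every IEEE
; predicate for both the quiet intrinsic llvm.experimental.constrained.fcmp.f16
; and the signaling intrinsic llvm.experimental.constrained.fcmps.f16.
; Without native FP16 support the operands are first extended to single
; precision (via the __extendhfsf2 libcall on SSE2, or vcvtph2ps with
; F16C/AVX512F) and compared with (v)ucomiss/(v)comiss; with AVX512FP16 the
; half-precision vucomish/vcomish instructions are used directly.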
define i32 @test_f16_oeq_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_oeq_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnel %ebx, %ebp
; SSE2-NEXT: cmovpl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_oeq_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovnel %esi, %eax
; AVX-NEXT: cmovpl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_oeq_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnel %eax, %ecx
; X86-FP16-NEXT: cmovpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_oeq_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnel %esi, %eax
; X64-FP16-NEXT: cmovpl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"oeq",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ogt_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ogt_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovbel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ogt_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovbel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ogt_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmoval %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ogt_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovbel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"ogt",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_oge_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_oge_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovbl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_oge_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovbl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_oge_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovael %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_oge_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovbl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"oge",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_olt_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_olt_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: ucomiss %xmm0, %xmm1
; SSE2-NEXT: cmovbel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_olt_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovbel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_olt_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmoval %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_olt_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm0, %xmm1
; X64-FP16-NEXT: cmovbel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"olt",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ole_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ole_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: ucomiss %xmm0, %xmm1
; SSE2-NEXT: cmovbl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ole_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovbl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ole_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovael %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ole_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm0, %xmm1
; X64-FP16-NEXT: cmovbl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"ole",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_one_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_one_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_one_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_one_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_one_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"one",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ord_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ord_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovpl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ord_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovpl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ord_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ord_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovpl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"ord",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ueq_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ueq_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ueq_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovnel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ueq_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ueq_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"ueq",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ugt_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ugt_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: ucomiss %xmm0, %xmm1
; SSE2-NEXT: cmovael %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ugt_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovael %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ugt_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ugt_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm0, %xmm1
; X64-FP16-NEXT: cmovael %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"ugt",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_uge_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_uge_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: ucomiss %xmm0, %xmm1
; SSE2-NEXT: cmoval %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_uge_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmoval %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_uge_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_uge_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm0, %xmm1
; X64-FP16-NEXT: cmoval %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"uge",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ult_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ult_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovael %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ult_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovael %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ult_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ult_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovael %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"ult",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ule_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ule_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmoval %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ule_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmoval %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ule_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ule_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmoval %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"ule",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_une_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_une_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnel %ebp, %ebx
; SSE2-NEXT: cmovpl %ebp, %ebx
; SSE2-NEXT: movl %ebx, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_une_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %esi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovnel %edi, %eax
; AVX-NEXT: cmovpl %edi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_une_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnel %eax, %ecx
; X86-FP16-NEXT: cmovpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_une_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %esi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnel %edi, %eax
; X64-FP16-NEXT: cmovpl %edi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"une",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_uno_q(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_uno_q:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnpl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_uno_q:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: cmovnpl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_uno_q:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_uno_q:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnpl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmp.f16(
half %f1, half %f2, metadata !"uno",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}
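; Signaling variants: llvm.experimental.constrained.fcmps.f16 lowers to the
; signaling compares (v)comiss / vcomish, which raise an invalid exception on
; quiet NaN operands as well, unlike the quiet (v)ucomiss / vucomish above.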
define i32 @test_f16_oeq_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_oeq_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnel %ebx, %ebp
; SSE2-NEXT: cmovpl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_oeq_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovnel %esi, %eax
; AVX-NEXT: cmovpl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_oeq_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnel %eax, %ecx
; X86-FP16-NEXT: cmovpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_oeq_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnel %esi, %eax
; X64-FP16-NEXT: cmovpl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"oeq",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ogt_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ogt_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovbel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ogt_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovbel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ogt_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmoval %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ogt_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovbel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"ogt",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_oge_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_oge_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovbl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_oge_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovbl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_oge_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovael %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_oge_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovbl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"oge",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_olt_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_olt_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: comiss %xmm0, %xmm1
; SSE2-NEXT: cmovbel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_olt_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovbel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_olt_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmoval %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_olt_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm0, %xmm1
; X64-FP16-NEXT: cmovbel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"olt",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ole_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ole_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: comiss %xmm0, %xmm1
; SSE2-NEXT: cmovbl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ole_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovbl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ole_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovael %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ole_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm0, %xmm1
; X64-FP16-NEXT: cmovbl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"ole",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_one_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_one_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_one_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_one_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_one_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"one",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ord_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ord_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovpl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ord_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovpl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ord_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ord_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovpl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"ord",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ueq_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ueq_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnel %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ueq_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovnel %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ueq_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ueq_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnel %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"ueq",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ugt_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ugt_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: comiss %xmm0, %xmm1
; SSE2-NEXT: cmovael %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ugt_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovael %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ugt_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ugt_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm0, %xmm1
; X64-FP16-NEXT: cmovael %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"ugt",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_uge_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_uge_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss (%rsp), %xmm1 # 4-byte Reload
; SSE2-NEXT: # xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: comiss %xmm0, %xmm1
; SSE2-NEXT: cmoval %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_uge_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: vpextrw $0, %xmm0, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmoval %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_uge_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_uge_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm0, %xmm1
; X64-FP16-NEXT: cmoval %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"uge",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ult_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ult_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovael %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ult_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovael %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ult_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ult_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovael %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"ult",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_ule_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_ule_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmoval %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_ule_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmoval %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_ule_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovbel %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_ule_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmoval %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"ule",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_une_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_une_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnel %ebp, %ebx
; SSE2-NEXT: cmovpl %ebp, %ebx
; SSE2-NEXT: movl %ebx, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_une_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %esi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovnel %edi, %eax
; AVX-NEXT: cmovpl %edi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_une_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovnel %eax, %ecx
; X86-FP16-NEXT: cmovpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_une_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %esi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnel %edi, %eax
; X64-FP16-NEXT: cmovpl %edi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"une",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}

define i32 @test_f16_uno_s(i32 %a, i32 %b, half %f1, half %f2) #0 {
; SSE2-LABEL: test_f16_uno_s:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movl %esi, %ebx
; SSE2-NEXT: movl %edi, %ebp
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: comiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: cmovnpl %ebx, %ebp
; SSE2-NEXT: movl %ebp, %eax
; SSE2-NEXT: addq $8, %rsp
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; AVX-LABEL: test_f16_uno_s:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: vpextrw $0, %xmm0, %ecx
; AVX-NEXT: vpextrw $0, %xmm1, %edx
; AVX-NEXT: movzwl %dx, %edx
; AVX-NEXT: vmovd %edx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vcomiss %xmm0, %xmm1
; AVX-NEXT: cmovnpl %esi, %eax
; AVX-NEXT: retq
;
; X86-FP16-LABEL: test_f16_uno_s:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vcomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-FP16-NEXT: leal {{[0-9]+}}(%esp), %ecx
; X86-FP16-NEXT: cmovpl %eax, %ecx
; X86-FP16-NEXT: movl (%ecx), %eax
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: test_f16_uno_s:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: movl %edi, %eax
; X64-FP16-NEXT: vcomish %xmm1, %xmm0
; X64-FP16-NEXT: cmovnpl %esi, %eax
; X64-FP16-NEXT: retq
%cond = call i1 @llvm.experimental.constrained.fcmps.f16(
half %f1, half %f2, metadata !"uno",
metadata !"fpexcept.strict") #0
%res = select i1 %cond, i32 %a, i32 %b
ret i32 %res
}
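; Use the quiet "ogt" compare result in a conditional branch rather than a
; select, so the tail call to @bar is taken only on the true edge.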
define void @foo(half %0, half %1) #0 {
; SSE2-LABEL: foo:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rax
; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
; SSE2-NEXT: ucomiss (%rsp), %xmm0 # 4-byte Folded Reload
; SSE2-NEXT: jbe .LBB28_1
; SSE2-NEXT: # %bb.2:
; SSE2-NEXT: popq %rax
; SSE2-NEXT: jmp bar@PLT # TAILCALL
; SSE2-NEXT: .LBB28_1:
; SSE2-NEXT: popq %rax
; SSE2-NEXT: retq
;
; AVX-LABEL: foo:
; AVX: # %bb.0:
; AVX-NEXT: vpextrw $0, %xmm0, %eax
; AVX-NEXT: vpextrw $0, %xmm1, %ecx
; AVX-NEXT: movzwl %cx, %ecx
; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX-NEXT: movzwl %ax, %eax
; AVX-NEXT: vmovd %eax, %xmm1
; AVX-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX-NEXT: vucomiss %xmm0, %xmm1
; AVX-NEXT: ja bar@PLT # TAILCALL
; AVX-NEXT: # %bb.1:
; AVX-NEXT: retq
;
; X86-FP16-LABEL: foo:
; X86-FP16: # %bb.0:
; X86-FP16-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
; X86-FP16-NEXT: vucomish {{[0-9]+}}(%esp), %xmm0
; X86-FP16-NEXT: ja bar@PLT # TAILCALL
; X86-FP16-NEXT: # %bb.1:
; X86-FP16-NEXT: retl
;
; X64-FP16-LABEL: foo:
; X64-FP16: # %bb.0:
; X64-FP16-NEXT: vucomish %xmm1, %xmm0
; X64-FP16-NEXT: ja bar@PLT # TAILCALL
; X64-FP16-NEXT: # %bb.1:
; X64-FP16-NEXT: retq
%3 = call i1 @llvm.experimental.constrained.fcmp.f16( half %0, half %1, metadata !"ogt", metadata !"fpexcept.strict") #0
br i1 %3, label %4, label %5

4: ; preds = %2
tail call void @bar() #0
br label %5

5: ; preds = %4, %2
ret void
}

declare void @bar()

attributes #0 = { strictfp nounwind }

declare i1 @llvm.experimental.constrained.fcmp.f16(half, half, metadata, metadata)
declare i1 @llvm.experimental.constrained.fcmps.f16(half, half, metadata, metadata)