; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,X64-AVX1
; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,X86-AVX1
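
; Tests that an fptosi/fptoui followed by sitofp/uitofp of the same value
; (FTRUNC) is folded to a single truncating round* instruction when the
; no-signed-zeros-fp-math attribute allows it, and that the saturating
; fp-to-int intrinsics declared below opt out of that fold.
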
declare i32 @llvm.fptoui.sat.i32.f32(float)
declare i64 @llvm.fptosi.sat.i64.f64(double)
define float @trunc_unsigned_f32(float %x) #0 {
; SSE2-LABEL: trunc_unsigned_f32:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttss2si %xmm0, %rax
; SSE2-NEXT: movl %eax, %eax
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2ss %rax, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_unsigned_f32:
; SSE41: # %bb.0:
; SSE41-NEXT: roundss $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; X64-AVX1-LABEL: trunc_unsigned_f32:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_unsigned_f32:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
; X86-AVX1-NEXT: retl
%i = fptoui float %x to i32
%r = uitofp i32 %i to float
ret float %r
}
define double @trunc_unsigned_f64(double %x) #0 {
; SSE2-LABEL: trunc_unsigned_f64:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: sarq $63, %rcx
; SSE2-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: cvttsd2si %xmm0, %rdx
; SSE2-NEXT: andq %rcx, %rdx
; SSE2-NEXT: orq %rax, %rdx
; SSE2-NEXT: movq %rdx, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; SSE2-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE2-NEXT: addsd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_unsigned_f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; X64-AVX1-LABEL: trunc_unsigned_f64:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_unsigned_f64:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: retl
%i = fptoui double %x to i64
%r = uitofp i64 %i to double
ret double %r
}
define <4 x float> @trunc_unsigned_v4f32(<4 x float> %x) #0 {
; SSE2-LABEL: trunc_unsigned_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttps2dq %xmm0, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: cvttps2dq %xmm0, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_unsigned_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_unsigned_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vroundps $11, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <4 x float> %x to <4 x i32>
%r = uitofp <4 x i32> %i to <4 x float>
ret <4 x float> %r
}
define <2 x double> @trunc_unsigned_v2f64(<2 x double> %x) #0 {
; SSE2-LABEL: trunc_unsigned_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm2 = [9.2233720368547758E+18,0.0E+0]
; SSE2-NEXT: movapd %xmm0, %xmm1
; SSE2-NEXT: subsd %xmm2, %xmm1
; SSE2-NEXT: cvttsd2si %xmm1, %rax
; SSE2-NEXT: cvttsd2si %xmm0, %rcx
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: sarq $63, %rdx
; SSE2-NEXT: andq %rax, %rdx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: movq %rdx, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: subsd %xmm2, %xmm0
; SSE2-NEXT: cvttsd2si %xmm0, %rcx
; SSE2-NEXT: movq %rax, %rdx
; SSE2-NEXT: sarq $63, %rdx
; SSE2-NEXT: andq %rcx, %rdx
; SSE2-NEXT: orq %rax, %rdx
; SSE2-NEXT: movq %rdx, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [4294967295,4294967295]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: addpd %xmm0, %xmm1
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_unsigned_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_unsigned_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <2 x double> %x to <2 x i64>
%r = uitofp <2 x i64> %i to <2 x double>
ret <2 x double> %r
}
define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 {
; SSE2-LABEL: trunc_unsigned_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: movsd {{.*#+}} xmm3 = [9.2233720368547758E+18,0.0E+0]
; SSE2-NEXT: subsd %xmm3, %xmm1
; SSE2-NEXT: cvttsd2si %xmm1, %rax
; SSE2-NEXT: cvttsd2si %xmm2, %rcx
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: sarq $63, %rdx
; SSE2-NEXT: andq %rax, %rdx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: movq %rdx, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE2-NEXT: cvttsd2si %xmm2, %rax
; SSE2-NEXT: subsd %xmm3, %xmm2
; SSE2-NEXT: cvttsd2si %xmm2, %rcx
; SSE2-NEXT: movq %rax, %rdx
; SSE2-NEXT: sarq $63, %rdx
; SSE2-NEXT: andq %rcx, %rdx
; SSE2-NEXT: orq %rax, %rdx
; SSE2-NEXT: movq %rdx, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: movapd %xmm0, %xmm2
; SSE2-NEXT: subsd %xmm3, %xmm2
; SSE2-NEXT: cvttsd2si %xmm2, %rax
; SSE2-NEXT: cvttsd2si %xmm0, %rcx
; SSE2-NEXT: movq %rcx, %rdx
; SSE2-NEXT: sarq $63, %rdx
; SSE2-NEXT: andq %rax, %rdx
; SSE2-NEXT: orq %rcx, %rdx
; SSE2-NEXT: movq %rdx, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: subsd %xmm3, %xmm0
; SSE2-NEXT: cvttsd2si %xmm0, %rcx
; SSE2-NEXT: movq %rax, %rdx
; SSE2-NEXT: sarq $63, %rdx
; SSE2-NEXT: andq %rcx, %rdx
; SSE2-NEXT: orq %rax, %rdx
; SSE2-NEXT: movq %rdx, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [4294967295,4294967295]
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [4841369599423283200,4841369599423283200]
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: psrlq $32, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [4985484787499139072,4985484787499139072]
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: movapd {{.*#+}} xmm6 = [1.9342813118337666E+25,1.9342813118337666E+25]
; SSE2-NEXT: subpd %xmm6, %xmm2
; SSE2-NEXT: addpd %xmm3, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: subpd %xmm6, %xmm1
; SSE2-NEXT: addpd %xmm0, %xmm1
; SSE2-NEXT: movapd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_unsigned_v4f64:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_unsigned_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <4 x double> %x to <4 x i64>
%r = uitofp <4 x i64> %i to <4 x double>
ret <4 x double> %r
}
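; Without the no-signed-zeros guarantee, the fold to a truncating round is not
; safe: for example, with x = -0.5 the fptosi/sitofp round-trip produces +0.0,
; but a truncating round would produce -0.0. The two-instruction convert
; sequence is kept instead.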
define float @trunc_signed_f32_no_fast_math(float %x) {
; SSE-LABEL: trunc_signed_f32_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_signed_f32_no_fast_math:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_signed_f32_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to float
ret float %r
}
; Without -0.0 to worry about (no-signed-zeros-fp-math), it is ok to use roundss if it is available.
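; (In the round{ss,sd,ps,pd} encoding, immediate 11 = 0b1011 selects
; round-toward-zero and suppresses the precision exception, so "$11" performs
; the truncation directly.)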
define float @trunc_signed_f32_nsz(float %x) #0 {
; SSE2-LABEL: trunc_signed_f32_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttps2dq %xmm0, %xmm0
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_signed_f32_nsz:
; SSE41: # %bb.0:
; SSE41-NEXT: roundss $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; X64-AVX1-LABEL: trunc_signed_f32_nsz:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_signed_f32_nsz:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to float
ret float %r
}
define double @trunc_signed32_f64_no_fast_math(double %x) {
; SSE-LABEL: trunc_signed32_f64_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_signed32_f64_no_fast_math:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_signed32_f64_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
; X86-AVX1-NEXT: .cfi_offset %ebp, -8
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to double
ret double %r
}
define double @trunc_signed32_f64_nsz(double %x) #0 {
; SSE2-LABEL: trunc_signed32_f64_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE2-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_signed32_f64_nsz:
; SSE41: # %bb.0:
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; X64-AVX1-LABEL: trunc_signed32_f64_nsz:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_signed32_f64_nsz:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to double
ret double %r
}
define double @trunc_f32_signed32_f64_no_fast_math(float %x) {
; SSE-LABEL: trunc_f32_signed32_f64_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
; X86-AVX1-NEXT: .cfi_offset %ebp, -8
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to double
ret double %r
}
define double @trunc_f32_signed32_f64_nsz(float %x) #0 {
; SSE-LABEL: trunc_f32_signed32_f64_nsz:
; SSE: # %bb.0:
; SSE-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_f32_signed32_f64_nsz:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_f32_signed32_f64_nsz:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to double
ret double %r
}
define float @trunc_f64_signed32_f32_no_fast_math(double %x) {
; SSE-LABEL: trunc_f64_signed32_f32_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to float
ret float %r
}
define float @trunc_f64_signed32_f32_nsz(double %x) #0 {
; SSE-LABEL: trunc_f64_signed32_f32_nsz:
; SSE: # %bb.0:
; SSE-NEXT: cvttpd2dq %xmm0, %xmm0
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_f64_signed32_f32_nsz:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_f64_signed32_f32_nsz:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to float
ret float %r
}
define double @trunc_signed_f64_no_fast_math(double %x) {
; SSE-LABEL: trunc_signed_f64_no_fast_math:
; SSE: # %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2sd %rax, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_signed_f64_no_fast_math:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttsd2si %xmm0, %rax
; X64-AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_signed_f64_no_fast_math:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
; X86-AVX1-NEXT: .cfi_offset %ebp, -8
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $24, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: fisttpll (%esp)
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: fildll {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i64
%r = sitofp i64 %i to double
ret double %r
}
define double @trunc_signed_f64_nsz(double %x) #0 {
; SSE2-LABEL: trunc_signed_f64_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2sd %rax, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_signed_f64_nsz:
; SSE41: # %bb.0:
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; X64-AVX1-LABEL: trunc_signed_f64_nsz:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_signed_f64_nsz:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: movl %ebp, %esp
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: retl
%i = fptosi double %x to i64
%r = sitofp i64 %i to double
ret double %r
}
define <4 x float> @trunc_signed_v4f32_nsz(<4 x float> %x) #0 {
; SSE2-LABEL: trunc_signed_v4f32_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttps2dq %xmm0, %xmm0
; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_signed_v4f32_nsz:
; SSE41: # %bb.0:
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_signed_v4f32_nsz:
; AVX: # %bb.0:
; AVX-NEXT: vroundps $11, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <4 x float> %x to <4 x i32>
%r = sitofp <4 x i32> %i to <4 x float>
ret <4 x float> %r
}
define <2 x double> @trunc_signed_v2f64_nsz(<2 x double> %x) #0 {
; SSE2-LABEL: trunc_signed_v2f64_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttsd2si %xmm0, %rax
; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: cvttsd2si %xmm0, %rcx
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2sd %rax, %xmm0
; SSE2-NEXT: cvtsi2sd %rcx, %xmm1
; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_signed_v2f64_nsz:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_signed_v2f64_nsz:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <2 x double> %x to <2 x i64>
%r = sitofp <2 x i64> %i to <2 x double>
ret <2 x double> %r
}
define <4 x double> @trunc_signed_v4f64_nsz(<4 x double> %x) #0 {
; SSE2-LABEL: trunc_signed_v4f64_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: cvttsd2si %xmm1, %rax
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE2-NEXT: cvttsd2si %xmm1, %rcx
; SSE2-NEXT: cvttsd2si %xmm0, %rdx
; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE2-NEXT: cvttsd2si %xmm0, %rsi
; SSE2-NEXT: xorps %xmm0, %xmm0
; SSE2-NEXT: cvtsi2sd %rdx, %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: cvtsi2sd %rsi, %xmm1
; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: cvtsi2sd %rax, %xmm1
; SSE2-NEXT: cvtsi2sd %rcx, %xmm2
; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: trunc_signed_v4f64_nsz:
; SSE41: # %bb.0:
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc_signed_v4f64_nsz:
; AVX: # %bb.0:
; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <4 x double> %x to <4 x i64>
%r = sitofp <4 x i64> %i to <4 x double>
ret <4 x double> %r
}
; The FTRUNC fold (the "round*" x86 instructions) relies on UB in the case of
; overflow. It used to be guarded with an attribute check, which allowed
; existing code to continue working based on the assumption that float->int
; overflow has saturating behavior.
;
; Now, we expect a front-end to use IR intrinsics if it wants to avoid this
; transform.
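;
; For example, instead of the plain conversion pair
;   %i = fptoui float %x to i32
;   %r = uitofp i32 %i to float
; a front-end that wants saturating semantics would emit
;   %i = call i32 @llvm.fptoui.sat.i32.f32(float %x)
;   %r = uitofp i32 %i to float
; as the two functions below do, and the round* fold is not applied.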
define float @trunc_unsigned_f32_disable_via_intrinsic(float %x) #0 {
; SSE-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
; SSE: # %bb.0:
; SSE-NEXT: cvttss2si %xmm0, %rax
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: ucomiss %xmm1, %xmm0
; SSE-NEXT: cmovael %eax, %ecx
; SSE-NEXT: ucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovbel %ecx, %eax
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2ss %rax, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttss2si %xmm0, %rax
; X64-AVX1-NEXT: xorl %ecx, %ecx
; X64-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX1-NEXT: vucomiss %xmm1, %xmm0
; X64-AVX1-NEXT: cmovael %eax, %ecx
; X64-AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-AVX1-NEXT: movl $-1, %eax
; X64-AVX1-NEXT: cmovbel %ecx, %eax
; X64-AVX1-NEXT: vcvtsi2ss %rax, %xmm2, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %eax
; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: vcvttss2si %xmm0, %eax
; X86-AVX1-NEXT: movl %eax, %ecx
; X86-AVX1-NEXT: sarl $31, %ecx
; X86-AVX1-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-AVX1-NEXT: vcvttss2si %xmm1, %edx
; X86-AVX1-NEXT: andl %ecx, %edx
; X86-AVX1-NEXT: orl %eax, %edx
; X86-AVX1-NEXT: xorl %eax, %eax
; X86-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX1-NEXT: vucomiss %xmm1, %xmm0
; X86-AVX1-NEXT: cmovael %edx, %eax
; X86-AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-AVX1-NEXT: movl $-1, %ecx
; X86-AVX1-NEXT: cmovbel %eax, %ecx
; X86-AVX1-NEXT: vmovd %ecx, %xmm0
; X86-AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
; X86-AVX1-NEXT: flds (%esp)
; X86-AVX1-NEXT: popl %eax
; X86-AVX1-NEXT: retl
%i = call i32 @llvm.fptoui.sat.i32.f32(float %x)
%r = uitofp i32 %i to float
ret float %r
}
define double @trunc_signed_f64_disable_via_intrinsic(double %x) #0 {
; SSE-LABEL: trunc_signed_f64_disable_via_intrinsic:
; SSE: # %bb.0:
; SSE-NEXT: cvttsd2si %xmm0, %rax
; SSE-NEXT: ucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT: cmovbeq %rax, %rcx
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: ucomisd %xmm0, %xmm0
; SSE-NEXT: cmovnpq %rcx, %rax
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtsi2sd %rax, %xmm0
; SSE-NEXT: retq
;
; X64-AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vcvttsd2si %xmm0, %rax
; X64-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-AVX1-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
; X64-AVX1-NEXT: cmovbeq %rax, %rcx
; X64-AVX1-NEXT: xorl %eax, %eax
; X64-AVX1-NEXT: vucomisd %xmm0, %xmm0
; X64-AVX1-NEXT: cmovnpq %rcx, %rax
; X64-AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
; X64-AVX1-NEXT: retq
;
; X86-AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
; X86-AVX1: # %bb.0:
; X86-AVX1-NEXT: pushl %ebp
; X86-AVX1-NEXT: movl %esp, %ebp
; X86-AVX1-NEXT: pushl %esi
; X86-AVX1-NEXT: andl $-8, %esp
; X86-AVX1-NEXT: subl $32, %esp
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX1-NEXT: fldl (%esp)
; X86-AVX1-NEXT: fisttpll (%esp)
; X86-AVX1-NEXT: xorl %eax, %eax
; X86-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-AVX1-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
; X86-AVX1-NEXT: movl $0, %edx
; X86-AVX1-NEXT: jb .LBB19_2
; X86-AVX1-NEXT: # %bb.1:
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX1-NEXT: movl (%esp), %edx
; X86-AVX1-NEXT: .LBB19_2:
; X86-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-AVX1-NEXT: movl $-1, %esi
; X86-AVX1-NEXT: cmovbel %edx, %esi
; X86-AVX1-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-AVX1-NEXT: cmovbel %ecx, %edx
; X86-AVX1-NEXT: vucomisd %xmm0, %xmm0
; X86-AVX1-NEXT: cmovpl %eax, %edx
; X86-AVX1-NEXT: cmovpl %eax, %esi
; X86-AVX1-NEXT: vmovd %esi, %xmm0
; X86-AVX1-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: fildll {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp)
; X86-AVX1-NEXT: leal -4(%ebp), %esp
; X86-AVX1-NEXT: popl %esi
; X86-AVX1-NEXT: popl %ebp
; X86-AVX1-NEXT: retl
%i = call i64 @llvm.fptosi.sat.i64.f64(double %x)
%r = sitofp i64 %i to double
ret double %r
}
attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }