; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512DQBW
; PR28925
define <4 x i32> @test1(<4 x i1> %cond, <4 x i32> %x) {
; SSE-LABEL: test1:
; SSE: # %bb.0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test1:
; AVX: # %bb.0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: test1:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512F-NEXT: vptestnmd %xmm0, %xmm0, %k1
; AVX512F-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} {z}
; AVX512F-NEXT: retq
;
; AVX512DQBW-LABEL: test1:
; AVX512DQBW: # %bb.0:
; AVX512DQBW-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512DQBW-NEXT: vpmovd2m %xmm0, %k0
; AVX512DQBW-NEXT: knotw %k0, %k1
; AVX512DQBW-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} {z}
; AVX512DQBW-NEXT: retq
%r = select <4 x i1> %cond, <4 x i32> zeroinitializer, <4 x i32> %x
ret <4 x i32> %r
}
define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %x) {
; SSE-LABEL: test2:
; SSE: # %bb.0:
; SSE-NEXT: cmpneqps %xmm1, %xmm0
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test2:
; AVX: # %bb.0:
; AVX-NEXT: vcmpneqps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test2:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpneqps %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z}
; AVX512-NEXT: retq
%cond = fcmp oeq <4 x float> %a, %b
%r = select <4 x i1> %cond, <4 x i32> zeroinitializer, <4 x i32> %x
ret <4 x i32> %r
}
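; Scalar FP selects where one operand is zero: the compare result should be
; usable directly as a bitmask (andps/andnps), or as a zero-masking move on
; AVX512, with no blend required.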
define float @fsel_zero_false_val(float %a, float %b, float %x) {
; SSE-LABEL: fsel_zero_false_val:
; SSE: # %bb.0:
; SSE-NEXT: cmpeqss %xmm1, %xmm0
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsel_zero_false_val:
; AVX: # %bb.0:
; AVX-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: fsel_zero_false_val:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqss %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm2, %xmm2, %xmm0 {%k1} {z}
; AVX512-NEXT: retq
%cond = fcmp oeq float %a, %b
%r = select i1 %cond, float %x, float 0.0
ret float %r
}
define float @fsel_zero_true_val(float %a, float %b, float %x) {
; SSE-LABEL: fsel_zero_true_val:
; SSE: # %bb.0:
; SSE-NEXT: cmpeqss %xmm1, %xmm0
; SSE-NEXT: andnps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsel_zero_true_val:
; AVX: # %bb.0:
; AVX-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vandnps %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: fsel_zero_true_val:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqss %xmm1, %xmm0, %k1
; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovaps %xmm2, %xmm0
; AVX512-NEXT: retq
%cond = fcmp oeq float %a, %b
%r = select i1 %cond, float 0.0, float %x
ret float %r
}
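; When the constant select operand is nonzero, the constant has to be
; materialized and blended (or masked-moved on AVX512) rather than folded
; into a simple mask.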
define double @fsel_nonzero_false_val(double %x, double %y, double %z) {
; SSE-LABEL: fsel_nonzero_false_val:
; SSE: # %bb.0:
; SSE-NEXT: cmpeqsd %xmm1, %xmm0
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; SSE-NEXT: andnpd %xmm1, %xmm0
; SSE-NEXT: orpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsel_nonzero_false_val:
; AVX: # %bb.0:
; AVX-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [4.2E+1,4.2E+1]
; AVX-NEXT: # xmm1 = mem[0,0]
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: fsel_nonzero_false_val:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: retq
%cond = fcmp oeq double %x, %y
%r = select i1 %cond, double %z, double 42.0
ret double %r
}
define double @fsel_nonzero_true_val(double %x, double %y, double %z) {
; SSE-LABEL: fsel_nonzero_true_val:
; SSE: # %bb.0:
; SSE-NEXT: cmpeqsd %xmm1, %xmm0
; SSE-NEXT: movsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; SSE-NEXT: andpd %xmm0, %xmm1
; SSE-NEXT: andnpd %xmm2, %xmm0
; SSE-NEXT: orpd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: fsel_nonzero_true_val:
; AVX: # %bb.0:
; AVX-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: fsel_nonzero_true_val:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd {{.*#+}} xmm2 {%k1} = [4.2E+1,0.0E+0]
; AVX512-NEXT: vmovapd %xmm2, %xmm0
; AVX512-NEXT: retq
%cond = fcmp oeq double %x, %y
%r = select i1 %cond, double 42.0, double %z
ret double %r
}
define double @fsel_nonzero_constants(double %x, double %y) {
; SSE-LABEL: fsel_nonzero_constants:
; SSE: # %bb.0:
; SSE-NEXT: cmpeqsd %xmm1, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: andl $1, %eax
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: fsel_nonzero_constants:
; AVX: # %bb.0:
; AVX-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [4.2E+1,4.2E+1]
; AVX-NEXT: # xmm1 = mem[0,0]
; AVX-NEXT: vblendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: fsel_nonzero_constants:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 {%k1} = [1.2E+1,0.0E+0]
; AVX512-NEXT: retq
%cond = fcmp oeq double %x, %y
%r = select i1 %cond, double 12.0, double 42.0
ret double %r
}
define <2 x double> @vsel_nonzero_constants(<2 x double> %x, <2 x double> %y) {
; SSE2-LABEL: vsel_nonzero_constants:
; SSE2: # %bb.0:
; SSE2-NEXT: cmplepd %xmm0, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm2 = [4.2E+1,0.0E+0]
; SSE2-NEXT: movapd %xmm1, %xmm0
; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: andpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: vsel_nonzero_constants:
; SSE42: # %bb.0:
; SSE42-NEXT: cmplepd %xmm0, %xmm1
; SSE42-NEXT: movsd {{.*#+}} xmm2 = [4.2E+1,0.0E+0]
; SSE42-NEXT: movapd %xmm1, %xmm0
; SSE42-NEXT: blendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE42-NEXT: movapd %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: vsel_nonzero_constants:
; AVX: # %bb.0:
; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = [4.2E+1,0.0E+0]
; AVX-NEXT: vblendvpd %xmm0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: vsel_nonzero_constants:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmplepd %xmm0, %xmm1, %k1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
; AVX512-NEXT: vmovapd {{.*#+}} xmm0 {%k1} = [1.2E+1,-1.0E+0]
; AVX512-NEXT: retq
%cond = fcmp oge <2 x double> %x, %y
%r = select <2 x i1> %cond, <2 x double> <double 12.0, double -1.0>, <2 x double> <double 42.0, double 0.0>
ret <2 x double> %r
}
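; 'icmp slt %a, 0' selecting between %b and zero should lower to a sign-bit
; mask (arithmetic shift or pcmpgt against zero) followed by an AND.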
define <16 x i8> @signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: signbit_mask_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm0, %xmm2
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: signbit_mask_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <16 x i8> %a, zeroinitializer
%r = select <16 x i1> %cond, <16 x i8> %b, <16 x i8> zeroinitializer
ret <16 x i8> %r
}
define <8 x i16> @signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: signbit_mask_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: signbit_mask_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <8 x i16> %a, zeroinitializer
%r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer
ret <8 x i16> %r
}
define <4 x i32> @signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: signbit_mask_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: signbit_mask_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <4 x i32> %a, zeroinitializer
%r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer
ret <4 x i32> %r
}
define <2 x i64> @signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: signbit_mask_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: signbit_mask_v2i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pcmpgtq %xmm0, %xmm2
; SSE42-NEXT: pand %xmm1, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: signbit_mask_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <2 x i64> %a, zeroinitializer
%r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
ret <2 x i64> %r
}
; Swap cmp pred and select ops. This is logically equivalent to the above test.
define <2 x i64> @signbit_mask_swap_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: signbit_mask_swap_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: signbit_mask_swap_v2i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pcmpgtq %xmm0, %xmm2
; SSE42-NEXT: pand %xmm1, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: signbit_mask_swap_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_mask_swap_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
%r = select <2 x i1> %cond, <2 x i64> zeroinitializer, <2 x i64> %b
ret <2 x i64> %r
}
define <32 x i8> @signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: signbit_mask_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pcmpgtb %xmm0, %xmm5
; SSE-NEXT: pand %xmm2, %xmm5
; SSE-NEXT: pcmpgtb %xmm1, %xmm4
; SSE-NEXT: pand %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: signbit_mask_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_mask_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <32 x i8> %a, zeroinitializer
%r = select <32 x i1> %cond, <32 x i8> %b, <32 x i8> zeroinitializer
ret <32 x i8> %r
}
define <16 x i16> @signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: signbit_mask_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: signbit_mask_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_mask_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <16 x i16> %a, zeroinitializer
%r = select <16 x i1> %cond, <16 x i16> %b, <16 x i16> zeroinitializer
ret <16 x i16> %r
}
define <8 x i32> @signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: signbit_mask_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: signbit_mask_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_mask_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <8 x i32> %a, zeroinitializer
%r = select <8 x i1> %cond, <8 x i32> %b, <8 x i32> zeroinitializer
ret <8 x i32> %r
}
; Swap cmp pred and select ops. This is logically equivalent to the above test.
define <8 x i32> @signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: signbit_mask_swap_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: signbit_mask_swap_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_mask_swap_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_mask_swap_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <8 x i1> %cond, <8 x i32> zeroinitializer, <8 x i32> %b
ret <8 x i32> %r
}
define <4 x i64> @signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: signbit_mask_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: signbit_mask_v4i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm4, %xmm4
; SSE42-NEXT: pxor %xmm5, %xmm5
; SSE42-NEXT: pcmpgtq %xmm0, %xmm5
; SSE42-NEXT: pand %xmm2, %xmm5
; SSE42-NEXT: pcmpgtq %xmm1, %xmm4
; SSE42-NEXT: pand %xmm3, %xmm4
; SSE42-NEXT: movdqa %xmm5, %xmm0
; SSE42-NEXT: movdqa %xmm4, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: signbit_mask_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_mask_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_mask_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <4 x i64> %a, zeroinitializer
%r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> zeroinitializer
ret <4 x i64> %r
}
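; 'icmp slt %a, 0' selecting between all-ones and %b should lower to a
; sign-bit mask followed by an OR.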
define <16 x i8> @signbit_setmask_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: signbit_setmask_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm0, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: signbit_setmask_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <16 x i8> %a, zeroinitializer
%r = select <16 x i1> %cond, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %b
ret <16 x i8> %r
}
; Swap cmp pred and select ops. This is logically equivalent to the above test.
define <16 x i8> @signbit_setmask_swap_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: signbit_setmask_swap_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm0, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: signbit_setmask_swap_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_swap_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%r = select <16 x i1> %cond, <16 x i8> %b, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
ret <16 x i8> %r
}
define <8 x i16> @signbit_setmask_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: signbit_setmask_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: signbit_setmask_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <8 x i16> %a, zeroinitializer
%r = select <8 x i1> %cond, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %b
ret <8 x i16> %r
}
define <4 x i32> @signbit_setmask_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: signbit_setmask_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: signbit_setmask_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <4 x i32> %a, zeroinitializer
%r = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %b
ret <4 x i32> %r
}
define <2 x i64> @signbit_setmask_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: signbit_setmask_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: signbit_setmask_v2i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pcmpgtq %xmm0, %xmm2
; SSE42-NEXT: por %xmm1, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: signbit_setmask_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <2 x i64> %a, zeroinitializer
%r = select <2 x i1> %cond, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %b
ret <2 x i64> %r
}
define <32 x i8> @signbit_setmask_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: signbit_setmask_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pcmpgtb %xmm0, %xmm5
; SSE-NEXT: por %xmm2, %xmm5
; SSE-NEXT: pcmpgtb %xmm1, %xmm4
; SSE-NEXT: por %xmm3, %xmm4
; SSE-NEXT: movdqa %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: signbit_setmask_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_setmask_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <32 x i8> %a, zeroinitializer
%r = select <32 x i1> %cond, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> %b
ret <32 x i8> %r
}
define <16 x i16> @signbit_setmask_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: signbit_setmask_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: signbit_setmask_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_setmask_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <16 x i16> %a, zeroinitializer
%r = select <16 x i1> %cond, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> %b
ret <16 x i16> %r
}
define <8 x i32> @signbit_setmask_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: signbit_setmask_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: signbit_setmask_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_setmask_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <8 x i32> %a, zeroinitializer
%r = select <8 x i1> %cond, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> %b
ret <8 x i32> %r
}
define <4 x i64> @signbit_setmask_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: signbit_setmask_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: signbit_setmask_v4i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm4, %xmm4
; SSE42-NEXT: pxor %xmm5, %xmm5
; SSE42-NEXT: pcmpgtq %xmm0, %xmm5
; SSE42-NEXT: por %xmm2, %xmm5
; SSE42-NEXT: pcmpgtq %xmm1, %xmm4
; SSE42-NEXT: por %xmm3, %xmm4
; SSE42-NEXT: movdqa %xmm5, %xmm0
; SSE42-NEXT: movdqa %xmm4, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: signbit_setmask_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_setmask_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <4 x i64> %a, zeroinitializer
%r = select <4 x i1> %cond, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> %b
ret <4 x i64> %r
}
; Swap cmp pred and select ops. This is logically equivalent to the above test.
define <4 x i64> @signbit_setmask_swap_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: signbit_setmask_swap_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: signbit_setmask_swap_v4i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm4, %xmm4
; SSE42-NEXT: pxor %xmm5, %xmm5
; SSE42-NEXT: pcmpgtq %xmm0, %xmm5
; SSE42-NEXT: por %xmm2, %xmm5
; SSE42-NEXT: pcmpgtq %xmm1, %xmm4
; SSE42-NEXT: por %xmm3, %xmm4
; SSE42-NEXT: movdqa %xmm5, %xmm0
; SSE42-NEXT: movdqa %xmm4, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: signbit_setmask_swap_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: signbit_setmask_swap_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: signbit_setmask_swap_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
%r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>
ret <4 x i64> %r
}
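; 'icmp sgt %a, -1' selecting between %b and zero should lower to an inverted
; sign-bit mask: either ANDN of the sign mask or pcmpgt against all-ones
; followed by an AND.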
define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE-LABEL: not_signbit_mask_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%r = select <16 x i1> %cond, <16 x i8> %b, <16 x i8> zeroinitializer
ret <16 x i8> %r
}
define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: not_signbit_mask_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer
ret <8 x i16> %r
}
; Swap cmp pred and select ops. This is logically equivalent to the above test.
define <8 x i16> @not_signbit_mask_swap_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: not_signbit_mask_swap_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_swap_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_swap_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp slt <8 x i16> %a, zeroinitializer
%r = select <8 x i1> %cond, <8 x i16> zeroinitializer, <8 x i16> %b
ret <8 x i16> %r
}
define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: not_signbit_mask_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer
ret <4 x i32> %r
}
define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-LABEL: not_signbit_mask_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: not_signbit_mask_v2i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqd %xmm2, %xmm2
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
; SSE42-NEXT: pand %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
%r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
ret <2 x i64> %r
}
define <32 x i8> @not_signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: not_signbit_mask_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm4, %xmm4
; SSE-NEXT: pcmpgtb %xmm4, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pcmpgtb %xmm4, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX512-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <32 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%r = select <32 x i1> %cond, <32 x i8> %b, <32 x i8> zeroinitializer
ret <32 x i8> %r
}
define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: not_signbit_mask_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: psraw $15, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <16 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%r = select <16 x i1> %cond, <16 x i16> %b, <16 x i16> zeroinitializer
ret <16 x i16> %r
}
define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: not_signbit_mask_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <8 x i1> %cond, <8 x i32> %b, <8 x i32> zeroinitializer
ret <8 x i32> %r
}
; Swap cmp pred and select ops. This is logically equivalent to the above test.
define <8 x i32> @not_signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: not_signbit_mask_swap_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_swap_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_swap_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_swap_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp slt <8 x i32> %a, zeroinitializer
%r = select <8 x i1> %cond, <8 x i32> zeroinitializer, <8 x i32> %b
ret <8 x i32> %r
}
define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: not_signbit_mask_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
; SSE2-NEXT: pcmpgtd %xmm4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: not_signbit_mask_v4i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqd %xmm4, %xmm4
; SSE42-NEXT: pcmpgtq %xmm4, %xmm0
; SSE42-NEXT: pand %xmm2, %xmm0
; SSE42-NEXT: pcmpgtq %xmm4, %xmm1
; SSE42-NEXT: pand %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
%r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> zeroinitializer
ret <4 x i64> %r
}