; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512
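
; Splat an i8 into <8 x i8>, AND each lane with a distinct power-of-2 bit,
; compare against zero and sign-extend the <8 x i1> mask to <8 x i16>.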
define <8 x i16> @pow2_mask_v16i8(i8 zeroext %0) {
; SSE2-LABEL: pow2_mask_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [128,128,64,64,32,32,16,16,8,8,4,4,2,2,1,1]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pcmpeqb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: pow2_mask_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE41-NEXT: movq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,0,0,0,0,0,0,0,0]
; SSE41-NEXT: pand %xmm1, %xmm0
; SSE41-NEXT: pcmpeqb %xmm1, %xmm0
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX2-LABEL: pow2_mask_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: pow2_mask_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb %edi, %xmm0
; AVX512-NEXT: vptestmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; AVX512-NEXT: vpmovm2w %k0, %xmm0
; AVX512-NEXT: retq
%vec = insertelement <1 x i8> poison, i8 %0, i64 0
%splat = shufflevector <1 x i8> %vec, <1 x i8> poison, <8 x i32> zeroinitializer
%mask = and <8 x i8> %splat, <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>
%not = icmp ne <8 x i8> %mask, zeroinitializer
%ext = sext <8 x i1> %not to <8 x i16>
ret <8 x i16> %ext
}
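
; Same pattern with i16 lanes: splat an i16 into <16 x i16>, AND with per-lane
; power-of-2 bits and sign-extend the <16 x i1> compare result to <16 x i16>.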
define <16 x i16> @pow2_mask_v16i16(i16 zeroext %0) {
; SSE2-LABEL: pow2_mask_v16i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,16384,8192,4096,2048,1024,512,256]
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: pcmpeqw %xmm3, %xmm0
; SSE2-NEXT: pcmpeqw %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: pow2_mask_v16i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = [128,64,32,16,8,4,2,1]
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [32768,16384,8192,4096,2048,1024,512,256]
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: pcmpeqw %xmm3, %xmm0
; SSE41-NEXT: pcmpeqw %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX2-LABEL: pow2_mask_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: pow2_mask_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastw %edi, %ymm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,16384,8192,4096,2048,1024,512,256,128,64,32,16,8,4,2,1]
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%vec = insertelement <1 x i16> poison, i16 %0, i64 0
%splat = shufflevector <1 x i16> %vec, <1 x i16> poison, <16 x i32> zeroinitializer
%mask = and <16 x i16> %splat, <i16 -32768, i16 16384, i16 8192, i16 4096, i16 2048, i16 1024, i16 512, i16 256, i16 128, i16 64, i16 32, i16 16, i16 8, i16 4, i16 2, i16 1>
%not = icmp ne <16 x i16> %mask, zeroinitializer
%ext = sext <16 x i1> %not to <16 x i16>
ret <16 x i16> %ext
}

; PR78888
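; Splat an i8 into <8 x i8>, test each power-of-2 bit, sign-extend the <8 x i1>
; result to <8 x i8> and bitcast it to an i64 bitmask.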
define i64 @pow2_mask_v8i8(i8 zeroext %0) {
; SSE-LABEL: pow2_mask_v8i8:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: movq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,0,0,0,0,0,0,0,0]
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: pcmpeqb %xmm1, %xmm0
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX2-LABEL: pow2_mask_v8i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: retq
;
; AVX512-LABEL: pow2_mask_v8i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb %edi, %xmm0
; AVX512-NEXT: vpbroadcastq {{.*#+}} xmm1 = [128,64,32,16,8,4,2,1,128,64,32,16,8,4,2,1]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: retq
%vec = insertelement <1 x i8> poison, i8 %0, i64 0
%splat = shufflevector <1 x i8> %vec, <1 x i8> poison, <8 x i32> zeroinitializer
%mask = and <8 x i8> %splat, <i8 -128, i8 64, i8 32, i8 16, i8 8, i8 4, i8 2, i8 1>
%not = icmp ne <8 x i8> %mask, zeroinitializer
%ext = sext <8 x i1> %not to <8 x i8>
%res = bitcast <8 x i8> %ext to i64
ret i64 %res
}