; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512

; Combine tests involving SSE41 target shuffles (BLEND, INSERTPS, MOVZX)

declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
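
; A PSHUFB mask that selects bytes 0-3, zeroes bytes 4-7 and 12-15, and leaves
; bytes 8-11 undef matches a zero-extension of the low two i32 elements to i64,
; so the shuffle should combine to a single (V)PMOVZXDQ.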
define <16 x i8> @combine_vpshufb_as_movzx(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_as_movzx:
; SSE: # %bb.0:
; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vpshufb_as_movzx:
; AVX: # %bb.0:
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: retq
%res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 undef, i8 undef, i8 undef, i8 undef, i8 -1, i8 -1, i8 -1, i8 -1>)
ret <16 x i8> %res0
}
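
; A blend of two single-source permutes should combine so that the permute is
; applied once, after the blend: SSE and AVX produce a blend followed by one
; shuffle, while AVX512 currently selects a VPERMT2D through ZMM registers.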
define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: combine_blend_of_permutes_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_blend_of_permutes_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_blend_of_permutes_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX2-NEXT: retq
;
; AVX512-LABEL: combine_blend_of_permutes_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vpmovsxbd {{.*#+}} xmm2 = [2,19,0,17]
; AVX512-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%s0 = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
%s1 = shufflevector <2 x i64> %a1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
%x0 = bitcast <2 x i64> %s0 to <4 x i32>
%x1 = bitcast <2 x i64> %s1 to <4 x i32>
%r = shufflevector <4 x i32> %x0, <4 x i32> %x1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x i32> %r
}
define <16 x i8> @PR50049(ptr %p1, ptr %p2) {
; SSE-LABEL: PR50049:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa 16(%rdi), %xmm3
; SSE-NEXT: movdqa 32(%rdi), %xmm0
; SSE-NEXT: movdqa (%rsi), %xmm4
; SSE-NEXT: movdqa 16(%rsi), %xmm5
; SSE-NEXT: movdqa 32(%rsi), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [128,128,128,128,128,128,2,5,8,11,14,u,u,u,u,u]
; SSE-NEXT: pshufb %xmm6, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [0,3,6,9,12,15,128,128,128,128,128,u,u,u,u,u]
; SSE-NEXT: pshufb %xmm7, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,128,128,128,128,128]
; SSE-NEXT: pshufb %xmm3, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm8 = [128,128,128,128,128,128,128,128,128,128,128,1,4,7,10,13]
; SSE-NEXT: pshufb %xmm8, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: pshufb %xmm6, %xmm5
; SSE-NEXT: pshufb %xmm7, %xmm4
; SSE-NEXT: por %xmm5, %xmm4
; SSE-NEXT: pshufb %xmm3, %xmm4
; SSE-NEXT: pshufb %xmm8, %xmm1
; SSE-NEXT: por %xmm4, %xmm1
; SSE-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: pmaddubsw %xmm3, %xmm4
; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: pmaddubsw %xmm2, %xmm0
; SSE-NEXT: psllw $8, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: PR50049:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,4,7,10,13,128,128,128,128,128,128,u,u,u,u,u]
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,0,3,6,9,12,15,u,u,u,u,u]
; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa (%rsi), %xmm2
; AVX1-NEXT: vmovdqa 16(%rsi), %xmm5
; AVX1-NEXT: vmovdqa 32(%rsi), %xmm6
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4]
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128]
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpmaddubsw %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm3
; AVX1-NEXT: vpandn %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR50049:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [1,4,7,10,13,128,128,128,128,128,128,u,u,u,u,u]
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,0,3,6,9,12,15,u,u,u,u,u]
; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa (%rsi), %xmm2
; AVX2-NEXT: vmovdqa 16(%rsi), %xmm5
; AVX2-NEXT: vmovdqa 32(%rsi), %xmm6
; AVX2-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4]
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128]
; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm5
; AVX2-NEXT: vpor %xmm5, %xmm2, %xmm2
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR50049:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %xmm0
; AVX512-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX512-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,4,7,10,13,128,128,128,128,128,128,u,u,u,u,u]
; AVX512-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,0,3,6,9,12,15,u,u,u,u,u]
; AVX512-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa (%rsi), %xmm2
; AVX512-NEXT: vmovdqa 16(%rsi), %xmm5
; AVX512-NEXT: vmovdqa 32(%rsi), %xmm6
; AVX512-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX512-NEXT: vpshufb %xmm4, %xmm2, %xmm2
; AVX512-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4]
; AVX512-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128]
; AVX512-NEXT: vpshufb %xmm4, %xmm5, %xmm5
; AVX512-NEXT: vpor %xmm5, %xmm2, %xmm2
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%x1 = load <48 x i8>, ptr %p1, align 16
%x2 = load <48 x i8>, ptr %p2, align 16
%s1 = shufflevector <48 x i8> %x1, <48 x i8> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
%s2 = shufflevector <48 x i8> %x2, <48 x i8> poison, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
%r = mul <16 x i8> %s1, %s2
ret <16 x i8> %r
}