; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST,AVX2-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST,AVX2-FAST-PERLANE
; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+xop | FileCheck %s --check-prefixes=AVX,XOP
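; Shuffle two <2 x i64> inputs into <3 x i64> <a[0], b[0], a[1]> and store the odd-sized result.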
define void @v3i64(<2 x i64> %a, <2 x i64> %b, ptr %p) nounwind {
; SSE2-LABEL: v3i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: movq %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v3i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pextrq $1, %xmm0, 16(%rdi)
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE42-NEXT: movdqa %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v3i64:
; AVX: # %bb.0:
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX-NEXT: vpextrq $1, %xmm0, 16(%rdi)
; AVX-NEXT: vmovdqa %xmm1, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <2 x i64> %a, <2 x i64> %b, <3 x i32> <i32 0, i32 2, i32 1>
store <3 x i64> %r, ptr %p
ret void
}
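; Same <0, 2, 1> shuffle as above, but with <2 x double> inputs.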
define void @v3f64(<2 x double> %a, <2 x double> %b, ptr %p) nounwind {
; SSE-LABEL: v3f64:
; SSE: # %bb.0:
; SSE-NEXT: movhps %xmm0, 16(%rdi)
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: v3f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
; AVX-NEXT: vmovhps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps %xmm1, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <2 x double> %a, <2 x double> %b, <3 x i32> <i32 0, i32 2, i32 1>
store <3 x double> %r, ptr %p
ret void
}
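; Same <0, 2, 1> shuffle with <2 x i32> inputs, storing a <3 x i32>.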
define void @v3i32(<2 x i32> %a, <2 x i32> %b, ptr %p) nounwind {
; SSE2-LABEL: v3i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movd %xmm2, 8(%rdi)
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v3i32:
; SSE42: # %bb.0:
; SSE42-NEXT: extractps $1, %xmm0, 8(%rdi)
; SSE42-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE42-NEXT: movlps %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v3i32:
; AVX: # %bb.0:
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: vextractps $1, %xmm0, 8(%rdi)
; AVX-NEXT: vmovlps %xmm1, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <2 x i32> %a, <2 x i32> %b, <3 x i32> <i32 0, i32 2, i32 1>
store <3 x i32> %r, ptr %p
ret void
}
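; Shuffle <4 x i16> inputs with mask <0, 5, 1, 6, 3> into a <5 x i16> store.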
define void @v5i16(<4 x i16> %a, <4 x i16> %b, ptr %p) nounwind {
; SSE2-LABEL: v5i16:
; SSE2: # %bb.0:
; SSE2-NEXT: psrlq $16, %xmm1
; SSE2-NEXT: pextrw $3, %xmm0, %eax
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movw %ax, 8(%rdi)
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5i16:
; SSE42: # %bb.0:
; SSE42-NEXT: psrlq $16, %xmm1
; SSE42-NEXT: pextrw $3, %xmm0, 8(%rdi)
; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE42-NEXT: movq %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v5i16:
; AVX: # %bb.0:
; AVX-NEXT: vpsrlq $16, %xmm1, %xmm1
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: vpextrw $3, %xmm0, 8(%rdi)
; AVX-NEXT: vmovq %xmm1, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <4 x i16> %a, <4 x i16> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
store <5 x i16> %r, ptr %p
ret void
}
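; Same <0, 5, 1, 6, 3> shuffle with <4 x i32> inputs.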
define void @v5i32(<4 x i32> %a, <4 x i32> %b, ptr %p) nounwind {
; SSE2-LABEL: v5i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movd %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5i32:
; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE42-NEXT: pextrd $3, %xmm0, 16(%rdi)
; SSE42-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE42-NEXT: movdqa %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v5i32:
; AVX: # %bb.0:
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,2,2,3]
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: vextractps $3, %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps %xmm1, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <4 x i32> %a, <4 x i32> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
store <5 x i32> %r, ptr %p
ret void
}
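; Same <0, 5, 1, 6, 3> shuffle with <4 x float> inputs.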
define void @v5f32(<4 x float> %a, <4 x float> %b, ptr %p) nounwind {
; SSE2-LABEL: v5f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: movss %xmm0, 16(%rdi)
; SSE2-NEXT: movaps %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v5f32:
; SSE42: # %bb.0:
; SSE42-NEXT: extractps $3, %xmm0, 16(%rdi)
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,2]
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE42-NEXT: movaps %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v5f32:
; AVX: # %bb.0:
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm0[0,1],xmm1[1,2]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX-NEXT: vextractps $3, %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps %xmm1, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <4 x float> %a, <4 x float> %b, <5 x i32> <i32 0, i32 5, i32 1, i32 6, i32 3>
store <5 x float> %r, ptr %p
ret void
}
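; Shuffle <4 x i8> inputs with mask <0, 6, 3, 6, 1, 7, 4> into a <7 x i8> store.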
define void @v7i8(<4 x i8> %a, <4 x i8> %b, ptr %p) nounwind {
; SSE2-LABEL: v7i8:
; SSE2: # %bb.0:
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,1,3,4,5,6,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,0,255,0,255,255,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,3,0,4,5,6,7]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: movb %al, 6(%rdi)
; SSE2-NEXT: movd %xmm2, (%rdi)
; SSE2-NEXT: pextrw $2, %xmm2, %eax
; SSE2-NEXT: movw %ax, 4(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i8:
; SSE42: # %bb.0:
; SSE42-NEXT: pextrb $0, %xmm1, 6(%rdi)
; SSE42-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pextrw $2, %xmm1, 4(%rdi)
; SSE42-NEXT: movd %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v7i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpextrb $0, %xmm1, 6(%rdi)
; AVX1-NEXT: vpextrw $2, %xmm0, 4(%rdi)
; AVX1-NEXT: vmovd %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: v7i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,4,7,4,3,6,0,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpextrb $0, %xmm1, 6(%rdi)
; AVX2-NEXT: vpextrw $2, %xmm0, 4(%rdi)
; AVX2-NEXT: vmovd %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; XOP-LABEL: v7i8:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0],xmm1[2],xmm0[3],xmm1[2],xmm0[1],xmm1[3,0,u,u,u,u,u,u,u,u,u]
; XOP-NEXT: vpextrb $0, %xmm1, 6(%rdi)
; XOP-NEXT: vpextrw $2, %xmm0, 4(%rdi)
; XOP-NEXT: vmovd %xmm0, (%rdi)
; XOP-NEXT: retq
%r = shufflevector <4 x i8> %a, <4 x i8> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
store <7 x i8> %r, ptr %p
ret void
}
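; Same <0, 6, 3, 6, 1, 7, 4> shuffle with <4 x i16> inputs.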
define void @v7i16(<4 x i16> %a, <4 x i16> %b, ptr %p) nounwind {
; SSE2-LABEL: v7i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,6,4,7]
; SSE2-NEXT: movw %ax, 12(%rdi)
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i16:
; SSE42: # %bb.0:
; SSE42-NEXT: pextrw $0, %xmm1, 12(%rdi)
; SSE42-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
; SSE42-NEXT: pextrd $2, %xmm1, 8(%rdi)
; SSE42-NEXT: movq %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v7i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
; AVX1-NEXT: vpextrw $0, %xmm1, 12(%rdi)
; AVX1-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: v7i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,8,9,14,15,8,9,6,7,12,13,0,1,14,15]
; AVX2-NEXT: vpextrw $0, %xmm1, 12(%rdi)
; AVX2-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; XOP-LABEL: v7i16:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,1],xmm1[4,5],xmm0[6,7],xmm1[4,5],xmm0[2,3],xmm1[6,7,0,1],xmm0[6,7]
; XOP-NEXT: vpextrw $0, %xmm1, 12(%rdi)
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm0, (%rdi)
; XOP-NEXT: retq
%r = shufflevector <4 x i16> %a, <4 x i16> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
store <7 x i16> %r, ptr %p
ret void
}
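; Same <0, 6, 3, 6, 1, 7, 4> shuffle with <4 x i32> inputs.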
define void @v7i32(<4 x i32> %a, <4 x i32> %b, ptr %p) nounwind {
; SSE2-LABEL: v7i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,2,2]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movd %xmm1, 24(%rdi)
; SSE2-NEXT: movq %xmm0, 16(%rdi)
; SSE2-NEXT: movdqa %xmm3, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v7i32:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,3,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSE42-NEXT: movd %xmm1, 24(%rdi)
; SSE42-NEXT: movq %xmm0, 16(%rdi)
; SSE42-NEXT: movdqa %xmm2, (%rdi)
; SSE42-NEXT: retq
;
; AVX-LABEL: v7i32:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2,3,2]
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3,2,3]
; AVX-NEXT: vmovss %xmm1, 24(%rdi)
; AVX-NEXT: vmovlps %xmm0, 16(%rdi)
; AVX-NEXT: vmovaps %xmm2, (%rdi)
; AVX-NEXT: retq
%r = shufflevector <4 x i32> %a, <4 x i32> %b, <7 x i32> <i32 0, i32 6, i32 3, i32 6, i32 1, i32 7, i32 4>
store <7 x i32> %r, ptr %p
ret void
}
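; Interleave two <8 x i8> inputs as (a[i], a[i+4], b[i]) for i = 0..3 into a <12 x i8> store.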
define void @v12i8(<8 x i8> %a, <8 x i8> %b, ptr %p) nounwind {
; SSE2-LABEL: v12i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,4]
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,1,1,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,3]
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movq %xmm2, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i8:
; SSE42: # %bb.0:
; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; SSE42-NEXT: pextrd $2, %xmm0, 8(%rdi)
; SSE42-NEXT: movq %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v12i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; AVX1-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: v12i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; AVX2-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; XOP-LABEL: v12i8:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,4],xmm1[0],xmm0[1,5],xmm1[1],xmm0[2,6],xmm1[2],xmm0[3,7],xmm1[3],xmm0[u,u,u,u]
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm0, (%rdi)
; XOP-NEXT: retq
%r = shufflevector <8 x i8> %a, <8 x i8> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
store <12 x i8> %r, ptr %p
ret void
}
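; Same 3-way interleave with <8 x i16> inputs, storing a <12 x i16>.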
define void @v12i16(<8 x i16> %a, <8 x i16> %b, ptr %p) nounwind {
; SSE2-LABEL: v12i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,4]
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm2, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,0,65535,65535,65535,65535]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,1,3,4,5,6,7]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm0, %xmm2
; SSE2-NEXT: movq %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm3, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i16:
; SSE42: # %bb.0:
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,2,3,10,11,u,u,4,5,12,13]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; SSE42-NEXT: movdqa %xmm0, (%rdi)
; SSE42-NEXT: movq %xmm3, 16(%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v12i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,2,3,10,11,u,u,4,5,12,13]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vmovq %xmm2, 16(%rdi)
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: v12i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,2,3,10,11,u,u,4,5,12,13]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX2-SLOW-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-SLOW-NEXT: vmovq %xmm2, 16(%rdi)
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: v12i16:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpbroadcastd %xmm1, %xmm2
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,1,8,9,u,u,2,3,10,11,u,u,4,5,12,13]
; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4],xmm2[5],xmm3[6,7]
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,6,7,14,15,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
; AVX2-FAST-NEXT: vmovq %xmm0, 16(%rdi)
; AVX2-FAST-NEXT: vmovdqa %xmm2, (%rdi)
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: v12i16:
; XOP: # %bb.0:
; XOP-NEXT: vpperm {{.*#+}} xmm2 = xmm0[0,1,8,9],xmm1[0,1],xmm0[2,3,10,11],xmm1[2,3],xmm0[4,5,12,13]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[4,5],xmm0[6,7,14,15],xmm1[6,7],xmm0[u,u,u,u,u,u,u,u]
; XOP-NEXT: vmovq %xmm0, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm2, (%rdi)
; XOP-NEXT: retq
%r = shufflevector <8 x i16> %a, <8 x i16> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
store <12 x i16> %r, ptr %p
ret void
}
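; Same 3-way interleave with <8 x i32> inputs, storing a <12 x i32>.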
define void @v12i32(<8 x i32> %a, <8 x i32> %b, ptr %p) nounwind {
; SSE2-LABEL: v12i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm2, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[1,3]
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm2[2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
; SSE2-NEXT: movaps %xmm2, 16(%rdi)
; SSE2-NEXT: movaps %xmm4, (%rdi)
; SSE2-NEXT: movaps %xmm0, 32(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i32:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,2]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm3[0,1,2,3],xmm4[4,5],xmm3[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm0[4,5],xmm3[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5,6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; SSE42-NEXT: movdqa %xmm1, 32(%rdi)
; SSE42-NEXT: movdqa %xmm3, 16(%rdi)
; SSE42-NEXT: movdqa %xmm4, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: v12i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; AVX1-NEXT: vmovsldup {{.*#+}} ymm2 = ymm2[0,0,2,2,4,4,6,6]
; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = ymm0[0,u,u,1,5,u,u,6]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4,5],ymm2[6],ymm3[7]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm1[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm3[3,3]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm2, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: v12i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm2 = [0,4,u,1,5,u,2,6]
; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vbroadcastsd %xmm1, %ymm3
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm3[3,3]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2-SLOW-NEXT: vmovaps %xmm0, 32(%rdi)
; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rdi)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: v12i32:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm2 = [0,4,u,1,5,u,2,6]
; AVX2-FAST-ALL-NEXT: vpermps %ymm0, %ymm2, %ymm2
; AVX2-FAST-ALL-NEXT: vbroadcastsd %xmm1, %ymm3
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX2-FAST-ALL-NEXT: vbroadcastsd {{.*#+}} ymm3 = [7,3,7,3,7,3,7,3]
; AVX2-FAST-ALL-NEXT: vpermps %ymm0, %ymm3, %ymm0
; AVX2-FAST-ALL-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2-FAST-ALL-NEXT: vmovaps %xmm0, 32(%rdi)
; AVX2-FAST-ALL-NEXT: vmovaps %ymm2, (%rdi)
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: v12i32:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm2 = [0,4,u,1,5,u,2,6]
; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm2, %ymm2
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd %xmm1, %ymm3
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX2-FAST-PERLANE-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm3[3,3]
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX2-FAST-PERLANE-NEXT: vmovaps %xmm0, 32(%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rdi)
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; XOP-LABEL: v12i32:
; XOP: # %bb.0:
; XOP-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
; XOP-NEXT: vpermil2ps {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[u,1,5,u],ymm2[6],ymm0[6]
; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm1[0,1,0,1]
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm3
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOP-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm3[3,3]
; XOP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; XOP-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; XOP-NEXT: vmovaps %xmm0, 32(%rdi)
; XOP-NEXT: vmovaps %ymm2, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%r = shufflevector <8 x i32> %a, <8 x i32> %b, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
store <12 x i32> %r, ptr %p
ret void
}
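; PR29025: interleave three <4 x i8> inputs (a[i], b[i], c[i]) into a <12 x i8> store.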
define void @pr29025(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, ptr%p) nounwind {
; SSE2-LABEL: pr29025:
; SSE2: # %bb.0:
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,4]
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,1,1,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,3]
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm0, %xmm1
; SSE2-NEXT: movq %xmm1, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: pr29025:
; SSE42: # %bb.0:
; SSE42-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE42-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; SSE42-NEXT: pextrd $2, %xmm0, 8(%rdi)
; SSE42-NEXT: movq %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: pr29025:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; AVX1-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: pr29025:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,8,1,2,10,3,4,12,5,6,14,7,u,u,u,u]
; AVX2-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; XOP-LABEL: pr29025:
; XOP: # %bb.0:
; XOP-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,4],xmm2[0],xmm0[1,5],xmm2[1],xmm0[2,6],xmm2[2],xmm0[3,7],xmm2[3],xmm0[u,u,u,u]
; XOP-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; XOP-NEXT: vmovq %xmm0, (%rdi)
; XOP-NEXT: retq
%s1 = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%s2 = shufflevector <4 x i8> %c, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%r = shufflevector <8 x i8> %s1, <8 x i8> %s2, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
store <12 x i8> %r, ptr %p, align 1
ret void
}
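; Load a <24 x i8> and de-interleave it into three stride-3 <8 x i8> vectors stored to %q1, %q2, %q3.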
define void @interleave_24i8_out(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-LABEL: interleave_24i8_out:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; SSE2-NEXT: pandn %xmm3, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,1,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,4,7]
; SSE2-NEXT: packuswb %xmm4, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [0,255,255,0,255,255,0,255,255,255,255,255,255,255,255,255]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: por %xmm6, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pandn %xmm3, %xmm6
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,6,7,4]
; SSE2-NEXT: packuswb %xmm5, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm6, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm6
; SSE2-NEXT: por %xmm0, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,65535,65535,0,65535,65535,0,65535]
; SSE2-NEXT: pand %xmm0, %xmm6
; SSE2-NEXT: pandn %xmm3, %xmm0
; SSE2-NEXT: por %xmm6, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm4, (%rsi)
; SSE2-NEXT: movq %xmm5, (%rdx)
; SSE2-NEXT: movq %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i8_out:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm2[2,5,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = xmm3[0,3,6,9,12,15],zero,zero,xmm3[u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm2, %xmm3
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,xmm2[0,3,6,u,u,u,u,u,u,u,u]
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[1,4,7,10,13],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm2, %xmm4
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm1, %xmm0
; SSE42-NEXT: movq %xmm3, (%rsi)
; SSE42-NEXT: movq %xmm4, (%rdx)
; SSE42-NEXT: movq %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i8_out:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %xmm0
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm2, (%rsi)
; AVX1-NEXT: vmovq %xmm3, (%rdx)
; AVX1-NEXT: vmovq %xmm0, (%rcx)
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i8_out:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %xmm0
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,xmm1[2,5,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[0,3,6,9,12,15],zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,xmm1[0,3,6,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[1,4,7,10,13],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,xmm1[1,4,7,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,5,8,11,14],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm2, (%rsi)
; AVX2-NEXT: vmovq %xmm3, (%rdx)
; AVX2-NEXT: vmovq %xmm0, (%rcx)
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i8_out:
; XOP: # %bb.0:
; XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; XOP-NEXT: vmovdqu (%rdi), %xmm1
; XOP-NEXT: vpperm {{.*#+}} xmm2 = xmm1[0,3,6,9,12,15],xmm0[2,5],xmm1[u,u,u,u,u,u,u,u]
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm1[1,4,7,10,13],xmm0[0,3,6],xmm1[u,u,u,u,u,u,u,u]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[2,5,8,11,14],xmm0[1,4,7],xmm1[u,u,u,u,u,u,u,u]
; XOP-NEXT: vmovq %xmm2, (%rsi)
; XOP-NEXT: vmovq %xmm3, (%rdx)
; XOP-NEXT: vmovq %xmm0, (%rcx)
; XOP-NEXT: retq
%wide.vec = load <24 x i8>, ptr %p, align 4
%s1 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
%s2 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
%s3 = shufflevector <24 x i8> %wide.vec, <24 x i8> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
store <8 x i8> %s1, ptr %q1, align 4
store <8 x i8> %s2, ptr %q2, align 4
store <8 x i8> %s3, ptr %q3, align 4
ret void
}
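; Load three <8 x i8> vectors and interleave them into a single <24 x i8> store.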
define void @interleave_24i8_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-LABEL: interleave_24i8_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
; SSE2-NEXT: pandn %xmm3, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pandn %xmm5, %xmm4
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,1,0,4,5,6,7]
; SSE2-NEXT: packuswb %xmm1, %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,255,0,255,255,0,255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,3,3,4,5,6,7]
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movq %xmm2, 16(%rdi)
; SSE2-NEXT: movdqu %xmm4, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i8_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE42-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE42-NEXT: movdqa %xmm2, %xmm1
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = zero,zero,xmm3[0],zero,zero,xmm3[1],zero,zero,xmm3[2],zero,zero,xmm3[3],zero,zero,xmm3[4],zero
; SSE42-NEXT: por %xmm1, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[13],zero,xmm2[6,14],zero,xmm2[7,15],zero,xmm2[u,u,u,u,u,u,u,u]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = zero,xmm0[5],zero,zero,xmm0[6],zero,zero,xmm0[7,u,u,u,u,u,u,u,u]
; SSE42-NEXT: por %xmm2, %xmm0
; SSE42-NEXT: movq %xmm0, 16(%rdi)
; SSE42-NEXT: movdqu %xmm3, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i8_in:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm0[0],zero,zero,xmm0[1],zero,zero,xmm0[2],zero,zero,xmm0[3],zero,zero,xmm0[4],zero
; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[13],zero,xmm1[6,14],zero,xmm1[7,15],zero,xmm1[u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[5],zero,zero,xmm0[6],zero,zero,xmm0[7,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmovq %xmm0, 16(%rdi)
; AVX1-NEXT: vmovdqu %xmm2, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i8_in:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm0[0],zero,zero,xmm0[1],zero,zero,xmm0[2],zero,zero,xmm0[3],zero,zero,xmm0[4],zero
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[13],zero,xmm1[6,14],zero,xmm1[7,15],zero,xmm1[u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[5],zero,zero,xmm0[6],zero,zero,xmm0[7,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovq %xmm0, 16(%rdi)
; AVX2-NEXT: vmovdqu %xmm2, (%rdi)
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i8_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; XOP-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; XOP-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; XOP-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; XOP-NEXT: vpperm {{.*#+}} xmm2 = xmm1[0,8],xmm0[0],xmm1[1,9],xmm0[1],xmm1[2,10],xmm0[2],xmm1[3,11],xmm0[3],xmm1[4,12],xmm0[4],xmm1[5]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm1[13],xmm0[5],xmm1[6,14],xmm0[6],xmm1[7,15],xmm0[7],xmm1[u,u,u,u,u,u,u,u]
; XOP-NEXT: vmovq %xmm0, 16(%rdi)
; XOP-NEXT: vmovdqu %xmm2, (%rdi)
; XOP-NEXT: retq
%s1 = load <8 x i8>, ptr %q1, align 4
%s2 = load <8 x i8>, ptr %q2, align 4
%s3 = load <8 x i8>, ptr %q3, align 4
%t1 = shufflevector <8 x i8> %s1, <8 x i8> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%t2 = shufflevector <8 x i8> %s3, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%interleaved = shufflevector <16 x i8> %t1, <16 x i8> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
store <24 x i8> %interleaved, ptr %p, align 4
ret void
}
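; Load a <24 x i16> and de-interleave it into three stride-3 <8 x i16> vectors.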
define void @interleave_24i16_out(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-LABEL: interleave_24i16_out:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm3
; SSE2-NEXT: movdqu 16(%rdi), %xmm2
; SSE2-NEXT: movdqu 32(%rdi), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,2,1]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,5]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,0],xmm4[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm2, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[2,1,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,3,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm0[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
; SSE2-NEXT: movdqa %xmm6, %xmm8
; SSE2-NEXT: pandn %xmm7, %xmm8
; SSE2-NEXT: por %xmm5, %xmm8
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,2]
; SSE2-NEXT: pandn %xmm0, %xmm6
; SSE2-NEXT: por %xmm2, %xmm6
; SSE2-NEXT: movups %xmm1, (%rsi)
; SSE2-NEXT: movdqu %xmm8, (%rdx)
; SSE2-NEXT: movdqu %xmm6, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_out:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movdqu 16(%rdi), %xmm1
; SSE42-NEXT: movdqu 32(%rdi), %xmm2
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5]
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3],xmm1[4],xmm4[5,6],xmm1[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = xmm3[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6],xmm2[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15]
; SSE42-NEXT: movdqu %xmm4, (%rsi)
; SSE42-NEXT: movdqu %xmm3, (%rdx)
; SSE42-NEXT: movdqu %xmm1, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_out:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %xmm0
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,5]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm4[1,2],xmm2[3],xmm4[4,5],xmm2[6],xmm4[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6],xmm2[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15]
; AVX1-NEXT: vmovdqu %xmm3, (%rsi)
; AVX1-NEXT: vmovdqu %xmm4, (%rdx)
; AVX1-NEXT: vmovdqu %xmm0, (%rcx)
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i16_out:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %xmm0
; AVX2-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX2-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6],xmm1[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11]
; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm1[2],xmm4[3,4],xmm1[5],xmm4[6,7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6],xmm2[7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15]
; AVX2-NEXT: vmovdqu %xmm3, (%rsi)
; AVX2-NEXT: vmovdqu %xmm4, (%rdx)
; AVX2-NEXT: vmovdqu %xmm0, (%rcx)
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i16_out:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rdi), %xmm0
; XOP-NEXT: vmovdqu 16(%rdi), %xmm1
; XOP-NEXT: vmovdqu 32(%rdi), %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm3[0,1,6,7,12,13,2,3,8,9,14,15],xmm2[4,5,10,11]
; XOP-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm4[2,3,8,9,14,15,4,5,10,11],xmm2[0,1,6,7,12,13]
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13],xmm2[2,3,8,9,14,15]
; XOP-NEXT: vmovdqu %xmm3, (%rsi)
; XOP-NEXT: vmovdqu %xmm4, (%rdx)
; XOP-NEXT: vmovdqu %xmm0, (%rcx)
; XOP-NEXT: retq
%wide.vec = load <24 x i16>, ptr %p, align 4
%s1 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
%s2 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
%s3 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
store <8 x i16> %s1, ptr %q1, align 4
store <8 x i16> %s2, ptr %q2, align 4
store <8 x i16> %s3, ptr %q3, align 4
ret void
}
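; As above, but the loaded <24 x i16> is reversed before being de-interleaved.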
define void @interleave_24i16_out_reverse(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-LABEL: interleave_24i16_out_reverse:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movdqu 16(%rdi), %xmm1
; SSE2-NEXT: movdqu 32(%rdi), %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,5,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,2,1]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,6]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,0],xmm4[2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: pand %xmm4, %xmm6
; SSE2-NEXT: por %xmm5, %xmm6
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,1,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,1,0,3,4,5,6,7]
; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,7]
; SSE2-NEXT: movdqa %xmm6, %xmm8
; SSE2-NEXT: pandn %xmm7, %xmm8
; SSE2-NEXT: por %xmm5, %xmm8
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,1,2,0]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,0,1,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
; SSE2-NEXT: pandn %xmm0, %xmm6
; SSE2-NEXT: por %xmm1, %xmm6
; SSE2-NEXT: movups %xmm2, (%rsi)
; SSE2-NEXT: movdqu %xmm8, (%rdx)
; SSE2-NEXT: movdqu %xmm6, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_out_reverse:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rdi), %xmm0
; SSE42-NEXT: movdqu 16(%rdi), %xmm1
; SSE42-NEXT: movdqu 32(%rdi), %xmm2
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,6]
; SSE42-NEXT: movdqa %xmm1, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2,3],xmm2[4],xmm4[5,6],xmm2[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[14,15,8,9,2,3,12,13,6,7,0,1,u,u,u,u]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5],xmm3[6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm3
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3,4],xmm1[5],xmm3[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5,6],xmm0[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = xmm3[12,13,6,7,0,1,10,11,4,5,14,15,8,9,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[10,11,4,5,14,15,8,9,2,3,12,13,6,7,0,1]
; SSE42-NEXT: movdqu %xmm4, (%rsi)
; SSE42-NEXT: movdqu %xmm3, (%rdx)
; SSE42-NEXT: movdqu %xmm1, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_out_reverse:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %xmm0
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6],xmm2[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[14,15,8,9,2,3,12,13,6,7,0,1,10,11,4,5]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm4[1,2],xmm2[3],xmm4[4,5],xmm2[6],xmm4[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[12,13,6,7,0,1,10,11,4,5,14,15,8,9,2,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,14,15,8,9,2,3,12,13,6,7,0,1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT: vmovdqu %xmm3, (%rsi)
; AVX1-NEXT: vmovdqu %xmm4, (%rdx)
; AVX1-NEXT: vmovdqu %xmm0, (%rcx)
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleave_24i16_out_reverse:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %xmm0
; AVX2-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX2-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6],xmm2[7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[14,15,8,9,2,3,12,13,6,7,0,1,10,11,4,5]
; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm1[2],xmm4[3,4],xmm1[5],xmm4[6,7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[12,13,6,7,0,1,10,11,4,5,14,15,8,9,2,3]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[10,11,4,5,14,15,8,9,2,3,12,13,6,7,0,1]
; AVX2-NEXT: vmovdqu %xmm3, (%rsi)
; AVX2-NEXT: vmovdqu %xmm4, (%rdx)
; AVX2-NEXT: vmovdqu %xmm0, (%rcx)
; AVX2-NEXT: retq
;
; XOP-LABEL: interleave_24i16_out_reverse:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rdi), %xmm0
; XOP-NEXT: vmovdqu 16(%rdi), %xmm1
; XOP-NEXT: vmovdqu 32(%rdi), %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm2[14,15,8,9,2,3],xmm3[12,13,6,7,0,1,10,11,4,5]
; XOP-NEXT: vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm2[12,13,6,7,0,1],xmm4[10,11,4,5,14,15,8,9,2,3]
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm2[10,11,4,5],xmm0[14,15,8,9,2,3,12,13,6,7,0,1]
; XOP-NEXT: vmovdqu %xmm3, (%rsi)
; XOP-NEXT: vmovdqu %xmm4, (%rdx)
; XOP-NEXT: vmovdqu %xmm0, (%rcx)
; XOP-NEXT: retq
%wide.vec.reverse = load <24 x i16>, ptr %p, align 4
%wide.vec = shufflevector <24 x i16> %wide.vec.reverse, <24 x i16> undef, <24 x i32> <i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
%s1 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
%s2 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
%s3 = shufflevector <24 x i16> %wide.vec, <24 x i16> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
store <8 x i16> %s1, ptr %q1, align 4
store <8 x i16> %s2, ptr %q2, align 4
store <8 x i16> %s3, ptr %q3, align 4
ret void
}
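; Load three <8 x i16> vectors and interleave them into a single <24 x i16> store.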
define void @interleave_24i16_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-LABEL: interleave_24i16_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rsi), %xmm0
; SSE2-NEXT: movdqu (%rdx), %xmm2
; SSE2-NEXT: movdqu (%rcx), %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,5]
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,2,2]
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[3,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
; SSE2-NEXT: pandn %xmm6, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
; SSE2-NEXT: pandn %xmm6, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,1,3,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,1,0,4,5,6,7]
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm0, %xmm4
; SSE2-NEXT: movdqu %xmm4, 32(%rdi)
; SSE2-NEXT: movdqu %xmm5, 16(%rdi)
; SSE2-NEXT: movdqu %xmm1, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm0
; SSE42-NEXT: movdqu (%rdx), %xmm1
; SSE42-NEXT: movdqu (%rcx), %xmm2
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
; SSE42-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[3,3,3,3,4,5,6,7]
; SSE42-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,0,0]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
; SSE42-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5,6],xmm0[7]
; SSE42-NEXT: movdqu %xmm0, 32(%rdi)
; SSE42-NEXT: movdqu %xmm5, (%rdi)
; SSE42-NEXT: movdqu %xmm3, 16(%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_in:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rsi), %xmm0
; AVX1-NEXT: vmovdqu (%rdx), %xmm1
; AVX1-NEXT: vmovdqu (%rcx), %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm1[3,3,3,3,4,5,6,7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,2,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6],xmm5[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[0,0,0,0]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
; AVX1-NEXT: vmovdqu %xmm0, (%rdi)
; AVX1-NEXT: vmovdqu %xmm4, 32(%rdi)
; AVX1-NEXT: vmovdqu %xmm3, 16(%rdi)
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i16_in:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovdqu (%rsi), %xmm0
; AVX2-SLOW-NEXT: vmovdqu (%rdx), %xmm1
; AVX2-SLOW-NEXT: vmovdqu (%rcx), %xmm2
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
; AVX2-SLOW-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
; AVX2-SLOW-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX2-SLOW-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX2-SLOW-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX2-SLOW-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX2-SLOW-NEXT: vmovdqu %xmm0, 32(%rdi)
; AVX2-SLOW-NEXT: vmovdqu %ymm3, (%rdi)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: interleave_24i16_in:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vmovdqu (%rsi), %xmm0
; AVX2-FAST-ALL-NEXT: vmovdqu (%rdx), %xmm1
; AVX2-FAST-ALL-NEXT: vmovdqu (%rcx), %xmm2
; AVX2-FAST-ALL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
; AVX2-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX2-FAST-ALL-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX2-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm5 = [0,4,1,5,1,5,2,6]
; AVX2-FAST-ALL-NEXT: vpermd %ymm3, %ymm5, %ymm3
; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,1,4,5,u,u,2,3,6,7,u,u,8,9,12,13,u,u,18,19,22,23,u,u,24,25,28,29,u,u,26,27]
; AVX2-FAST-ALL-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX2-FAST-ALL-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
; AVX2-FAST-ALL-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX2-FAST-ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; AVX2-FAST-ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
; AVX2-FAST-ALL-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX2-FAST-ALL-NEXT: vmovdqu %xmm0, 32(%rdi)
; AVX2-FAST-ALL-NEXT: vmovdqu %ymm3, (%rdi)
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: interleave_24i16_in:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rsi), %xmm0
; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rdx), %xmm1
; AVX2-FAST-PERLANE-NEXT: vmovdqu (%rcx), %xmm2
; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,2,3,2,3,8,9,4,5,4,5,16,17,6,7,22,23,18,19,8,9,24,25,20,21,10,11]
; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm3, %ymm5
; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8],ymm3[9],ymm5[10,11],ymm3[12],ymm5[13,14],ymm3[15]
; AVX2-FAST-PERLANE-NEXT: vpmovsxbd {{.*#+}} ymm4 = [0,0,0,0,1,1,0,2]
; AVX2-FAST-PERLANE-NEXT: vpermd %ymm2, %ymm4, %ymm4
; AVX2-FAST-PERLANE-NEXT: vpmovsxbw {{.*#+}} ymm5 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
; AVX2-FAST-PERLANE-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX2-FAST-PERLANE-NEXT: vmovdqu %xmm0, 32(%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, (%rdi)
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; XOP-LABEL: interleave_24i16_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqu (%rsi), %xmm0
; XOP-NEXT: vmovdqu (%rdx), %xmm1
; XOP-NEXT: vmovdqu (%rcx), %xmm2
; XOP-NEXT: vpperm {{.*#+}} xmm3 = xmm0[u,u,6,7],xmm1[6,7],xmm0[u,u,8,9],xmm1[8,9],xmm0[u,u,10,11]
; XOP-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,2,2]
; XOP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
; XOP-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; XOP-NEXT: vpperm {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm2[0,1],xmm4[4,5,6,7],xmm2[2,3],xmm4[8,9,10,11]
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; XOP-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[4,5],xmm2[10,11],xmm0[10,11,8,9],xmm2[12,13],xmm0[14,15,12,13],xmm2[14,15]
; XOP-NEXT: vmovdqu %xmm0, 32(%rdi)
; XOP-NEXT: vmovups %ymm3, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%s1 = load <8 x i16>, ptr %q1, align 4
%s2 = load <8 x i16>, ptr %q2, align 4
%s3 = load <8 x i16>, ptr %q3, align 4
%t1 = shufflevector <8 x i16> %s1, <8 x i16> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%t2 = shufflevector <8 x i16> %s3, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%interleaved = shufflevector <16 x i16> %t1, <16 x i16> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
store <24 x i16> %interleaved, ptr %p, align 4
ret void
}
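; Deinterleave a <24 x i32> loaded from %p into three <8 x i32> vectors stored to %q1, %q2 and %q3.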
define void @interleave_24i32_out(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-LABEL: interleave_24i32_out:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu 64(%rdi), %xmm2
; SSE2-NEXT: movups 80(%rdi), %xmm4
; SSE2-NEXT: movdqu (%rdi), %xmm0
; SSE2-NEXT: movdqu 16(%rdi), %xmm3
; SSE2-NEXT: movups 32(%rdi), %xmm6
; SSE2-NEXT: movdqu 48(%rdi), %xmm1
; SSE2-NEXT: movaps %xmm6, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[1,1,1,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm3[2,0]
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,3],xmm6[2,0]
; SSE2-NEXT: movaps %xmm4, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm1[2,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm2[1,1,1,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm4[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm2[2,0]
; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,3],xmm4[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,1],xmm2[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,1],xmm3[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[0,0]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm7[2,0]
; SSE2-NEXT: movups %xmm10, 16(%rsi)
; SSE2-NEXT: movups %xmm8, (%rsi)
; SSE2-NEXT: movups %xmm1, 16(%rdx)
; SSE2-NEXT: movups %xmm0, (%rdx)
; SSE2-NEXT: movups %xmm9, 16(%rcx)
; SSE2-NEXT: movups %xmm5, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_out:
; SSE42: # %bb.0:
; SSE42-NEXT: movups 80(%rdi), %xmm0
; SSE42-NEXT: movdqu 64(%rdi), %xmm1
; SSE42-NEXT: movdqu (%rdi), %xmm4
; SSE42-NEXT: movdqu 16(%rdi), %xmm2
; SSE42-NEXT: movups 32(%rdi), %xmm3
; SSE42-NEXT: movdqu 48(%rdi), %xmm5
; SSE42-NEXT: movdqa %xmm2, %xmm6
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3],xmm6[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,2,3]
; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm2[2,3]
; SSE42-NEXT: insertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm3[1]
; SSE42-NEXT: movdqa %xmm1, %xmm8
; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm5[2,3],xmm8[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm5[2,3,2,3]
; SSE42-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm1[2,3]
; SSE42-NEXT: insertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm0[1]
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm3[4,5],xmm6[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,0,3,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm0[4,5],xmm8[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,0,3,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm2[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm3[0,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1],xmm1[2,3],xmm9[4,5,6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm0[0,3]
; SSE42-NEXT: movups %xmm5, 16(%rsi)
; SSE42-NEXT: movups %xmm4, (%rsi)
; SSE42-NEXT: movdqu %xmm8, 16(%rdx)
; SSE42-NEXT: movdqu %xmm6, (%rdx)
; SSE42-NEXT: movups %xmm9, 16(%rcx)
; SSE42-NEXT: movups %xmm7, (%rcx)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_out:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovups 64(%rdi), %ymm0
; AVX1-NEXT: vmovups 32(%rdi), %ymm1
; AVX1-NEXT: vmovups (%rdi), %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
; AVX1-NEXT: vmovups 16(%rdi), %xmm4
; AVX1-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,1],ymm1[1,3],ymm4[6,5],ymm1[5,7]
; AVX1-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm4[0,2],ymm3[4,7],ymm4[4,6]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
; AVX1-NEXT: vshufps {{.*#+}} ymm5 = ymm0[1,0],ymm4[2,0],ymm0[5,4],ymm4[6,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
; AVX1-NEXT: vshufps {{.*#+}} ymm5 = ymm0[2,0],ymm4[3,0],ymm0[6,4],ymm4[7,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm5[2,0],ymm4[4,4],ymm5[6,4]
; AVX1-NEXT: vmovups 16(%rdi), %xmm6
; AVX1-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; AVX1-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm6[0,3],ymm7[5,6],ymm6[4,7]
; AVX1-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; AVX1-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[2,0],ymm6[5,4],ymm2[6,4]
; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,3],ymm2[6,4],ymm1[4,7]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm4[0,1],ymm0[0,3],ymm4[4,5],ymm0[4,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
; AVX1-NEXT: vmovups %ymm3, (%rsi)
; AVX1-NEXT: vmovups %ymm5, (%rdx)
; AVX1-NEXT: vmovups %ymm0, (%rcx)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i32_out:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovups (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vmovups 64(%rdi), %ymm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm4 = [0,3,6,1,4,7,2,5]
; AVX2-SLOW-NEXT: vpermps %ymm3, %ymm4, %ymm3
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0],ymm4[1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm5 = [1,4,7,2,5,0,3,6]
; AVX2-SLOW-NEXT: vpermps %ymm4, %ymm5, %ymm4
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-SLOW-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
; AVX2-SLOW-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1,0,3,4,5,4,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT: vmovups %ymm3, (%rsi)
; AVX2-SLOW-NEXT: vmovups %ymm4, (%rdx)
; AVX2-SLOW-NEXT: vmovups %ymm0, (%rcx)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: interleave_24i32_out:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vmovups (%rdi), %ymm0
; AVX2-FAST-ALL-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-FAST-ALL-NEXT: vmovups 64(%rdi), %ymm2
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm4 = [0,3,6,1,4,7,2,5]
; AVX2-FAST-ALL-NEXT: vpermps %ymm3, %ymm4, %ymm3
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0],ymm4[1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm5 = [1,4,7,2,5,0,3,6]
; AVX2-FAST-ALL-NEXT: vpermps %ymm4, %ymm5, %ymm4
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,1,4,7]
; AVX2-FAST-ALL-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-ALL-NEXT: vmovups %ymm3, (%rsi)
; AVX2-FAST-ALL-NEXT: vmovups %ymm4, (%rdx)
; AVX2-FAST-ALL-NEXT: vmovups %ymm0, (%rcx)
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: interleave_24i32_out:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vmovups (%rdi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups 32(%rdi), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups 64(%rdi), %ymm2
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm4 = [0,3,6,1,4,7,2,5]
; AVX2-FAST-PERLANE-NEXT: vpermps %ymm3, %ymm4, %ymm3
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm2[0],ymm4[1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm5 = [1,4,7,2,5,0,3,6]
; AVX2-FAST-PERLANE-NEXT: vpermps %ymm4, %ymm5, %ymm4
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-FAST-PERLANE-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,0,3,6,u,u,u]
; AVX2-FAST-PERLANE-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,1,0,3,4,5,4,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, (%rsi)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, (%rdx)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, (%rcx)
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; XOP-LABEL: interleave_24i32_out:
; XOP: # %bb.0:
; XOP-NEXT: vmovups 64(%rdi), %ymm0
; XOP-NEXT: vmovups 32(%rdi), %ymm1
; XOP-NEXT: vmovups (%rdi), %ymm2
; XOP-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
; XOP-NEXT: vmovups 16(%rdi), %xmm4
; XOP-NEXT: vshufps {{.*#+}} ymm4 = ymm4[2,1],ymm1[1,3],ymm4[6,5],ymm1[5,7]
; XOP-NEXT: vshufps {{.*#+}} ymm3 = ymm3[0,3],ymm4[0,2],ymm3[4,7],ymm4[4,6]
; XOP-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,0,1]
; XOP-NEXT: vshufps {{.*#+}} ymm5 = ymm0[1,0],ymm4[2,0],ymm0[5,4],ymm4[6,4]
; XOP-NEXT: vshufps {{.*#+}} ymm5 = ymm5[0,1,2,0,4,5,6,4]
; XOP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7]
; XOP-NEXT: vshufps {{.*#+}} ymm5 = ymm0[2,0],ymm4[3,0],ymm0[6,4],ymm4[7,4]
; XOP-NEXT: vshufps {{.*#+}} ymm5 = ymm4[0,0],ymm5[2,0],ymm4[4,4],ymm5[6,4]
; XOP-NEXT: vmovups 16(%rdi), %xmm6
; XOP-NEXT: vblendps {{.*#+}} ymm7 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
; XOP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[1,2],ymm6[0,3],ymm7[5,6],ymm6[4,7]
; XOP-NEXT: vshufps {{.*#+}} ymm7 = ymm7[0,2,3,1,4,6,7,5]
; XOP-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7]
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; XOP-NEXT: vshufps {{.*#+}} ymm2 = ymm6[1,0],ymm2[2,0],ymm6[5,4],ymm2[6,4]
; XOP-NEXT: vshufps {{.*#+}} ymm1 = ymm2[2,0],ymm1[0,3],ymm2[6,4],ymm1[4,7]
; XOP-NEXT: vshufps {{.*#+}} ymm0 = ymm4[0,1],ymm0[0,3],ymm4[4,5],ymm0[4,7]
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
; XOP-NEXT: vmovups %ymm3, (%rsi)
; XOP-NEXT: vmovups %ymm5, (%rdx)
; XOP-NEXT: vmovups %ymm0, (%rcx)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%wide.vec = load <24 x i32>, ptr %p, align 4
%s1 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
%s2 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 1, i32 4, i32 7, i32 10, i32 13, i32 16, i32 19, i32 22>
%s3 = shufflevector <24 x i32> %wide.vec, <24 x i32> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
store <8 x i32> %s1, ptr %q1, align 4
store <8 x i32> %s2, ptr %q2, align 4
store <8 x i32> %s3, ptr %q3, align 4
ret void
}
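; Interleave three <8 x i32> vectors (loaded from %q1, %q2, %q3) into a single <24 x i32> stored to %p.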
define void @interleave_24i32_in(ptr %p, ptr %q1, ptr %q2, ptr %q3) nounwind {
; SSE2-LABEL: interleave_24i32_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movups (%rsi), %xmm1
; SSE2-NEXT: movups 16(%rsi), %xmm0
; SSE2-NEXT: movups (%rdx), %xmm3
; SSE2-NEXT: movups 16(%rdx), %xmm5
; SSE2-NEXT: movups (%rcx), %xmm4
; SSE2-NEXT: movups 16(%rcx), %xmm7
; SSE2-NEXT: movaps %xmm4, %xmm6
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm1[1,3]
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0,2]
; SSE2-NEXT: movaps %xmm0, %xmm8
; SSE2-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm5[1]
; SSE2-NEXT: movaps %xmm7, %xmm9
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm0[1,3]
; SSE2-NEXT: movaps %xmm0, %xmm6
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm5[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm7[2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,1],xmm5[1,1]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm8[0,2]
; SSE2-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm9[0,2]
; SSE2-NEXT: movaps %xmm1, %xmm5
; SSE2-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm3[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm3[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm4[2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm3[1,1]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm5[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,1,3]
; SSE2-NEXT: movups %xmm4, 16(%rdi)
; SSE2-NEXT: movups %xmm6, 48(%rdi)
; SSE2-NEXT: movups %xmm7, 64(%rdi)
; SSE2-NEXT: movups %xmm2, (%rdi)
; SSE2-NEXT: movups %xmm1, 32(%rdi)
; SSE2-NEXT: movups %xmm0, 80(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm0
; SSE42-NEXT: movdqu 16(%rsi), %xmm2
; SSE42-NEXT: movdqu (%rdx), %xmm3
; SSE42-NEXT: movdqu 16(%rdx), %xmm4
; SSE42-NEXT: movdqu (%rcx), %xmm5
; SSE42-NEXT: movdqu 16(%rcx), %xmm6
; SSE42-NEXT: movdqa %xmm0, %xmm1
; SSE42-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,1,2,2]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5],xmm7[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm2[4,5],xmm7[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm8
; SSE42-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,1,2,2]
; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm8[0,1,2,3],xmm9[4,5],xmm8[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm3[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm0[4,5],xmm8[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm5[2,3],xmm8[4,5,6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm4[3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3,4,5],xmm4[6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm3[3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3,4,5],xmm2[6,7]
; SSE42-NEXT: movdqu %xmm2, 32(%rdi)
; SSE42-NEXT: movdqu %xmm4, 80(%rdi)
; SSE42-NEXT: movdqu %xmm8, 16(%rdi)
; SSE42-NEXT: movdqu %xmm9, 48(%rdi)
; SSE42-NEXT: movdqu %xmm7, 64(%rdi)
; SSE42-NEXT: movdqu %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_in:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovupd (%rcx), %ymm0
; AVX1-NEXT: vmovups (%rdx), %xmm1
; AVX1-NEXT: vmovups 16(%rdx), %xmm2
; AVX1-NEXT: vmovups (%rsi), %xmm3
; AVX1-NEXT: vmovups 16(%rsi), %xmm4
; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm2[3,3]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm1[1,1],xmm4[0,2]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT: vbroadcastsd (%rcx), %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
; AVX1-NEXT: vmovups %ymm0, 32(%rdi)
; AVX1-NEXT: vmovups %ymm1, (%rdi)
; AVX1-NEXT: vmovups %ymm2, 64(%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: interleave_24i32_in:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovups (%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups (%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups (%rcx), %ymm2
; AVX2-SLOW-NEXT: vbroadcastsd 24(%rsi), %ymm3
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-SLOW-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm4, (%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-ALL-LABEL: interleave_24i32_in:
; AVX2-FAST-ALL: # %bb.0:
; AVX2-FAST-ALL-NEXT: vmovups (%rsi), %ymm0
; AVX2-FAST-ALL-NEXT: vmovups (%rdx), %ymm1
; AVX2-FAST-ALL-NEXT: vmovups (%rcx), %ymm2
; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [5,0,7,6,5,0,7,6]
; AVX2-FAST-ALL-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm3, %ymm3
; AVX2-FAST-ALL-NEXT: vbroadcastsd 24(%rsi), %ymm4
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
; AVX2-FAST-ALL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm4, %ymm4
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-FAST-ALL-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-FAST-ALL-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; AVX2-FAST-ALL-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-FAST-ALL-NEXT: vmovups %ymm4, (%rdi)
; AVX2-FAST-ALL-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
; AVX2-FAST-PERLANE-LABEL: interleave_24i32_in:
; AVX2-FAST-PERLANE: # %bb.0:
; AVX2-FAST-PERLANE-NEXT: vmovups (%rsi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups (%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups (%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rsi), %ymm3
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, (%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; XOP-LABEL: interleave_24i32_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovups (%rsi), %ymm0
; XOP-NEXT: vmovups (%rdx), %ymm1
; XOP-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[u,3],ymm1[3],ymm0[u,4],ymm1[4],ymm0[u,5]
; XOP-NEXT: vmovups (%rcx), %ymm1
; XOP-NEXT: vmovups (%rdx), %xmm2
; XOP-NEXT: vmovups 16(%rdx), %xmm3
; XOP-NEXT: vmovups (%rsi), %xmm4
; XOP-NEXT: vmovups 16(%rsi), %xmm5
; XOP-NEXT: vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm3[3,3]
; XOP-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm3[1]
; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm5[0,2]
; XOP-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
; XOP-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[2,3,2,3]
; XOP-NEXT: vshufpd {{.*#+}} ymm5 = ymm5[0,0,3,3]
; XOP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
; XOP-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm2[1]
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm2[1,1],xmm5[0,2]
; XOP-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; XOP-NEXT: vbroadcastsd (%rcx), %ymm4
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
; XOP-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; XOP-NEXT: vmovups %ymm0, 32(%rdi)
; XOP-NEXT: vmovups %ymm2, (%rdi)
; XOP-NEXT: vmovups %ymm3, 64(%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%s1 = load <8 x i32>, ptr %q1, align 4
%s2 = load <8 x i32>, ptr %q2, align 4
%s3 = load <8 x i32>, ptr %q3, align 4
%t1 = shufflevector <8 x i32> %s1, <8 x i32> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%t2 = shufflevector <8 x i32> %s3, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%interleaved = shufflevector <16 x i32> %t1, <16 x i32> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
store <24 x i32> %interleaved, ptr %p, align 4
ret void
}
; Repeat each element x 3 of <16 x i8> a0 + a1 to create a <96 x i8>.
define void @splat3_128(<16 x i8> %a0, <16 x i8> %a1, ptr %a2) {
; SSE2-LABEL: splat3_128:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm0, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm6, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, 80(%rdi)
; SSE2-NEXT: movdqa %xmm0, 64(%rdi)
; SSE2-NEXT: movdqa %xmm7, 48(%rdi)
; SSE2-NEXT: movdqa %xmm5, 32(%rdi)
; SSE2-NEXT: movdqa %xmm3, 16(%rdi)
; SSE2-NEXT: movdqa %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: splat3_128:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb %xmm2, %xmm3
; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [5,5,6,6,6,7,7,7,8,8,8,9,9,9,10,10]
; SSE42-NEXT: movdqa %xmm0, %xmm5
; SSE42-NEXT: pshufb %xmm4, %xmm5
; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15]
; SSE42-NEXT: pshufb %xmm6, %xmm0
; SSE42-NEXT: movdqa %xmm1, %xmm7
; SSE42-NEXT: pshufb %xmm2, %xmm7
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb %xmm4, %xmm2
; SSE42-NEXT: pshufb %xmm6, %xmm1
; SSE42-NEXT: movdqa %xmm1, 80(%rdi)
; SSE42-NEXT: movdqa %xmm2, 64(%rdi)
; SSE42-NEXT: movdqa %xmm7, 48(%rdi)
; SSE42-NEXT: movdqa %xmm0, 32(%rdi)
; SSE42-NEXT: movdqa %xmm5, 16(%rdi)
; SSE42-NEXT: movdqa %xmm3, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: splat3_128:
; AVX1: # %bb.0:
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX1-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm6, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 80(%rdi)
; AVX1-NEXT: vmovdqa %xmm2, 64(%rdi)
; AVX1-NEXT: vmovdqa %xmm1, 48(%rdi)
; AVX1-NEXT: vmovdqa %xmm5, 32(%rdi)
; AVX1-NEXT: vmovdqa %xmm3, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat3_128:
; AVX2: # %bb.0:
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm3[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm3[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa %ymm1, 64(%rdi)
; AVX2-NEXT: vmovdqa %ymm0, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm3, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: splat3_128:
; XOP: # %bb.0:
; XOP-NEXT: vpalignr {{.*#+}} xmm2 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vpalignr {{.*#+}} xmm3 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm7 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm3 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; XOP-NEXT: vmovdqa {{.*#+}} xmm8 = [5,16,11,6,17,12,7,18,13,8,19,14,9,20,15,10]
; XOP-NEXT: vpperm %xmm8, %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpperm %xmm8, %xmm0, %xmm7, %xmm0
; XOP-NEXT: vpperm %xmm8, %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpperm %xmm8, %xmm1, %xmm6, %xmm1
; XOP-NEXT: vpperm %xmm8, %xmm5, %xmm3, %xmm3
; XOP-NEXT: vpperm %xmm8, %xmm6, %xmm5, %xmm5
; XOP-NEXT: vmovdqa %xmm5, 80(%rdi)
; XOP-NEXT: vmovdqa %xmm3, 64(%rdi)
; XOP-NEXT: vmovdqa %xmm1, 48(%rdi)
; XOP-NEXT: vmovdqa %xmm4, 32(%rdi)
; XOP-NEXT: vmovdqa %xmm2, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm0, (%rdi)
; XOP-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%2 = shufflevector <16 x i8> %a0, <16 x i8> %a1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%3 = shufflevector <64 x i8> %1, <64 x i8> %2, <96 x i32> <i32 0, i32 32, i32 64, i32 1, i32 33, i32 65, i32 2, i32 34, i32 66, i32 3, i32 35, i32 67, i32 4, i32 36, i32 68, i32 5, i32 37, i32 69, i32 6, i32 38, i32 70, i32 7, i32 39, i32 71, i32 8, i32 40, i32 72, i32 9, i32 41, i32 73, i32 10, i32 42, i32 74, i32 11, i32 43, i32 75, i32 12, i32 44, i32 76, i32 13, i32 45, i32 77, i32 14, i32 46, i32 78, i32 15, i32 47, i32 79, i32 16, i32 48, i32 80, i32 17, i32 49, i32 81, i32 18, i32 50, i32 82, i32 19, i32 51, i32 83, i32 20, i32 52, i32 84, i32 21, i32 53, i32 85, i32 22, i32 54, i32 86, i32 23, i32 55, i32 87, i32 24, i32 56, i32 88, i32 25, i32 57, i32 89, i32 26, i32 58, i32 90, i32 27, i32 59, i32 91, i32 28, i32 60, i32 92, i32 29, i32 61, i32 93, i32 30, i32 62, i32 94, i32 31, i32 63, i32 95>
store <96 x i8> %3, ptr %a2
ret void
}
; Repeat each element x 3 of <32 x i8> a0 to create a <96 x i8>.
define void @splat3_256(<32 x i8> %a0, ptr %a1) {
; SSE2-LABEL: splat3_256:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm5, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm5, %xmm3
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm0, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE2-NEXT: packuswb %xmm6, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,1,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[2,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,7,7]
; SSE2-NEXT: packuswb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, 80(%rdi)
; SSE2-NEXT: movdqa %xmm0, 64(%rdi)
; SSE2-NEXT: movdqa %xmm7, 48(%rdi)
; SSE2-NEXT: movdqa %xmm5, 32(%rdi)
; SSE2-NEXT: movdqa %xmm3, 16(%rdi)
; SSE2-NEXT: movdqa %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: splat3_256:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5]
; SSE42-NEXT: movdqa %xmm0, %xmm3
; SSE42-NEXT: pshufb %xmm2, %xmm3
; SSE42-NEXT: movdqa {{.*#+}} xmm4 = [5,5,6,6,6,7,7,7,8,8,8,9,9,9,10,10]
; SSE42-NEXT: movdqa %xmm0, %xmm5
; SSE42-NEXT: pshufb %xmm4, %xmm5
; SSE42-NEXT: movdqa {{.*#+}} xmm6 = [10,11,11,11,12,12,12,13,13,13,14,14,14,15,15,15]
; SSE42-NEXT: pshufb %xmm6, %xmm0
; SSE42-NEXT: movdqa %xmm1, %xmm7
; SSE42-NEXT: pshufb %xmm2, %xmm7
; SSE42-NEXT: movdqa %xmm1, %xmm2
; SSE42-NEXT: pshufb %xmm4, %xmm2
; SSE42-NEXT: pshufb %xmm6, %xmm1
; SSE42-NEXT: movdqa %xmm1, 80(%rdi)
; SSE42-NEXT: movdqa %xmm2, 64(%rdi)
; SSE42-NEXT: movdqa %xmm7, 48(%rdi)
; SSE42-NEXT: movdqa %xmm0, 32(%rdi)
; SSE42-NEXT: movdqa %xmm5, 16(%rdi)
; SSE42-NEXT: movdqa %xmm3, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: splat3_256:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX1-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm6, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 80(%rdi)
; AVX1-NEXT: vmovdqa %xmm2, 64(%rdi)
; AVX1-NEXT: vmovdqa %xmm1, 48(%rdi)
; AVX1-NEXT: vmovdqa %xmm5, 32(%rdi)
; AVX1-NEXT: vmovdqa %xmm3, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: splat3_256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm3[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm3[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vpshufb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vmovdqa %ymm1, 64(%rdi)
; AVX2-NEXT: vmovdqa %ymm0, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm3, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: splat3_256:
; XOP: # %bb.0:
; XOP-NEXT: vpalignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOP-NEXT: vpalignr {{.*#+}} xmm3 = xmm2[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; XOP-NEXT: vpalignr {{.*#+}} xmm6 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm7 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm3 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm1 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm5 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; XOP-NEXT: vpalignr {{.*#+}} xmm4 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; XOP-NEXT: vmovdqa {{.*#+}} xmm8 = [5,16,11,6,17,12,7,18,13,8,19,14,9,20,15,10]
; XOP-NEXT: vpperm %xmm8, %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpperm %xmm8, %xmm0, %xmm7, %xmm0
; XOP-NEXT: vpperm %xmm8, %xmm7, %xmm4, %xmm4
; XOP-NEXT: vpperm %xmm8, %xmm2, %xmm6, %xmm2
; XOP-NEXT: vpperm %xmm8, %xmm5, %xmm3, %xmm3
; XOP-NEXT: vpperm %xmm8, %xmm6, %xmm5, %xmm5
; XOP-NEXT: vmovdqa %xmm5, 80(%rdi)
; XOP-NEXT: vmovdqa %xmm3, 64(%rdi)
; XOP-NEXT: vmovdqa %xmm2, 48(%rdi)
; XOP-NEXT: vmovdqa %xmm4, 32(%rdi)
; XOP-NEXT: vmovdqa %xmm1, 16(%rdi)
; XOP-NEXT: vmovdqa %xmm0, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%2 = shufflevector <32 x i8> %a0, <32 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%3 = shufflevector <64 x i8> %1, <64 x i8> %2, <96 x i32> <i32 0, i32 32, i32 64, i32 1, i32 33, i32 65, i32 2, i32 34, i32 66, i32 3, i32 35, i32 67, i32 4, i32 36, i32 68, i32 5, i32 37, i32 69, i32 6, i32 38, i32 70, i32 7, i32 39, i32 71, i32 8, i32 40, i32 72, i32 9, i32 41, i32 73, i32 10, i32 42, i32 74, i32 11, i32 43, i32 75, i32 12, i32 44, i32 76, i32 13, i32 45, i32 77, i32 14, i32 46, i32 78, i32 15, i32 47, i32 79, i32 16, i32 48, i32 80, i32 17, i32 49, i32 81, i32 18, i32 50, i32 82, i32 19, i32 51, i32 83, i32 20, i32 52, i32 84, i32 21, i32 53, i32 85, i32 22, i32 54, i32 86, i32 23, i32 55, i32 87, i32 24, i32 56, i32 88, i32 25, i32 57, i32 89, i32 26, i32 58, i32 90, i32 27, i32 59, i32 91, i32 28, i32 60, i32 92, i32 29, i32 61, i32 93, i32 30, i32 62, i32 94, i32 31, i32 63, i32 95>
store <96 x i8> %3, ptr %a1
ret void
}
; D79987
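; Insert the low two elements of an unaligned <3 x i32> load into an otherwise-zero <16 x i32>.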
define <16 x i32> @splat_v3i32(ptr %ptr) {
; SSE2-LABEL: splat_v3i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,0,1]
; SSE2-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: xorps %xmm3, %xmm3
; SSE2-NEXT: retq
;
; SSE42-LABEL: splat_v3i32:
; SSE42: # %bb.0:
; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pxor %xmm2, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,0,1]
; SSE42-NEXT: xorps %xmm3, %xmm3
; SSE42-NEXT: retq
;
; AVX1-LABEL: splat_v3i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6,7]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: splat_v3i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-SLOW-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vbroadcastss %xmm1, %xmm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6,7]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: splat_v3i32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-FAST-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,ymm1[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-FAST-NEXT: retq
;
; XOP-LABEL: splat_v3i32:
; XOP: # %bb.0:
; XOP-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; XOP-NEXT: vxorps %xmm2, %xmm2, %xmm2
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6,7]
; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6,7]
; XOP-NEXT: retq
%1 = load <3 x i32>, ptr %ptr, align 1
%2 = shufflevector <3 x i32> %1, <3 x i32> undef, <16 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%3 = shufflevector <16 x i32> <i32 0, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32> %2, <16 x i32> <i32 0, i32 17, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 16, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i32> %3
}
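; Splat element 0 of %A into an <8 x double>, round-trip it through memory at %P,
; then return elements 2 and 0 of the reloaded value.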
define <2 x double> @wrongorder(<4 x double> %A, ptr %P) #0 {
; SSE2-LABEL: wrongorder:
; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: movaps %xmm0, 48(%rdi)
; SSE2-NEXT: movaps %xmm0, 32(%rdi)
; SSE2-NEXT: movaps %xmm0, 16(%rdi)
; SSE2-NEXT: movaps %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: wrongorder:
; SSE42: # %bb.0:
; SSE42-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE42-NEXT: movapd %xmm0, 48(%rdi)
; SSE42-NEXT: movapd %xmm0, 32(%rdi)
; SSE42-NEXT: movapd %xmm0, 16(%rdi)
; SSE42-NEXT: movapd %xmm0, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: wrongorder:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: wrongorder:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: vmovaps %ymm0, 32(%rdi)
; AVX2-NEXT: vmovaps %ymm0, (%rdi)
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: wrongorder:
; XOP: # %bb.0:
; XOP-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; XOP-NEXT: vmovaps %ymm1, 32(%rdi)
; XOP-NEXT: vmovaps %ymm1, (%rdi)
; XOP-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%shuffle = shufflevector <4 x double> %A, <4 x double> %A, <8 x i32> zeroinitializer
store <8 x double> %shuffle, ptr %P, align 64
%m2 = load <8 x double>, ptr %P, align 64
store <8 x double> %m2, ptr %P, align 64
%m3 = load <8 x double>, ptr %P, align 64
%m4 = shufflevector <8 x double> %m3, <8 x double> undef, <2 x i32> <i32 2, i32 0>
ret <2 x double> %m4
}
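; PR41097 - strided extract of bytes 0 and 3 from a <6 x i8> load, sign-extended
; to <2 x i32> and then zero-extended to <2 x i64> before the store.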
define void @PR41097() {
; SSE2-LABEL: PR41097:
; SSE2: # %bb.0:
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,3,4,5,6,7]
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSE42-LABEL: PR41097:
; SSE42: # %bb.0:
; SSE42-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE42-NEXT: pmovsxbd %xmm0, %xmm0
; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT: movdqu %xmm0, (%rax)
; SSE42-NEXT: retq
;
; AVX-LABEL: PR41097:
; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: vmovdqu %xmm0, (%rax)
; AVX-NEXT: retq
%wide.vec = load <6 x i8>, ptr undef, align 1
%strided.vec = shufflevector <6 x i8> %wide.vec, <6 x i8> undef, <2 x i32> <i32 0, i32 3>
%tmp = sext <2 x i8> %strided.vec to <2 x i32>
%tmp7 = zext <2 x i32> %tmp to <2 x i64>
store <2 x i64> %tmp7, ptr undef, align 8
ret void
}
; FIXME - should use INSERTPS
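; Blend element 1 of %1 into lane 0 and element 1 of %0 into lane 1 of the result.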
define <2 x float> @PR86068(<2 x float> %0, <2 x float> %1) {
; SSE2-LABEL: PR86068:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[1,1]
; SSE2-NEXT: retq
;
; SSE42-LABEL: PR86068:
; SSE42: # %bb.0: # %entry
; SSE42-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE42-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; SSE42-NEXT: retq
;
; AVX-LABEL: PR86068:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX-NEXT: retq
entry:
%3 = shufflevector <2 x float> %1, <2 x float> poison, <2 x i32> <i32 1, i32 poison>
%4 = shufflevector <2 x float> %3, <2 x float> %0, <2 x i32> <i32 0, i32 3>
ret <2 x float> %4
}
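; D107009 - extract every 8th i32 (starting at element 4) from a <64 x i32> load,
; shift each right by 16, and scatter the results back through a <64 x i32> store.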
define void @D107009(ptr %input, ptr %output) {
; SSE-LABEL: D107009:
; SSE: # %bb.0:
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa 80(%rdi), %xmm1
; SSE-NEXT: movdqa 144(%rdi), %xmm2
; SSE-NEXT: movdqa 208(%rdi), %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: psrld $16, %xmm2
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[3,3,3,3]
; SSE-NEXT: movdqa %xmm0, 128(%rsi)
; SSE-NEXT: movdqa %xmm2, 144(%rsi)
; SSE-NEXT: movdqa %xmm0, 16(%rsi)
; SSE-NEXT: movdqa %xmm7, 240(%rsi)
; SSE-NEXT: movdqa %xmm6, 208(%rsi)
; SSE-NEXT: movdqa %xmm5, 176(%rsi)
; SSE-NEXT: movdqa %xmm4, 112(%rsi)
; SSE-NEXT: movdqa %xmm3, 80(%rsi)
; SSE-NEXT: movdqa %xmm1, 48(%rsi)
; SSE-NEXT: retq
;
; AVX1-LABEL: D107009:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovups 96(%rdi), %ymm0
; AVX1-NEXT: vmovups 128(%rdi), %ymm1
; AVX1-NEXT: vmovups 224(%rdi), %ymm2
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX1-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,0],ymm1[4,5],ymm2[6,4]
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vshufps {{.*#+}} ymm3 = ymm1[3,3,3,3,7,7,7,7]
; AVX1-NEXT: vshufpd {{.*#+}} ymm4 = ymm1[0,0,3,2]
; AVX1-NEXT: vmovshdup {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; AVX1-NEXT: vmovdqa %xmm0, 16(%rsi)
; AVX1-NEXT: vmovdqa %xmm7, 48(%rsi)
; AVX1-NEXT: vmovdqa %xmm6, 112(%rsi)
; AVX1-NEXT: vmovups %ymm1, 128(%rsi)
; AVX1-NEXT: vmovups %ymm5, 160(%rsi)
; AVX1-NEXT: vmovupd %ymm4, 192(%rsi)
; AVX1-NEXT: vmovupd %ymm3, 224(%rsi)
; AVX1-NEXT: vmovups %ymm2, 64(%rsi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: D107009:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu 64(%rdi), %ymm0
; AVX2-NEXT: vmovdqu 128(%rdi), %ymm1
; AVX2-NEXT: vmovdqu 192(%rdi), %ymm2
; AVX2-NEXT: vpunpckldq {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[1],mem[1],ymm2[4],mem[4],ymm2[5],mem[5]
; AVX2-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[2]
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
; AVX2-NEXT: vpbroadcastd %xmm0, %ymm4
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm0[3,3,3,3,7,7,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm6 = ymm0[2,3,2,3,6,7,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm0[1,1,1,1,5,5,5,5]
; AVX2-NEXT: vmovdqu %ymm0, 128(%rsi)
; AVX2-NEXT: vmovdqu %ymm7, 160(%rsi)
; AVX2-NEXT: vmovdqu %ymm6, 192(%rsi)
; AVX2-NEXT: vmovdqu %ymm5, 224(%rsi)
; AVX2-NEXT: vmovdqu %ymm4, (%rsi)
; AVX2-NEXT: vmovdqa %xmm3, 48(%rsi)
; AVX2-NEXT: vmovdqa %xmm2, 112(%rsi)
; AVX2-NEXT: vmovdqu %ymm1, 64(%rsi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: D107009:
; XOP: # %bb.0:
; XOP-NEXT: vmovups 96(%rdi), %ymm0
; XOP-NEXT: vmovups 128(%rdi), %ymm1
; XOP-NEXT: vmovups 224(%rdi), %ymm2
; XOP-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
; XOP-NEXT: vunpcklps {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[4],mem[4],ymm1[5],mem[5]
; XOP-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,0],ymm1[4,5],ymm2[6,4]
; XOP-NEXT: vmovdqa 16(%rdi), %xmm2
; XOP-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; XOP-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; XOP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,1,2,0,4,5,6,4]
; XOP-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; XOP-NEXT: vpsrld $16, %xmm0, %xmm0
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
; XOP-NEXT: vpsrld $16, %xmm1, %xmm1
; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; XOP-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
; XOP-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; XOP-NEXT: vshufps {{.*#+}} ymm3 = ymm1[3,3,3,3,7,7,7,7]
; XOP-NEXT: vshufpd {{.*#+}} ymm4 = ymm1[0,0,3,2]
; XOP-NEXT: vmovshdup {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7]
; XOP-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
; XOP-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; XOP-NEXT: vmovdqa %xmm0, 16(%rsi)
; XOP-NEXT: vmovdqa %xmm7, 48(%rsi)
; XOP-NEXT: vmovdqa %xmm6, 112(%rsi)
; XOP-NEXT: vmovups %ymm1, 128(%rsi)
; XOP-NEXT: vmovups %ymm5, 160(%rsi)
; XOP-NEXT: vmovupd %ymm4, 192(%rsi)
; XOP-NEXT: vmovupd %ymm3, 224(%rsi)
; XOP-NEXT: vmovups %ymm2, 64(%rsi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%i = load <64 x i32>, ptr %input, align 16
%i2 = shufflevector <64 x i32> %i, <64 x i32> poison, <8 x i32> <i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 60>
%i3 = lshr <8 x i32> %i2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%i4 = add <8 x i32> zeroinitializer, %i3
%i5 = shufflevector <8 x i32> %i4, <8 x i32> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%i6 = shufflevector <16 x i32> %i5, <16 x i32> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%i7 = shufflevector <32 x i32> poison, <32 x i32> %i6, <64 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 58, i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 59, i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 60, i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63>
store <64 x i32> %i7, ptr %output, align 16
ret void
}