; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512DQ-BW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-BW-FCP
; These patterns are produced by the LoopVectorizer for interleaved stores.
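;
; A stride-4 interleaved store corresponds to a scalar loop of roughly this
; shape (C-style pseudocode, for illustration only; the names are hypothetical):
;
;   for (int i = 0; i < n; ++i) {
;     out[4*i+0] = in0[i];
;     out[4*i+1] = in1[i];
;     out[4*i+2] = in2[i];
;     out[4*i+3] = in3[i];
;   }
;
; The vectorizer widens each of the four streams, concatenates them, and emits
; a single shufflevector with an interleaving mask before one wide store. For
; vf2 below, the concatenation is <a0,a1,b0,b1,c0,c1,d0,d1> and the mask
; <0,2,4,6,1,3,5,7> yields <a0,b0,c0,d0,a1,b1,c1,d1>, i.e. element i of every
; input stream lands in the i-th stride-4 group of the output.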
define void @store_i32_stride4_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %out.vec) nounwind {
; SSE-LABEL: store_i32_stride4_vf2:
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm1, 16(%r8)
; SSE-NEXT: movaps %xmm2, (%r8)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i32_stride4_vf2:
; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3,0,2,5,7,4,6]
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2,1,3,4,6,5,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
; AVX-NEXT: vmovaps %ymm0, (%r8)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: store_i32_stride4_vf2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovaps %ymm0, (%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i32_stride4_vf2:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-FP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-FP-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX2-FP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-FP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FP-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FP-NEXT: vmovaps %ymm0, (%r8)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i32_stride4_vf2:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX2-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-FCP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX2-FCP-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX2-FCP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-FCP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FCP-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
; AVX2-FCP-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX2-FCP-NEXT: vmovaps %ymm0, (%r8)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: store_i32_stride4_vf2:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX512-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512-NEXT: vmovaps %ymm0, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i32_stride4_vf2:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-FCP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-FCP-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512-FCP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-FCP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
; AVX512-FCP-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX512-FCP-NEXT: vmovaps %ymm0, (%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i32_stride4_vf2:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512DQ-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512DQ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX512DQ-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512DQ-NEXT: vmovaps %ymm0, (%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i32_stride4_vf2:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-FCP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512DQ-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-FCP-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512DQ-FCP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512DQ-FCP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
; AVX512DQ-FCP-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX512DQ-FCP-NEXT: vmovaps %ymm0, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i32_stride4_vf2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512BW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512BW-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512BW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512BW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX512BW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512BW-NEXT: vmovaps %ymm0, (%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i32_stride4_vf2:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512BW-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512BW-FCP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512BW-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512BW-FCP-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512BW-FCP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512BW-FCP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512BW-FCP-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
; AVX512BW-FCP-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX512BW-FCP-NEXT: vmovaps %ymm0, (%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i32_stride4_vf2:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-BW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-BW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512DQ-BW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-BW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512DQ-BW-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512DQ-BW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-BW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; AVX512DQ-BW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512DQ-BW-NEXT: vmovaps %ymm0, (%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i32_stride4_vf2:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-BW-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-BW-FCP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512DQ-BW-FCP-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512DQ-BW-FCP-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512DQ-BW-FCP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512DQ-BW-FCP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
; AVX512DQ-BW-FCP-NEXT: vpermps %ymm0, %ymm1, %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovaps %ymm0, (%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
%in.vec1 = load <2 x i32>, ptr %in.vecptr1, align 64
%in.vec2 = load <2 x i32>, ptr %in.vecptr2, align 64
%in.vec3 = load <2 x i32>, ptr %in.vecptr3, align 64
%1 = shufflevector <2 x i32> %in.vec0, <2 x i32> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = shufflevector <2 x i32> %in.vec2, <2 x i32> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%interleaved.vec = shufflevector <8 x i32> %3, <8 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
store <8 x i32> %interleaved.vec, ptr %out.vec, align 64
ret void
}
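; vf4 widens each input to <4 x i32>; the mask <0,4,8,12,1,5,9,13,...> again
; gathers element i of all four streams into output group i. Note how every
; AVX512 lowering collapses the whole operation into a single 16-lane vpermps
; on one zmm register.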
define void @store_i32_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %out.vec) nounwind {
; SSE-LABEL: store_i32_stride4_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps (%rsi), %xmm1
; SSE-NEXT: movaps (%rdx), %xmm2
; SSE-NEXT: movaps (%rcx), %xmm3
; SSE-NEXT: movaps %xmm2, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
; SSE-NEXT: movaps %xmm5, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm4[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: movaps %xmm0, 32(%r8)
; SSE-NEXT: movaps %xmm1, 48(%r8)
; SSE-NEXT: movaps %xmm5, 16(%r8)
; SSE-NEXT: movaps %xmm6, (%r8)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i32_stride4_vf4:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: vmovaps (%rsi), %xmm1
; AVX-NEXT: vmovaps (%rdx), %xmm2
; AVX-NEXT: vmovaps (%rcx), %xmm3
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm4
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX-NEXT: vshufps {{.*#+}} ymm3 = ymm2[0,1,1,0,4,5,5,4]
; AVX-NEXT: vmovddup {{.*#+}} ymm6 = ymm5[0,0,2,2]
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vshufps {{.*#+}} ymm1 = ymm0[1,0,2,3,5,4,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7]
; AVX-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,1,3,2,4,5,7,6]
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7]
; AVX-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,2,2,3,7,6,6,7]
; AVX-NEXT: vshufpd {{.*#+}} ymm3 = ymm4[1,0,3,2]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX-NEXT: vmovaps %ymm0, 32(%r8)
; AVX-NEXT: vmovaps %ymm1, (%r8)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: store_i32_stride4_vf4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %xmm0
; AVX2-NEXT: vmovaps (%rdx), %xmm1
; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX2-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = [u,u,0,4,u,u,1,5]
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vmovaps {{.*#+}} ymm3 = [0,4,u,u,1,5,u,u]
; AVX2-NEXT: vpermps %ymm0, %ymm3, %ymm3
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
; AVX2-NEXT: vmovaps {{.*#+}} ymm3 = [u,u,2,6,u,u,3,7]
; AVX2-NEXT: vpermps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm3 = [2,6,u,u,3,7,u,u]
; AVX2-NEXT: vpermps %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-NEXT: vmovaps %ymm2, (%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i32_stride4_vf4:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovaps (%rdi), %xmm0
; AVX2-FP-NEXT: vmovaps (%rdx), %xmm1
; AVX2-FP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX2-FP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX2-FP-NEXT: vmovaps {{.*#+}} ymm2 = [u,u,0,4,u,u,1,5]
; AVX2-FP-NEXT: vpermps %ymm1, %ymm2, %ymm2
; AVX2-FP-NEXT: vmovaps {{.*#+}} ymm3 = [0,4,u,u,1,5,u,u]
; AVX2-FP-NEXT: vpermps %ymm0, %ymm3, %ymm3
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovaps {{.*#+}} ymm3 = [u,u,2,6,u,u,3,7]
; AVX2-FP-NEXT: vpermps %ymm1, %ymm3, %ymm1
; AVX2-FP-NEXT: vmovaps {{.*#+}} ymm3 = [2,6,u,u,3,7,u,u]
; AVX2-FP-NEXT: vpermps %ymm0, %ymm3, %ymm0
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-FP-NEXT: vmovaps %ymm2, (%r8)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i32_stride4_vf4:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovaps (%rdi), %xmm0
; AVX2-FCP-NEXT: vmovaps (%rdx), %xmm1
; AVX2-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX2-FCP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX2-FCP-NEXT: vmovaps {{.*#+}} ymm2 = [u,u,0,4,u,u,1,5]
; AVX2-FCP-NEXT: vpermps %ymm1, %ymm2, %ymm2
; AVX2-FCP-NEXT: vmovaps {{.*#+}} ymm3 = [0,4,u,u,1,5,u,u]
; AVX2-FCP-NEXT: vpermps %ymm0, %ymm3, %ymm3
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovaps {{.*#+}} ymm3 = [u,u,2,6,u,u,3,7]
; AVX2-FCP-NEXT: vpermps %ymm1, %ymm3, %ymm1
; AVX2-FCP-NEXT: vmovaps {{.*#+}} ymm3 = [2,6,u,u,3,7,u,u]
; AVX2-FCP-NEXT: vpermps %ymm0, %ymm3, %ymm0
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm2, (%r8)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: store_i32_stride4_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovaps (%rdi), %xmm0
; AVX512-NEXT: vmovaps (%rdx), %xmm1
; AVX512-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: vmovaps %zmm0, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i32_stride4_vf4:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovaps (%rdi), %xmm0
; AVX512-FCP-NEXT: vmovaps (%rdx), %xmm1
; AVX512-FCP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512-FCP-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-FCP-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-FCP-NEXT: vmovaps %zmm0, (%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i32_stride4_vf4:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vmovaps (%rdx), %xmm1
; AVX512DQ-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512DQ-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: vmovaps %zmm0, (%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i32_stride4_vf4:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-FCP-NEXT: vmovaps (%rdx), %xmm1
; AVX512DQ-FCP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512DQ-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512DQ-FCP-NEXT: vmovaps %zmm0, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i32_stride4_vf4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovaps (%rdi), %xmm0
; AVX512BW-NEXT: vmovaps (%rdx), %xmm1
; AVX512BW-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512BW-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512BW-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512BW-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vmovaps %zmm0, (%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i32_stride4_vf4:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovaps (%rdi), %xmm0
; AVX512BW-FCP-NEXT: vmovaps (%rdx), %xmm1
; AVX512BW-FCP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512BW-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512BW-FCP-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512BW-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512BW-FCP-NEXT: vmovaps %zmm0, (%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i32_stride4_vf4:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-BW-NEXT: vmovaps (%rdx), %xmm1
; AVX512DQ-BW-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-BW-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-BW-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512DQ-BW-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512DQ-BW-NEXT: vmovaps %zmm0, (%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i32_stride4_vf4:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-BW-FCP-NEXT: vmovaps (%rdx), %xmm1
; AVX512DQ-BW-FCP-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-BW-FCP-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
; AVX512DQ-BW-FCP-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovaps %zmm0, (%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <4 x i32>, ptr %in.vecptr0, align 64
%in.vec1 = load <4 x i32>, ptr %in.vecptr1, align 64
%in.vec2 = load <4 x i32>, ptr %in.vecptr2, align 64
%in.vec3 = load <4 x i32>, ptr %in.vecptr3, align 64
%1 = shufflevector <4 x i32> %in.vec0, <4 x i32> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%2 = shufflevector <4 x i32> %in.vec2, <4 x i32> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%3 = shufflevector <8 x i32> %1, <8 x i32> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%interleaved.vec = shufflevector <16 x i32> %3, <16 x i32> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
store <16 x i32> %interleaved.vec, ptr %out.vec, align 64
ret void
}
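; vf8 interleaves <8 x i32> inputs into a <32 x i32> store. The 32 results no
; longer fit in one zmm, so the AVX512 lowerings build two zmm sources with
; vinserti64x4 and use a pair of vpermi2d two-source permutes, one per 64-byte
; half of the output.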
define void @store_i32_stride4_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %out.vec) nounwind {
; SSE-LABEL: store_i32_stride4_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: movaps (%rsi), %xmm5
; SSE-NEXT: movaps 16(%rsi), %xmm6
; SSE-NEXT: movaps (%rdx), %xmm7
; SSE-NEXT: movaps 16(%rdx), %xmm4
; SSE-NEXT: movaps (%rcx), %xmm8
; SSE-NEXT: movaps 16(%rcx), %xmm9
; SSE-NEXT: movaps %xmm7, %xmm10
; SSE-NEXT: unpcklps {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm10[1]
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm10[0]
; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm7[1]
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm7[0]
; SSE-NEXT: movaps %xmm4, %xmm7
; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
; SSE-NEXT: movaps %xmm1, %xmm8
; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; SSE-NEXT: movaps %xmm8, %xmm10
; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm7[1]
; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm7[0]
; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
; SSE-NEXT: movaps %xmm1, %xmm6
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm4[1]
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; SSE-NEXT: movaps %xmm1, 96(%r8)
; SSE-NEXT: movaps %xmm6, 112(%r8)
; SSE-NEXT: movaps %xmm8, 64(%r8)
; SSE-NEXT: movaps %xmm10, 80(%r8)
; SSE-NEXT: movaps %xmm0, 32(%r8)
; SSE-NEXT: movaps %xmm5, 48(%r8)
; SSE-NEXT: movaps %xmm2, (%r8)
; SSE-NEXT: movaps %xmm3, 16(%r8)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i32_stride4_vf8:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps (%rdi), %xmm2
; AVX-NEXT: vmovaps 16(%rdi), %xmm0
; AVX-NEXT: vmovaps (%rsi), %xmm4
; AVX-NEXT: vmovaps 16(%rsi), %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm2[1],xmm4[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm5 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
; AVX-NEXT: vmovaps (%rcx), %xmm5
; AVX-NEXT: vmovaps 16(%rcx), %xmm6
; AVX-NEXT: vmovaps (%rdx), %xmm7
; AVX-NEXT: vmovaps 16(%rdx), %xmm8
; AVX-NEXT: vunpcklps {{.*#+}} xmm9 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm10 = xmm5[0],xmm7[0]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm10, %ymm9
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm9[2,3],ymm3[4,5],ymm9[6,7]
; AVX-NEXT: vinsertps {{.*#+}} xmm9 = xmm0[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm10 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm9, %ymm10, %ymm9
; AVX-NEXT: vunpcklps {{.*#+}} xmm10 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm11 = xmm6[0],xmm8[0]
; AVX-NEXT: vshufps {{.*#+}} xmm11 = xmm11[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm11, %ymm10
; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3],ymm9[4,5],ymm10[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm10 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm7[2],xmm5[2]
; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm5, %ymm5
; AVX-NEXT: vunpckhps {{.*#+}} xmm7 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,0],xmm2[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm7, %ymm2
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3],ymm2[4,5],ymm5[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm8[2],xmm6[2],xmm8[3],xmm6[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm8[2],xmm6[2]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm1[3,0],xmm0[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm0
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5],ymm4[6,7]
; AVX-NEXT: vmovaps %ymm0, 96(%r8)
; AVX-NEXT: vmovaps %ymm2, 32(%r8)
; AVX-NEXT: vmovaps %ymm9, 64(%r8)
; AVX-NEXT: vmovaps %ymm3, (%r8)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: store_i32_stride4_vf8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vmovaps (%rsi), %ymm1
; AVX2-NEXT: vmovaps (%rdx), %ymm2
; AVX2-NEXT: vmovaps (%rcx), %ymm3
; AVX2-NEXT: vmovaps (%rcx), %xmm4
; AVX2-NEXT: vmovaps (%rdx), %xmm5
; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,0,2,1]
; AVX2-NEXT: vmovaps (%rsi), %xmm7
; AVX2-NEXT: vmovaps (%rdi), %xmm8
; AVX2-NEXT: vunpcklps {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm5 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-NEXT: vmovaps %ymm0, 64(%r8)
; AVX2-NEXT: vmovaps %ymm5, 96(%r8)
; AVX2-NEXT: vmovaps %ymm4, 32(%r8)
; AVX2-NEXT: vmovaps %ymm6, (%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i32_stride4_vf8:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm0
; AVX2-FP-NEXT: vmovaps (%rsi), %ymm1
; AVX2-FP-NEXT: vmovaps (%rdx), %ymm2
; AVX2-FP-NEXT: vmovaps (%rcx), %ymm3
; AVX2-FP-NEXT: vmovaps (%rcx), %xmm4
; AVX2-FP-NEXT: vmovaps (%rdx), %xmm5
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,0,2,1]
; AVX2-FP-NEXT: vmovaps (%rsi), %xmm7
; AVX2-FP-NEXT: vmovaps (%rdi), %xmm8
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm5 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovaps %ymm0, 64(%r8)
; AVX2-FP-NEXT: vmovaps %ymm5, 96(%r8)
; AVX2-FP-NEXT: vmovaps %ymm4, 32(%r8)
; AVX2-FP-NEXT: vmovaps %ymm6, (%r8)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i32_stride4_vf8:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm0
; AVX2-FCP-NEXT: vmovaps (%rsi), %ymm1
; AVX2-FCP-NEXT: vmovaps (%rdx), %ymm2
; AVX2-FCP-NEXT: vmovaps (%rcx), %ymm3
; AVX2-FCP-NEXT: vmovaps (%rcx), %xmm4
; AVX2-FCP-NEXT: vmovaps (%rdx), %xmm5
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps (%rsi), %xmm7
; AVX2-FCP-NEXT: vmovaps (%rdi), %xmm8
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm5 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm7 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovaps %ymm0, 64(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm5, 96(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm4, 32(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm6, (%r8)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: store_i32_stride4_vf8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vmovdqa (%rdx), %ymm1
; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i32_stride4_vf8:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512-FCP-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i32_stride4_vf8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i32_stride4_vf8:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i32_stride4_vf8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT: vmovdqa (%rdx), %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i32_stride4_vf8:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512BW-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i32_stride4_vf8:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-BW-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i32_stride4_vf8:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdi), %ymm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, (%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <8 x i32>, ptr %in.vecptr0, align 64
%in.vec1 = load <8 x i32>, ptr %in.vecptr1, align 64
%in.vec2 = load <8 x i32>, ptr %in.vecptr2, align 64
%in.vec3 = load <8 x i32>, ptr %in.vecptr3, align 64
%1 = shufflevector <8 x i32> %in.vec0, <8 x i32> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%2 = shufflevector <8 x i32> %in.vec2, <8 x i32> %in.vec3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%3 = shufflevector <16 x i32> %1, <16 x i32> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%interleaved.vec = shufflevector <32 x i32> %3, <32 x i32> poison, <32 x i32> <i32 0, i32 8, i32 16, i32 24, i32 1, i32 9, i32 17, i32 25, i32 2, i32 10, i32 18, i32 26, i32 3, i32 11, i32 19, i32 27, i32 4, i32 12, i32 20, i32 28, i32 5, i32 13, i32 21, i32 29, i32 6, i32 14, i32 22, i32 30, i32 7, i32 15, i32 23, i32 31>
store <32 x i32> %interleaved.vec, ptr %out.vec, align 64
ret void
}
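; vf16 interleaves <16 x i32> inputs into a <64 x i32> store. At this width the
; SSE and AVX lowerings run short of registers and spill temporaries to the
; stack, while the AVX2 lowerings process the data in 32-byte halves without
; spilling.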
define void @store_i32_stride4_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %out.vec) nounwind {
; SSE-LABEL: store_i32_stride4_vf16:
; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm5
; SSE-NEXT: movaps 16(%rdi), %xmm11
; SSE-NEXT: movaps 32(%rdi), %xmm4
; SSE-NEXT: movaps 48(%rdi), %xmm2
; SSE-NEXT: movaps (%rsi), %xmm0
; SSE-NEXT: movaps 16(%rsi), %xmm3
; SSE-NEXT: movaps 32(%rsi), %xmm9
; SSE-NEXT: movaps (%rdx), %xmm7
; SSE-NEXT: movaps 16(%rdx), %xmm13
; SSE-NEXT: movaps 32(%rdx), %xmm10
; SSE-NEXT: movaps (%rcx), %xmm8
; SSE-NEXT: movaps 16(%rcx), %xmm14
; SSE-NEXT: movaps 32(%rcx), %xmm12
; SSE-NEXT: movaps %xmm7, %xmm15
; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1]
; SSE-NEXT: movaps %xmm5, %xmm6
; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
; SSE-NEXT: movaps %xmm6, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm15[1]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm15[0]
; SSE-NEXT: unpckhps {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm0[2],xmm5[3],xmm0[3]
; SSE-NEXT: movaps %xmm5, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm7[0]
; SSE-NEXT: movaps %xmm13, %xmm15
; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
; SSE-NEXT: movaps %xmm11, %xmm7
; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
; SSE-NEXT: movaps %xmm7, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm15[1]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm15[0]
; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm14[2],xmm13[3],xmm14[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm3[2],xmm11[3],xmm3[3]
; SSE-NEXT: movaps %xmm11, %xmm8
; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm13[1]
; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm13[0]
; SSE-NEXT: movaps %xmm10, %xmm15
; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
; SSE-NEXT: movaps %xmm4, %xmm13
; SSE-NEXT: unpcklps {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
; SSE-NEXT: movaps %xmm13, %xmm14
; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm15[1]
; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm15[0]
; SSE-NEXT: movaps 48(%rdx), %xmm15
; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
; SSE-NEXT: movaps 48(%rcx), %xmm12
; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
; SSE-NEXT: movaps %xmm4, %xmm9
; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm10[1]
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm10[0]
; SSE-NEXT: movaps %xmm15, %xmm10
; SSE-NEXT: unpcklps {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
; SSE-NEXT: movaps 48(%rsi), %xmm1
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1]
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm10[0]
; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm12[2],xmm15[3],xmm12[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm15[1]
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm15[0]
; SSE-NEXT: movaps %xmm2, 224(%r8)
; SSE-NEXT: movaps %xmm1, 240(%r8)
; SSE-NEXT: movaps %xmm3, 192(%r8)
; SSE-NEXT: movaps %xmm0, 208(%r8)
; SSE-NEXT: movaps %xmm4, 160(%r8)
; SSE-NEXT: movaps %xmm9, 176(%r8)
; SSE-NEXT: movaps %xmm13, 128(%r8)
; SSE-NEXT: movaps %xmm14, 144(%r8)
; SSE-NEXT: movaps %xmm11, 96(%r8)
; SSE-NEXT: movaps %xmm8, 112(%r8)
; SSE-NEXT: movaps %xmm7, 64(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%r8)
; SSE-NEXT: movaps %xmm5, 32(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%r8)
; SSE-NEXT: movaps %xmm6, (%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%r8)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i32_stride4_vf16:
; AVX: # %bb.0:
; AVX-NEXT: subq $24, %rsp
; AVX-NEXT: vmovaps (%rdi), %xmm7
; AVX-NEXT: vmovaps 32(%rdi), %xmm3
; AVX-NEXT: vmovaps 48(%rdi), %xmm10
; AVX-NEXT: vmovaps (%rsi), %xmm9
; AVX-NEXT: vmovaps 32(%rsi), %xmm4
; AVX-NEXT: vmovaps 48(%rsi), %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm7[1],xmm9[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps (%rcx), %xmm14
; AVX-NEXT: vmovaps 32(%rcx), %xmm6
; AVX-NEXT: vmovaps 48(%rcx), %xmm11
; AVX-NEXT: vmovaps (%rdx), %xmm2
; AVX-NEXT: vmovaps 32(%rdx), %xmm8
; AVX-NEXT: vmovaps 48(%rdx), %xmm12
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm14[0],xmm2[1],xmm14[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm13 = xmm14[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm13, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm10[1],xmm5[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm13 = xmm11[0],xmm12[0]
; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm13, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[1],xmm4[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm6[0],xmm8[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm13 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 16(%rdi), %xmm13
; AVX-NEXT: vmovaps 16(%rsi), %xmm15
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm13[1],xmm15[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm3
; AVX-NEXT: vmovaps 16(%rcx), %xmm1
; AVX-NEXT: vmovaps 16(%rdx), %xmm0
; AVX-NEXT: vmovlhps {{.*#+}} xmm10 = xmm1[0],xmm0[0]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm10, %ymm4
; AVX-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm2[2],xmm14[2],xmm2[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm2 = zero,zero,xmm2[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm9[3,0],xmm7[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm4 = zero,zero,xmm12[2],xmm11[2]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,0],xmm7[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm8[2],xmm6[2],xmm8[3],xmm6[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm8[2],xmm6[2]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[3,0],xmm7[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm1[2]
; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm15[3,0],xmm13[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm5[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX-NEXT: vmovaps %ymm0, 96(%r8)
; AVX-NEXT: vmovaps %ymm4, 160(%r8)
; AVX-NEXT: vmovaps %ymm3, 224(%r8)
; AVX-NEXT: vmovaps %ymm2, 32(%r8)
; AVX-NEXT: vmovaps %ymm10, 64(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 128(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 192(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%r8)
; AVX-NEXT: addq $24, %rsp
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: store_i32_stride4_vf16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-NEXT: vmovaps (%rsi), %ymm1
; AVX2-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-NEXT: vmovaps (%rcx), %xmm5
; AVX2-NEXT: vmovaps 32(%rcx), %xmm7
; AVX2-NEXT: vmovaps (%rdx), %xmm6
; AVX2-NEXT: vmovaps 32(%rdx), %xmm8
; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vmovaps (%rsi), %xmm9
; AVX2-NEXT: vmovaps (%rdi), %xmm10
; AVX2-NEXT: vunpckhps {{.*#+}} xmm11 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1],ymm3[2,3],ymm11[4,5],ymm3[6,7]
; AVX2-NEXT: vmovaps 32(%rsi), %xmm11
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; AVX2-NEXT: vmovaps 32(%rdi), %xmm12
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
; AVX2-NEXT: vmovaps 32(%rdx), %ymm9
; AVX2-NEXT: vunpckhps {{.*#+}} xmm7 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX2-NEXT: vmovaps 32(%rcx), %ymm8
; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm10 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3],ymm10[4,5],ymm7[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm10 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,2,3]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm11 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7]
; AVX2-NEXT: vmovaps (%rdx), %ymm11
; AVX2-NEXT: vunpckhps {{.*#+}} ymm8 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
; AVX2-NEXT: vmovaps (%rcx), %ymm9
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,2,3]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm8[2,3],ymm2[4,5],ymm8[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm11[0],ymm9[0],ymm11[1],ymm9[1],ymm11[4],ymm9[4],ymm11[5],ymm9[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3],ymm8[4,5],ymm4[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm8 = ymm11[2],ymm9[2],ymm11[3],ymm9[3],ymm11[6],ymm9[6],ymm11[7],ymm9[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vmovaps %ymm0, 96(%r8)
; AVX2-NEXT: vmovaps %ymm4, 64(%r8)
; AVX2-NEXT: vmovaps %ymm2, 224(%r8)
; AVX2-NEXT: vmovaps %ymm10, 192(%r8)
; AVX2-NEXT: vmovaps %ymm7, 160(%r8)
; AVX2-NEXT: vmovaps %ymm6, 128(%r8)
; AVX2-NEXT: vmovaps %ymm5, (%r8)
; AVX2-NEXT: vmovaps %ymm3, 32(%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i32_stride4_vf16:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm0
; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-FP-NEXT: vmovaps (%rsi), %ymm1
; AVX2-FP-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-FP-NEXT: vmovaps (%rcx), %xmm5
; AVX2-FP-NEXT: vmovaps 32(%rcx), %xmm7
; AVX2-FP-NEXT: vmovaps (%rdx), %xmm6
; AVX2-FP-NEXT: vmovaps 32(%rdx), %xmm8
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm3 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-FP-NEXT: vmovaps (%rsi), %xmm9
; AVX2-FP-NEXT: vmovaps (%rdi), %xmm10
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm11 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1],ymm3[2,3],ymm11[4,5],ymm3[6,7]
; AVX2-FP-NEXT: vmovaps 32(%rsi), %xmm11
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; AVX2-FP-NEXT: vmovaps 32(%rdi), %xmm12
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,0,2,1]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
; AVX2-FP-NEXT: vmovaps 32(%rdx), %ymm9
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm7 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX2-FP-NEXT: vmovaps 32(%rcx), %ymm8
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm10 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3],ymm10[4,5],ymm7[6,7]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm10 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,2,3]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm11 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7]
; AVX2-FP-NEXT: vmovaps (%rdx), %ymm11
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm8 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
; AVX2-FP-NEXT: vmovaps (%rcx), %ymm9
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,2,3]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm8[2,3],ymm2[4,5],ymm8[6,7]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm4 = ymm11[0],ymm9[0],ymm11[1],ymm9[1],ymm11[4],ymm9[4],ymm11[5],ymm9[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3],ymm8[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm8 = ymm11[2],ymm9[2],ymm11[3],ymm9[3],ymm11[6],ymm9[6],ymm11[7],ymm9[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovaps %ymm0, 96(%r8)
; AVX2-FP-NEXT: vmovaps %ymm4, 64(%r8)
; AVX2-FP-NEXT: vmovaps %ymm2, 224(%r8)
; AVX2-FP-NEXT: vmovaps %ymm10, 192(%r8)
; AVX2-FP-NEXT: vmovaps %ymm7, 160(%r8)
; AVX2-FP-NEXT: vmovaps %ymm6, 128(%r8)
; AVX2-FP-NEXT: vmovaps %ymm5, (%r8)
; AVX2-FP-NEXT: vmovaps %ymm3, 32(%r8)
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i32_stride4_vf16:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm0
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovaps (%rsi), %ymm1
; AVX2-FCP-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-FCP-NEXT: vmovaps (%rcx), %xmm5
; AVX2-FCP-NEXT: vmovaps 32(%rcx), %xmm7
; AVX2-FCP-NEXT: vmovaps (%rdx), %xmm6
; AVX2-FCP-NEXT: vmovaps 32(%rdx), %xmm8
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm3 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps (%rsi), %xmm9
; AVX2-FCP-NEXT: vmovaps (%rdi), %xmm10
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm11 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm11[0,1],ymm3[2,3],ymm11[4,5],ymm3[6,7]
; AVX2-FCP-NEXT: vmovaps 32(%rsi), %xmm11
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %xmm12
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,0,2,1]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
; AVX2-FCP-NEXT: vmovaps 32(%rdx), %ymm9
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm7 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX2-FCP-NEXT: vmovaps 32(%rcx), %ymm8
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm10 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3],ymm10[4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm10 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[4],ymm8[4],ymm9[5],ymm8[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,2,3]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm11 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7]
; AVX2-FCP-NEXT: vmovaps (%rdx), %ymm11
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm8 = ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[6],ymm8[6],ymm9[7],ymm8[7]
; AVX2-FCP-NEXT: vmovaps (%rcx), %ymm9
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,2,3]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm8[2,3],ymm2[4,5],ymm8[6,7]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm4 = ymm11[0],ymm9[0],ymm11[1],ymm9[1],ymm11[4],ymm9[4],ymm11[5],ymm9[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm4[2,3],ymm8[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm8 = ymm11[2],ymm9[2],ymm11[3],ymm9[3],ymm11[6],ymm9[6],ymm11[7],ymm9[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm8[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovaps %ymm0, 96(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm4, 64(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm2, 224(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm10, 192(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm7, 160(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm6, 128(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm5, (%r8)
; AVX2-FCP-NEXT: vmovaps %ymm3, 32(%r8)
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
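; A note on the eight AVX512 variants below: they all lower this pattern the
; same way. vpermi2d with a constant index vector interleaves dwords from two
; 512-bit sources, and pairs of partial results are merged with a masked
; vmovdqa64. The mask byte -86 is 0xAA (0b10101010); with 64-bit elements
; that selects the odd qwords, i.e. every other dword pair, so the rdx/rcx
; lanes slot in between the rdi/rsi lanes.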
; AVX512-LABEL: store_i32_stride4_vf16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512-NEXT: movb $-86, %al
; AVX512-NEXT: kmovw %eax, %k1
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i32_stride4_vf16:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512-FCP-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512-FCP-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512-FCP-NEXT: movb $-86, %al
; AVX512-FCP-NEXT: kmovw %eax, %k1
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i32_stride4_vf16:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512DQ-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512DQ-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-NEXT: movb $-86, %al
; AVX512DQ-NEXT: kmovw %eax, %k1
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i32_stride4_vf16:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-FCP-NEXT: movb $-86, %al
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i32_stride4_vf16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512BW-NEXT: movb $-86, %al
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i32_stride4_vf16:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512BW-FCP-NEXT: movb $-86, %al
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i32_stride4_vf16:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-BW-NEXT: movb $-86, %al
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-BW-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i32_stride4_vf16:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm3
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm5 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
; AVX512DQ-BW-FCP-NEXT: movb $-86, %al
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm4 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm2 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 192(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 128(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, (%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
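; The IR below concatenates the four vf16 inputs and applies the stride-4
; interleave mask: output element 4*i+j is input element j*16+i.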
%in.vec0 = load <16 x i32>, ptr %in.vecptr0, align 64
%in.vec1 = load <16 x i32>, ptr %in.vecptr1, align 64
%in.vec2 = load <16 x i32>, ptr %in.vecptr2, align 64
%in.vec3 = load <16 x i32>, ptr %in.vecptr3, align 64
%1 = shufflevector <16 x i32> %in.vec0, <16 x i32> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%2 = shufflevector <16 x i32> %in.vec2, <16 x i32> %in.vec3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%3 = shufflevector <32 x i32> %1, <32 x i32> %2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
%interleaved.vec = shufflevector <64 x i32> %3, <64 x i32> poison, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
store <64 x i32> %interleaved.vec, ptr %out.vec, align 64
ret void
}
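; For vf32 the SSE lowering needs more live values than the sixteen XMM
; registers can hold: each 4 x i32 slice is transposed with
; unpcklps/unpckhps plus movlhps/unpckhpd, and finished 16-byte rows are
; spilled to the 184-byte stack frame until the final run of stores to (%r8).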
define void @store_i32_stride4_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %out.vec) nounwind {
; SSE-LABEL: store_i32_stride4_vf32:
; SSE: # %bb.0:
; SSE-NEXT: subq $184, %rsp
; SSE-NEXT: movaps (%rdi), %xmm10
; SSE-NEXT: movaps 16(%rdi), %xmm11
; SSE-NEXT: movaps 32(%rdi), %xmm12
; SSE-NEXT: movaps 48(%rdi), %xmm13
; SSE-NEXT: movaps (%rsi), %xmm5
; SSE-NEXT: movaps 16(%rsi), %xmm2
; SSE-NEXT: movaps 32(%rsi), %xmm0
; SSE-NEXT: movaps (%rdx), %xmm6
; SSE-NEXT: movaps 16(%rdx), %xmm4
; SSE-NEXT: movaps 32(%rdx), %xmm1
; SSE-NEXT: movaps (%rcx), %xmm7
; SSE-NEXT: movaps 16(%rcx), %xmm8
; SSE-NEXT: movaps 32(%rcx), %xmm3
; SSE-NEXT: movaps %xmm6, %xmm9
; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
; SSE-NEXT: movaps %xmm10, %xmm14
; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
; SSE-NEXT: movaps %xmm14, %xmm15
; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm9[0]
; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm9[1]
; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm5[2],xmm10[3],xmm5[3]
; SSE-NEXT: movaps %xmm10, %xmm5
; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm6[1]
; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm4, %xmm5
; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
; SSE-NEXT: movaps %xmm11, %xmm6
; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
; SSE-NEXT: movaps %xmm6, %xmm7
; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm5[0]
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm2[2],xmm11[3],xmm2[3]
; SSE-NEXT: movaps %xmm11, %xmm2
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm4[1]
; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: movaps %xmm12, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
; SSE-NEXT: movaps %xmm4, %xmm5
; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm2[0]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 48(%rdx), %xmm2
; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE-NEXT: movaps 48(%rcx), %xmm3
; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm0[2],xmm12[3],xmm0[3]
; SSE-NEXT: movaps %xmm12, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm1[1]
; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE-NEXT: movaps 48(%rsi), %xmm1
; SSE-NEXT: movaps %xmm13, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE-NEXT: movaps %xmm4, %xmm5
; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm1[2],xmm13[3],xmm1[3]
; SSE-NEXT: movaps %xmm13, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm2[1]
; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 64(%rdx), %xmm0
; SSE-NEXT: movaps 64(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 64(%rdi), %xmm13
; SSE-NEXT: movaps 64(%rsi), %xmm3
; SSE-NEXT: movaps %xmm13, %xmm14
; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
; SSE-NEXT: movaps %xmm14, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm2[0]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm2[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm3[2],xmm13[3],xmm3[3]
; SSE-NEXT: movaps %xmm13, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm0[1]
; SSE-NEXT: movaps 80(%rdx), %xmm0
; SSE-NEXT: movaps 80(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 80(%rdi), %xmm11
; SSE-NEXT: movaps 80(%rsi), %xmm7
; SSE-NEXT: movaps %xmm11, %xmm8
; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; SSE-NEXT: movaps %xmm8, %xmm3
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm2[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm7[2],xmm11[3],xmm7[3]
; SSE-NEXT: movaps %xmm11, %xmm15
; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm0[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm0[1]
; SSE-NEXT: movaps 96(%rdx), %xmm1
; SSE-NEXT: movaps 96(%rcx), %xmm6
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE-NEXT: movaps 96(%rdi), %xmm5
; SSE-NEXT: movaps 96(%rsi), %xmm4
; SSE-NEXT: movaps %xmm5, %xmm9
; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
; SSE-NEXT: movaps %xmm9, %xmm12
; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm0[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; SSE-NEXT: movaps %xmm5, %xmm10
; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm1[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
; SSE-NEXT: movaps 112(%rdx), %xmm2
; SSE-NEXT: movaps 112(%rcx), %xmm7
; SSE-NEXT: movaps %xmm2, %xmm6
; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; SSE-NEXT: movaps 112(%rdi), %xmm0
; SSE-NEXT: movaps 112(%rsi), %xmm4
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm6[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE-NEXT: movaps %xmm0, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm2[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm0, 496(%r8)
; SSE-NEXT: movaps %xmm4, 480(%r8)
; SSE-NEXT: movaps %xmm1, 464(%r8)
; SSE-NEXT: movaps %xmm3, 448(%r8)
; SSE-NEXT: movaps %xmm5, 432(%r8)
; SSE-NEXT: movaps %xmm10, 416(%r8)
; SSE-NEXT: movaps %xmm9, 400(%r8)
; SSE-NEXT: movaps %xmm12, 384(%r8)
; SSE-NEXT: movaps %xmm11, 368(%r8)
; SSE-NEXT: movaps %xmm15, 352(%r8)
; SSE-NEXT: movaps %xmm8, 336(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 320(%r8)
; SSE-NEXT: movaps %xmm13, 304(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 288(%r8)
; SSE-NEXT: movaps %xmm14, 272(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 256(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 240(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 224(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 208(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 176(%r8)
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 128(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%r8)
; SSE-NEXT: addq $184, %rsp
; SSE-NEXT: retq
;
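; The AVX build widens the transpose to 128-bit lanes: vunpcklps and
; vinsertps form each half-row, vinsertf128 stitches two halves into a YMM,
; and vblendps merges the rdi/rsi half with the rdx/rcx half. Like the SSE
; version it spills heavily (a 488-byte frame) to stage results before the
; sixteen 32-byte stores.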
; AVX-LABEL: store_i32_stride4_vf32:
; AVX: # %bb.0:
; AVX-NEXT: subq $488, %rsp # imm = 0x1E8
; AVX-NEXT: vmovaps 16(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 16(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 16(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 16(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 64(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 64(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 64(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps 64(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 80(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 80(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 80(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 80(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 32(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 32(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 32(%rcx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 32(%rdx), %xmm13
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm13[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm13[0],xmm2[0],xmm13[1],xmm2[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 48(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 48(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 48(%rcx), %xmm10
; AVX-NEXT: vmovaps 48(%rdx), %xmm9
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm10[0],xmm9[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 96(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 96(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 96(%rcx), %xmm7
; AVX-NEXT: vmovaps 96(%rdx), %xmm5
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm7[0],xmm5[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 112(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 112(%rsi), %xmm12
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[1],xmm12[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 112(%rcx), %xmm4
; AVX-NEXT: vmovaps 112(%rdx), %xmm3
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm4[0],xmm3[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm6 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps (%rdi), %xmm11
; AVX-NEXT: vmovaps (%rsi), %xmm8
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm11[1],xmm8[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps (%rcx), %xmm6
; AVX-NEXT: vmovaps (%rdx), %xmm2
; AVX-NEXT: vmovlhps {{.*#+}} xmm14 = xmm6[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3],ymm0[4,5],ymm14[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm14[2],xmm1[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm1[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm14, %ymm1
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,0],xmm0[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm1[2,3],ymm14[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps (%rsp), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm14[2],xmm1[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm1[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm14, %ymm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm1[2],xmm15[3],xmm1[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm1[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm0[2,3],ymm14[4,5],ymm0[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm14[2],xmm1[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm1[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm14, %ymm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm1[2],xmm15[3],xmm1[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm1[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm15 = ymm14[0,1],ymm0[2,3],ymm14[4,5],ymm0[6,7]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm13[2],xmm1[2],xmm13[3],xmm1[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm13 = zero,zero,xmm13[2],xmm1[2]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm13, %ymm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm13 = xmm14[2],xmm1[2],xmm14[3],xmm1[3]
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm1[3,0],xmm14[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm13, %ymm13
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3],ymm13[4,5],ymm0[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm13 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm9 = zero,zero,xmm9[2],xmm10[2]
; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm9, %ymm9
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm10 = xmm13[2],xmm1[2],xmm13[3],xmm1[3]
; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm1[3,0],xmm13[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm13 = xmm13[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm13, %ymm10, %ymm10
; AVX-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm10 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm5[2],xmm7[2]
; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm5, %ymm5
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm7 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm1[3,0],xmm10[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm7, %ymm7
; AVX-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm7 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = zero,zero,xmm3[2],xmm4[2]
; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm3, %ymm3
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm1[2],xmm12[2],xmm1[3],xmm12[3]
; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm12[3,0],xmm1[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm7 = xmm7[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm7, %ymm4, %ymm4
; AVX-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = zero,zero,xmm2[2],xmm6[2]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX-NEXT: vunpckhps {{.*#+}} xmm2 = xmm11[2],xmm8[2],xmm11[3],xmm8[3]
; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm8[3,0],xmm11[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm4 = xmm4[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX-NEXT: vmovaps %ymm1, 32(%r8)
; AVX-NEXT: vmovaps %ymm3, 480(%r8)
; AVX-NEXT: vmovaps %ymm5, 416(%r8)
; AVX-NEXT: vmovaps %ymm9, 224(%r8)
; AVX-NEXT: vmovaps %ymm0, 160(%r8)
; AVX-NEXT: vmovaps %ymm15, 352(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 288(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 96(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 448(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 384(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 192(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 128(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 320(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 256(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 64(%r8)
; AVX-NEXT: addq $488, %rsp # imm = 0x1E8
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
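; AVX2 reaches the same layout with vpermpd + vblendps instead of lane
; inserts: the low input halves are interleaved as XMM unpacks and then
; placed across the YMM with vpermpd, while the high halves use YMM
; vunpcklps/vunpckhps directly. Only four YMM spills remain, which appear to
; fit in the red zone, so the prologue is just a token pushq %rax stack
; adjustment.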
; AVX2-LABEL: store_i32_stride4_vf32:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rax
; AVX2-NEXT: vmovaps 64(%rdi), %ymm2
; AVX2-NEXT: vmovaps (%rdi), %ymm6
; AVX2-NEXT: vmovaps 64(%rsi), %ymm4
; AVX2-NEXT: vmovaps (%rsi), %ymm7
; AVX2-NEXT: vmovaps (%rcx), %xmm11
; AVX2-NEXT: vmovaps 32(%rcx), %xmm5
; AVX2-NEXT: vmovaps 64(%rcx), %xmm1
; AVX2-NEXT: vmovaps (%rdx), %xmm12
; AVX2-NEXT: vmovaps 32(%rdx), %xmm8
; AVX2-NEXT: vmovaps 64(%rdx), %xmm3
; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vmovaps 32(%rsi), %xmm9
; AVX2-NEXT: vmovaps 64(%rsi), %xmm10
; AVX2-NEXT: vmovaps 32(%rdi), %xmm13
; AVX2-NEXT: vmovaps 64(%rdi), %xmm14
; AVX2-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm14[2],xmm10[2],xmm14[3],xmm10[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm1[2,3],ymm3[4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm8[0],xmm5[0],xmm8[1],xmm5[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm10 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm3[2,3],ymm10[4,5],ymm3[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 96(%rcx), %xmm9
; AVX2-NEXT: vmovaps 96(%rdx), %xmm10
; AVX2-NEXT: vunpcklps {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1]
; AVX2-NEXT: vmovaps 96(%rsi), %xmm13
; AVX2-NEXT: vmovaps 96(%rdi), %xmm14
; AVX2-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm8[2,3],ymm15[4,5],ymm8[6,7]
; AVX2-NEXT: vmovaps (%rsi), %xmm15
; AVX2-NEXT: vunpckhps {{.*#+}} xmm9 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
; AVX2-NEXT: vmovaps (%rdi), %xmm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm10 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm10 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm13 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0,1],ymm10[2,3],ymm13[4,5],ymm10[6,7]
; AVX2-NEXT: vmovaps (%rdx), %ymm13
; AVX2-NEXT: vunpckhps {{.*#+}} xmm11 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
; AVX2-NEXT: vmovaps (%rcx), %ymm14
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3],ymm0[4,5],ymm11[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm12 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm0[2,3],ymm12[4,5],ymm0[6,7]
; AVX2-NEXT: vmovaps 64(%rdx), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
; AVX2-NEXT: vmovaps 64(%rcx), %ymm14
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,2,2,3]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm13[2,3],ymm6[4,5],ymm13[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm7 = ymm0[0],ymm14[0],ymm0[1],ymm14[1],ymm0[4],ymm14[4],ymm0[5],ymm14[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1],ymm7[2,3],ymm13[4,5],ymm7[6,7]
; AVX2-NEXT: vmovaps 32(%rdi), %ymm13
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm14[2],ymm0[3],ymm14[3],ymm0[6],ymm14[6],ymm0[7],ymm14[7]
; AVX2-NEXT: vmovaps 32(%rdx), %ymm14
; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
; AVX2-NEXT: vmovaps 32(%rcx), %ymm4
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm14[0],ymm4[0],ymm14[1],ymm4[1],ymm14[4],ymm4[4],ymm14[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovaps 32(%rsi), %ymm15
; AVX2-NEXT: vunpcklps {{.*#+}} ymm1 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[4],ymm15[4],ymm13[5],ymm15[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm14[2],ymm4[2],ymm14[3],ymm4[3],ymm14[6],ymm4[6],ymm14[7],ymm4[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm4 = ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[6],ymm15[6],ymm13[7],ymm15[7]
; AVX2-NEXT: vmovaps 96(%rdx), %ymm13
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
; AVX2-NEXT: vmovaps 96(%rcx), %ymm4
; AVX2-NEXT: vunpcklps {{.*#+}} ymm14 = ymm13[0],ymm4[0],ymm13[1],ymm4[1],ymm13[4],ymm4[4],ymm13[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,2,2,3]
; AVX2-NEXT: vmovaps 96(%rdi), %ymm15
; AVX2-NEXT: vmovaps 96(%rsi), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3],ymm5[4,5],ymm14[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm4 = ymm13[2],ymm4[2],ymm13[3],ymm4[3],ymm13[6],ymm4[6],ymm13[7],ymm4[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5],ymm4[6,7]
; AVX2-NEXT: vmovaps %ymm0, 480(%r8)
; AVX2-NEXT: vmovaps %ymm5, 448(%r8)
; AVX2-NEXT: vmovaps %ymm2, 224(%r8)
; AVX2-NEXT: vmovaps %ymm1, 192(%r8)
; AVX2-NEXT: vmovaps %ymm3, 352(%r8)
; AVX2-NEXT: vmovaps %ymm7, 320(%r8)
; AVX2-NEXT: vmovaps %ymm6, 64(%r8)
; AVX2-NEXT: vmovaps %ymm12, 96(%r8)
; AVX2-NEXT: vmovaps %ymm11, 32(%r8)
; AVX2-NEXT: vmovaps %ymm10, (%r8)
; AVX2-NEXT: vmovaps %ymm9, 416(%r8)
; AVX2-NEXT: vmovaps %ymm8, 384(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 256(%r8)
; AVX2-NEXT: popq %rax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i32_stride4_vf32:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: pushq %rax
; AVX2-FP-NEXT: vmovaps 64(%rdi), %ymm2
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm6
; AVX2-FP-NEXT: vmovaps 64(%rsi), %ymm4
; AVX2-FP-NEXT: vmovaps (%rsi), %ymm7
; AVX2-FP-NEXT: vmovaps (%rcx), %xmm11
; AVX2-FP-NEXT: vmovaps 32(%rcx), %xmm5
; AVX2-FP-NEXT: vmovaps 64(%rcx), %xmm1
; AVX2-FP-NEXT: vmovaps (%rdx), %xmm12
; AVX2-FP-NEXT: vmovaps 32(%rdx), %xmm8
; AVX2-FP-NEXT: vmovaps 64(%rdx), %xmm3
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FP-NEXT: vmovaps 32(%rsi), %xmm9
; AVX2-FP-NEXT: vmovaps 64(%rsi), %xmm10
; AVX2-FP-NEXT: vmovaps 32(%rdi), %xmm13
; AVX2-FP-NEXT: vmovaps 64(%rdi), %xmm14
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm3 = xmm14[2],xmm10[2],xmm14[3],xmm10[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm1[2,3],ymm3[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm3 = xmm8[0],xmm5[0],xmm8[1],xmm5[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm10 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm3[2,3],ymm10[4,5],ymm3[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm8 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 96(%rcx), %xmm9
; AVX2-FP-NEXT: vmovaps 96(%rdx), %xmm10
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1]
; AVX2-FP-NEXT: vmovaps 96(%rsi), %xmm13
; AVX2-FP-NEXT: vmovaps 96(%rdi), %xmm14
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm8[2,3],ymm15[4,5],ymm8[6,7]
; AVX2-FP-NEXT: vmovaps (%rsi), %xmm15
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm9 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
; AVX2-FP-NEXT: vmovaps (%rdi), %xmm0
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm10 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm10 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,0,2,1]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm13 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0,1],ymm10[2,3],ymm13[4,5],ymm10[6,7]
; AVX2-FP-NEXT: vmovaps (%rdx), %ymm13
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm11 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
; AVX2-FP-NEXT: vmovaps (%rcx), %ymm14
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3],ymm0[4,5],ymm11[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm12 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm0[2,3],ymm12[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovaps 64(%rdx), %ymm0
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
; AVX2-FP-NEXT: vmovaps 64(%rcx), %ymm14
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,2,2,3]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm13[2,3],ymm6[4,5],ymm13[6,7]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm7 = ymm0[0],ymm14[0],ymm0[1],ymm14[1],ymm0[4],ymm14[4],ymm0[5],ymm14[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1],ymm7[2,3],ymm13[4,5],ymm7[6,7]
; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm13
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm14[2],ymm0[3],ymm14[3],ymm0[6],ymm14[6],ymm0[7],ymm14[7]
; AVX2-FP-NEXT: vmovaps 32(%rdx), %ymm14
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
; AVX2-FP-NEXT: vmovaps 32(%rcx), %ymm4
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm14[0],ymm4[0],ymm14[1],ymm4[1],ymm14[4],ymm4[4],ymm14[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 32(%rsi), %ymm15
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm1 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[4],ymm15[4],ymm13[5],ymm15[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm14[2],ymm4[2],ymm14[3],ymm4[3],ymm14[6],ymm4[6],ymm14[7],ymm4[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm4 = ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[6],ymm15[6],ymm13[7],ymm15[7]
; AVX2-FP-NEXT: vmovaps 96(%rdx), %ymm13
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovaps 96(%rcx), %ymm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm14 = ymm13[0],ymm4[0],ymm13[1],ymm4[1],ymm13[4],ymm4[4],ymm13[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 96(%rdi), %ymm15
; AVX2-FP-NEXT: vmovaps 96(%rsi), %ymm0
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3],ymm5[4,5],ymm14[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm4 = ymm13[2],ymm4[2],ymm13[3],ymm4[3],ymm13[6],ymm4[6],ymm13[7],ymm4[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vmovaps %ymm0, 480(%r8)
; AVX2-FP-NEXT: vmovaps %ymm5, 448(%r8)
; AVX2-FP-NEXT: vmovaps %ymm2, 224(%r8)
; AVX2-FP-NEXT: vmovaps %ymm1, 192(%r8)
; AVX2-FP-NEXT: vmovaps %ymm3, 352(%r8)
; AVX2-FP-NEXT: vmovaps %ymm7, 320(%r8)
; AVX2-FP-NEXT: vmovaps %ymm6, 64(%r8)
; AVX2-FP-NEXT: vmovaps %ymm12, 96(%r8)
; AVX2-FP-NEXT: vmovaps %ymm11, 32(%r8)
; AVX2-FP-NEXT: vmovaps %ymm10, (%r8)
; AVX2-FP-NEXT: vmovaps %ymm9, 416(%r8)
; AVX2-FP-NEXT: vmovaps %ymm8, 384(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 256(%r8)
; AVX2-FP-NEXT: popq %rax
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i32_stride4_vf32:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: pushq %rax
; AVX2-FCP-NEXT: vmovaps 64(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm6
; AVX2-FCP-NEXT: vmovaps 64(%rsi), %ymm4
; AVX2-FCP-NEXT: vmovaps (%rsi), %ymm7
; AVX2-FCP-NEXT: vmovaps (%rcx), %xmm11
; AVX2-FCP-NEXT: vmovaps 32(%rcx), %xmm5
; AVX2-FCP-NEXT: vmovaps 64(%rcx), %xmm1
; AVX2-FCP-NEXT: vmovaps (%rdx), %xmm12
; AVX2-FCP-NEXT: vmovaps 32(%rdx), %xmm8
; AVX2-FCP-NEXT: vmovaps 64(%rdx), %xmm3
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm0 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps 32(%rsi), %xmm9
; AVX2-FCP-NEXT: vmovaps 64(%rsi), %xmm10
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %xmm13
; AVX2-FCP-NEXT: vmovaps 64(%rdi), %xmm14
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm10[0],xmm14[1],xmm10[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm3 = xmm14[2],xmm10[2],xmm14[3],xmm10[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm1[2,3],ymm3[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm3 = xmm8[0],xmm5[0],xmm8[1],xmm5[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm10 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm3[2,3],ymm10[4,5],ymm3[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm8 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 96(%rcx), %xmm9
; AVX2-FCP-NEXT: vmovaps 96(%rdx), %xmm10
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps 96(%rsi), %xmm13
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %xmm14
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm8[2,3],ymm15[4,5],ymm8[6,7]
; AVX2-FCP-NEXT: vmovaps (%rsi), %xmm15
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm9 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
; AVX2-FCP-NEXT: vmovaps (%rdi), %xmm0
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm10 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm10 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,0,2,1]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm13 = xmm0[0],xmm15[0],xmm0[1],xmm15[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm13[0,1],ymm10[2,3],ymm13[4,5],ymm10[6,7]
; AVX2-FCP-NEXT: vmovaps (%rdx), %ymm13
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm11 = xmm12[2],xmm11[2],xmm12[3],xmm11[3]
; AVX2-FCP-NEXT: vmovaps (%rcx), %ymm14
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3],ymm0[4,5],ymm11[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm12 = ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[6],ymm7[6],ymm6[7],ymm7[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm0[2,3],ymm12[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovaps 64(%rdx), %ymm0
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
; AVX2-FCP-NEXT: vmovaps 64(%rcx), %ymm14
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,2,2,3]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[4],ymm7[4],ymm6[5],ymm7[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm13[2,3],ymm6[4,5],ymm13[6,7]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm7 = ymm0[0],ymm14[0],ymm0[1],ymm14[1],ymm0[4],ymm14[4],ymm0[5],ymm14[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1],ymm7[2,3],ymm13[4,5],ymm7[6,7]
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm13
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm14[2],ymm0[3],ymm14[3],ymm0[6],ymm14[6],ymm0[7],ymm14[7]
; AVX2-FCP-NEXT: vmovaps 32(%rdx), %ymm14
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[6],ymm4[6],ymm2[7],ymm4[7]
; AVX2-FCP-NEXT: vmovaps 32(%rcx), %ymm4
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm14[0],ymm4[0],ymm14[1],ymm4[1],ymm14[4],ymm4[4],ymm14[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 32(%rsi), %ymm15
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm1 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[4],ymm15[4],ymm13[5],ymm15[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm14[2],ymm4[2],ymm14[3],ymm4[3],ymm14[6],ymm4[6],ymm14[7],ymm4[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm4 = ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[6],ymm15[6],ymm13[7],ymm15[7]
; AVX2-FCP-NEXT: vmovaps 96(%rdx), %ymm13
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovaps 96(%rcx), %ymm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm14 = ymm13[0],ymm4[0],ymm13[1],ymm4[1],ymm13[4],ymm4[4],ymm13[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %ymm15
; AVX2-FCP-NEXT: vmovaps 96(%rsi), %ymm0
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3],ymm5[4,5],ymm14[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm4 = ymm13[2],ymm4[2],ymm13[3],ymm4[3],ymm13[6],ymm4[6],ymm13[7],ymm4[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vmovaps %ymm0, 480(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm5, 448(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm2, 224(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm1, 192(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm3, 352(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm7, 320(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm6, 64(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm12, 96(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm11, 32(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm10, (%r8)
; AVX2-FCP-NEXT: vmovaps %ymm9, 416(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm8, 384(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 256(%r8)
; AVX2-FCP-NEXT: popq %rax
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
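; Editorial sketch (not checked by FileCheck): in the AVX2 variants above, each
; 32-byte output row is built from an unpcklps/unpckhps pair that interleaves
; two sources, a vpermpd that moves the 64-bit lanes into position, and a
; vblendps that merges the rdi/rsi half with the rdx/rcx half, producing
;   { a[i], b[i], c[i], d[i], a[i+1], b[i+1], c[i+1], d[i+1] }
; per row, where a..d are the %rdi, %rsi, %rdx, %rcx inputs.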
;
; AVX512-LABEL: store_i32_stride4_vf32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512-NEXT: movb $-86, %al
; AVX512-NEXT: kmovw %eax, %k1
; AVX512-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
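; Editorial sketch (not checked by FileCheck): in this and the following
; AVX512 variants, vpermt2d places the rdi/rsi dword pairs in the even 64-bit
; lanes and the rdx/rcx pairs in the odd lanes. movb $-86 loads 0xAA
; (0b10101010) as an 8-lane qword mask, so each masked vmovdqa64 merge takes
; the odd qword lanes from the rdx/rcx permute and keeps the even lanes from
; the rdi/rsi permute.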
;
; AVX512-FCP-LABEL: store_i32_stride4_vf32:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-FCP-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512-FCP-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512-FCP-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512-FCP-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512-FCP-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512-FCP-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512-FCP-NEXT: movb $-86, %al
; AVX512-FCP-NEXT: kmovw %eax, %k1
; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512-FCP-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512-FCP-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512-FCP-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512-FCP-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512-FCP-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512-FCP-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i32_stride4_vf32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512DQ-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512DQ-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512DQ-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512DQ-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512DQ-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512DQ-NEXT: movb $-86, %al
; AVX512DQ-NEXT: kmovw %eax, %k1
; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512DQ-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512DQ-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512DQ-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512DQ-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512DQ-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512DQ-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512DQ-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512DQ-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512DQ-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512DQ-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i32_stride4_vf32:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512DQ-FCP-NEXT: movb $-86, %al
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512DQ-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512DQ-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512DQ-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512DQ-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512DQ-FCP-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512DQ-FCP-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i32_stride4_vf32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512BW-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512BW-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512BW-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512BW-NEXT: movb $-86, %al
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512BW-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512BW-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512BW-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512BW-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512BW-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512BW-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512BW-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512BW-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512BW-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512BW-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512BW-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512BW-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512BW-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512BW-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i32_stride4_vf32:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512BW-FCP-NEXT: movb $-86, %al
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512BW-FCP-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512BW-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512BW-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512BW-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512BW-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512BW-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512BW-FCP-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512BW-FCP-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i32_stride4_vf32:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512DQ-BW-NEXT: movb $-86, %al
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-BW-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-BW-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512DQ-BW-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512DQ-BW-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512DQ-BW-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512DQ-BW-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512DQ-BW-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i32_stride4_vf32:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rsi), %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdx), %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rcx), %zmm7
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm8 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm9
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm8, %zmm9
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm10 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm11
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm10, %zmm11
; AVX512DQ-BW-FCP-NEXT: movb $-86, %al
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm11 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm9 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm9, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm13 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm14
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm13, %zmm14
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm14 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm12 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm12, %zmm15
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm17
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm16, %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, %zmm17 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm6, %zmm15, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm2, %zmm6, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, %zmm0 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm10 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm9
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm9, %zmm13 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm7, %zmm5, %zmm12
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm3, %zmm1, %zmm16
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm12, %zmm16 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm7, %zmm15, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm3, %zmm6, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm16, 448(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, 256(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, 320(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, 192(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
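; The IR below is the stride-4 interleave that all of the blocks above
; implement. As a scalar sketch (illustrative only; in0..in3 stand for
; %in.vec0..%in.vec3 and are not part of the checked output):
;   for (i = 0; i < 32; ++i) {
;     out[4*i+0] = in0[i]; out[4*i+1] = in1[i];
;     out[4*i+2] = in2[i]; out[4*i+3] = in3[i];
;   }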
%in.vec0 = load <32 x i32>, ptr %in.vecptr0, align 64
%in.vec1 = load <32 x i32>, ptr %in.vecptr1, align 64
%in.vec2 = load <32 x i32>, ptr %in.vecptr2, align 64
%in.vec3 = load <32 x i32>, ptr %in.vecptr3, align 64
%1 = shufflevector <32 x i32> %in.vec0, <32 x i32> %in.vec1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
%2 = shufflevector <32 x i32> %in.vec2, <32 x i32> %in.vec3, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
%3 = shufflevector <64 x i32> %1, <64 x i32> %2, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
%interleaved.vec = shufflevector <128 x i32> %3, <128 x i32> poison, <128 x i32> <i32 0, i32 32, i32 64, i32 96, i32 1, i32 33, i32 65, i32 97, i32 2, i32 34, i32 66, i32 98, i32 3, i32 35, i32 67, i32 99, i32 4, i32 36, i32 68, i32 100, i32 5, i32 37, i32 69, i32 101, i32 6, i32 38, i32 70, i32 102, i32 7, i32 39, i32 71, i32 103, i32 8, i32 40, i32 72, i32 104, i32 9, i32 41, i32 73, i32 105, i32 10, i32 42, i32 74, i32 106, i32 11, i32 43, i32 75, i32 107, i32 12, i32 44, i32 76, i32 108, i32 13, i32 45, i32 77, i32 109, i32 14, i32 46, i32 78, i32 110, i32 15, i32 47, i32 79, i32 111, i32 16, i32 48, i32 80, i32 112, i32 17, i32 49, i32 81, i32 113, i32 18, i32 50, i32 82, i32 114, i32 19, i32 51, i32 83, i32 115, i32 20, i32 52, i32 84, i32 116, i32 21, i32 53, i32 85, i32 117, i32 22, i32 54, i32 86, i32 118, i32 23, i32 55, i32 87, i32 119, i32 24, i32 56, i32 88, i32 120, i32 25, i32 57, i32 89, i32 121, i32 26, i32 58, i32 90, i32 122, i32 27, i32 59, i32 91, i32 123, i32 28, i32 60, i32 92, i32 124, i32 29, i32 61, i32 93, i32 125, i32 30, i32 62, i32 94, i32 126, i32 31, i32 63, i32 95, i32 127>
store <128 x i32> %interleaved.vec, ptr %out.vec, align 64
ret void
}
define void @store_i32_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %out.vec) nounwind {
; SSE-LABEL: store_i32_stride4_vf64:
; SSE: # %bb.0:
; SSE-NEXT: subq $696, %rsp # imm = 0x2B8
; SSE-NEXT: movaps (%rdi), %xmm10
; SSE-NEXT: movaps 16(%rdi), %xmm11
; SSE-NEXT: movaps 32(%rdi), %xmm12
; SSE-NEXT: movaps 48(%rdi), %xmm13
; SSE-NEXT: movaps (%rsi), %xmm4
; SSE-NEXT: movaps 16(%rsi), %xmm2
; SSE-NEXT: movaps 32(%rsi), %xmm0
; SSE-NEXT: movaps (%rdx), %xmm6
; SSE-NEXT: movaps 16(%rdx), %xmm3
; SSE-NEXT: movaps 32(%rdx), %xmm1
; SSE-NEXT: movaps (%rcx), %xmm7
; SSE-NEXT: movaps 16(%rcx), %xmm8
; SSE-NEXT: movaps 32(%rcx), %xmm5
; SSE-NEXT: movaps %xmm6, %xmm9
; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
; SSE-NEXT: movaps %xmm10, %xmm14
; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm4[0],xmm14[1],xmm4[1]
; SSE-NEXT: movaps %xmm14, %xmm15
; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm9[0]
; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm9[1]
; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm4[2],xmm10[3],xmm4[3]
; SSE-NEXT: movaps %xmm10, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm6[0]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm6[1]
; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
; SSE-NEXT: movaps %xmm11, %xmm6
; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
; SSE-NEXT: movaps %xmm6, %xmm7
; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm4[0]
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm4[1]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm8[2],xmm3[3],xmm8[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm11 = xmm11[2],xmm2[2],xmm11[3],xmm2[3]
; SSE-NEXT: movaps %xmm11, %xmm2
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm3[1]
; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSE-NEXT: movaps %xmm12, %xmm3
; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm2[0]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 48(%rdx), %xmm2
; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
; SSE-NEXT: movaps 48(%rcx), %xmm3
; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm0[2],xmm12[3],xmm0[3]
; SSE-NEXT: movaps %xmm12, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm1[1]
; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE-NEXT: movaps 48(%rsi), %xmm1
; SSE-NEXT: movaps %xmm13, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE-NEXT: movaps %xmm4, %xmm5
; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm1[2],xmm13[3],xmm1[3]
; SSE-NEXT: movaps %xmm13, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm2[1]
; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
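; Editorial note: from 64(%rdx) onward the same four-register pattern repeats
; for each successive 16-byte slice of the inputs: unpcklps/unpckhps to
; interleave the rdx/rcx and rdi/rsi pairs, movlhps/unpckhpd to split them
; into output rows, then spill; the spilled rows are stored in order at the end.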
; SSE-NEXT: movaps 64(%rdx), %xmm0
; SSE-NEXT: movaps 64(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 64(%rdi), %xmm5
; SSE-NEXT: movaps 64(%rsi), %xmm3
; SSE-NEXT: movaps %xmm5, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 80(%rdx), %xmm0
; SSE-NEXT: movaps 80(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 80(%rdi), %xmm5
; SSE-NEXT: movaps 80(%rsi), %xmm3
; SSE-NEXT: movaps %xmm5, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 96(%rdx), %xmm0
; SSE-NEXT: movaps 96(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 96(%rdi), %xmm5
; SSE-NEXT: movaps 96(%rsi), %xmm3
; SSE-NEXT: movaps %xmm5, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 112(%rdx), %xmm0
; SSE-NEXT: movaps 112(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 112(%rdi), %xmm5
; SSE-NEXT: movaps 112(%rsi), %xmm3
; SSE-NEXT: movaps %xmm5, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 128(%rdx), %xmm0
; SSE-NEXT: movaps 128(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 128(%rdi), %xmm5
; SSE-NEXT: movaps 128(%rsi), %xmm3
; SSE-NEXT: movaps %xmm5, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 144(%rdx), %xmm0
; SSE-NEXT: movaps 144(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 144(%rdi), %xmm5
; SSE-NEXT: movaps 144(%rsi), %xmm3
; SSE-NEXT: movaps %xmm5, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 160(%rdx), %xmm0
; SSE-NEXT: movaps 160(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 160(%rdi), %xmm5
; SSE-NEXT: movaps 160(%rsi), %xmm3
; SSE-NEXT: movaps %xmm5, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm2[0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: movaps %xmm5, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 176(%rdx), %xmm0
; SSE-NEXT: movaps 176(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 176(%rdi), %xmm15
; SSE-NEXT: movaps 176(%rsi), %xmm3
; SSE-NEXT: movaps %xmm15, %xmm4
; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE-NEXT: movaps %xmm4, %xmm5
; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm2[0]
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm15 = xmm15[2],xmm3[2],xmm15[3],xmm3[3]
; SSE-NEXT: movaps %xmm15, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1]
; SSE-NEXT: movaps 192(%rdx), %xmm0
; SSE-NEXT: movaps 192(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 192(%rdi), %xmm12
; SSE-NEXT: movaps 192(%rsi), %xmm3
; SSE-NEXT: movaps %xmm12, %xmm14
; SSE-NEXT: unpcklps {{.*#+}} xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
; SSE-NEXT: movaps %xmm14, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm2[0]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm2[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm12 = xmm12[2],xmm3[2],xmm12[3],xmm3[3]
; SSE-NEXT: movaps %xmm12, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1]
; SSE-NEXT: movaps 208(%rdx), %xmm0
; SSE-NEXT: movaps 208(%rcx), %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: movaps 208(%rdi), %xmm13
; SSE-NEXT: movaps 208(%rsi), %xmm7
; SSE-NEXT: movaps %xmm13, %xmm8
; SSE-NEXT: unpcklps {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; SSE-NEXT: movaps %xmm8, %xmm3
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm2[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm7[2],xmm13[3],xmm7[3]
; SSE-NEXT: movaps %xmm13, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm0[1]
; SSE-NEXT: movaps 224(%rdx), %xmm1
; SSE-NEXT: movaps 224(%rcx), %xmm6
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE-NEXT: movaps 224(%rdi), %xmm5
; SSE-NEXT: movaps 224(%rsi), %xmm4
; SSE-NEXT: movaps %xmm5, %xmm9
; SSE-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
; SSE-NEXT: movaps %xmm9, %xmm11
; SSE-NEXT: movlhps {{.*#+}} xmm11 = xmm11[0],xmm0[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; SSE-NEXT: movaps %xmm5, %xmm10
; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm1[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
; SSE-NEXT: movaps 240(%rdx), %xmm2
; SSE-NEXT: movaps 240(%rcx), %xmm7
; SSE-NEXT: movaps %xmm2, %xmm6
; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; SSE-NEXT: movaps 240(%rdi), %xmm0
; SSE-NEXT: movaps 240(%rsi), %xmm4
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm6[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE-NEXT: movaps %xmm0, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm2[0]
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; SSE-NEXT: movaps %xmm0, 1008(%r8)
; SSE-NEXT: movaps %xmm4, 992(%r8)
; SSE-NEXT: movaps %xmm1, 976(%r8)
; SSE-NEXT: movaps %xmm3, 960(%r8)
; SSE-NEXT: movaps %xmm5, 944(%r8)
; SSE-NEXT: movaps %xmm10, 928(%r8)
; SSE-NEXT: movaps %xmm9, 912(%r8)
; SSE-NEXT: movaps %xmm11, 896(%r8)
; SSE-NEXT: movaps %xmm13, 880(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 864(%r8)
; SSE-NEXT: movaps %xmm8, 848(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 832(%r8)
; SSE-NEXT: movaps %xmm12, 816(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 800(%r8)
; SSE-NEXT: movaps %xmm14, 784(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 768(%r8)
; SSE-NEXT: movaps %xmm15, 752(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 736(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 720(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 704(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 688(%r8)
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 672(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 656(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 640(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 624(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 608(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 592(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 576(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 560(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 544(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 528(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 512(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 496(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 480(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 464(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 448(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 432(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 416(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 400(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 384(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 368(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 352(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 336(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 320(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 304(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 288(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 272(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 256(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 240(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 224(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 208(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 192(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 176(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 160(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 144(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 128(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 112(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 96(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 80(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 64(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%r8)
; SSE-NEXT: addq $696, %rsp # imm = 0x2B8
; SSE-NEXT: retq
;
; AVX-LABEL: store_i32_stride4_vf64:
; AVX: # %bb.0:
; AVX-NEXT: subq $1384, %rsp # imm = 0x568
; AVX-NEXT: vmovaps (%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 16(%rdi), %xmm5
; AVX-NEXT: vmovaps 32(%rdi), %xmm3
; AVX-NEXT: vmovaps (%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 16(%rsi), %xmm6
; AVX-NEXT: vmovaps 32(%rsi), %xmm4
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps (%rcx), %xmm11
; AVX-NEXT: vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 16(%rcx), %xmm9
; AVX-NEXT: vmovaps 32(%rcx), %xmm7
; AVX-NEXT: vmovaps (%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 16(%rdx), %xmm10
; AVX-NEXT: vmovaps 32(%rdx), %xmm8
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm11[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm5[1],xmm6[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm9[0],xmm10[0]
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1,2,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[1],xmm4[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm7[0],xmm8[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 48(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 48(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 48(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 48(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 64(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 64(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 64(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 64(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 80(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 80(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 80(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 80(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 96(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 96(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 96(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 96(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 112(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 112(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 112(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 112(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 128(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 128(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 128(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 128(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 144(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 144(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 144(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps 144(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 160(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 160(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 160(%rcx), %xmm3
; AVX-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 160(%rdx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 176(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 176(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 176(%rcx), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 176(%rdx), %xmm13
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm13[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm13[0],xmm2[0],xmm13[1],xmm2[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 192(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 192(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 192(%rcx), %xmm10
; AVX-NEXT: vmovaps 192(%rdx), %xmm8
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm10[0],xmm8[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 208(%rdi), %xmm2
; AVX-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 208(%rsi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 208(%rcx), %xmm6
; AVX-NEXT: vmovaps 208(%rdx), %xmm4
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm6[0],xmm4[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm2 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 224(%rdi), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps 224(%rsi), %xmm11
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[1],xmm11[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX-NEXT: vmovaps 224(%rcx), %xmm3
; AVX-NEXT: vmovaps 224(%rdx), %xmm2
; AVX-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps 240(%rdi), %xmm9
; AVX-NEXT: vmovaps 240(%rsi), %xmm7
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm9[1],xmm7[1],zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm12
; AVX-NEXT: vmovaps 240(%rcx), %xmm5
; AVX-NEXT: vmovaps 240(%rdx), %xmm1
; AVX-NEXT: vmovlhps {{.*#+}} xmm14 = xmm5[0],xmm1[0]
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[0,1,2,0]
; AVX-NEXT: vunpcklps {{.*#+}} xmm15 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm14[2,3],ymm12[4,5],ymm14[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm0[2],xmm15[2],xmm0[3],xmm15[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[3,0],xmm0[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps (%rsp), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm14[2],xmm0[3],xmm14[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm14 = zero,zero,xmm0[2],xmm14[2]
; AVX-NEXT: vinsertf128 $1, %xmm12, %ymm14, %ymm12
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm12 = ymm14[0,1],ymm12[2,3],ymm14[4,5],ymm12[6,7]
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm13[2],xmm0[2],xmm13[3],xmm0[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm13 = zero,zero,xmm13[2],xmm0[2]
; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm13, %ymm13
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm0[3,0],xmm15[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm15 = xmm15[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm15, %ymm14, %ymm14
; AVX-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5],ymm13[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm14 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm8 = zero,zero,xmm8[2],xmm10[2]
; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm8, %ymm8
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm10 = xmm14[2],xmm0[2],xmm14[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm0[3,0],xmm14[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm14 = xmm14[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm14, %ymm10, %ymm10
; AVX-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3],ymm10[4,5],ymm8[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm10 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm4 = zero,zero,xmm4[2],xmm6[2]
; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm4, %ymm4
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm6 = xmm10[2],xmm0[2],xmm10[3],xmm0[3]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm0[3,0],xmm10[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm10 = xmm10[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm10, %ymm6, %ymm6
; AVX-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm6 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm2 = zero,zero,xmm2[2],xmm3[2]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm0[2],xmm11[2],xmm0[3],xmm11[3]
; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm11[3,0],xmm0[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm6 = xmm6[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
; AVX-NEXT: vunpckhps {{.*#+}} xmm3 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm1[2],xmm5[2]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX-NEXT: vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm7[3,0],xmm9[3,0]
; AVX-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0,2,3]
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX-NEXT: vmovaps %ymm0, 992(%r8)
; AVX-NEXT: vmovaps %ymm2, 928(%r8)
; AVX-NEXT: vmovaps %ymm4, 864(%r8)
; AVX-NEXT: vmovaps %ymm8, 800(%r8)
; AVX-NEXT: vmovaps %ymm13, 736(%r8)
; AVX-NEXT: vmovaps %ymm12, 672(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 608(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 544(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 480(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 416(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 352(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 288(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 224(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 160(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 96(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 32(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 960(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 896(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 832(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 768(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 704(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 640(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 576(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 512(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 448(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 384(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 320(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 256(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 192(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 128(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, 64(%r8)
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vmovaps %ymm0, (%r8)
; AVX-NEXT: addq $1384, %rsp # imm = 0x568
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX2-LABEL: store_i32_stride4_vf64:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $520, %rsp # imm = 0x208
; AVX2-NEXT: vmovaps (%rcx), %xmm4
; AVX2-NEXT: vmovaps 32(%rcx), %xmm5
; AVX2-NEXT: vmovaps 64(%rcx), %xmm0
; AVX2-NEXT: vmovaps (%rdx), %xmm6
; AVX2-NEXT: vmovaps 32(%rdx), %xmm7
; AVX2-NEXT: vmovaps 64(%rdx), %xmm1
; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm2[0,0,2,1]
; AVX2-NEXT: vmovaps (%rsi), %xmm9
; AVX2-NEXT: vmovaps 32(%rsi), %xmm10
; AVX2-NEXT: vmovaps 64(%rsi), %xmm2
; AVX2-NEXT: vmovaps (%rdi), %xmm11
; AVX2-NEXT: vmovaps 32(%rdi), %xmm12
; AVX2-NEXT: vmovaps 64(%rdi), %xmm3
; AVX2-NEXT: vunpcklps {{.*#+}} xmm13 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2,3],ymm13[4,5],ymm8[6,7]
; AVX2-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm6 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX2-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX2-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpcklps {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 96(%rcx), %xmm0
; AVX2-NEXT: vmovaps 96(%rdx), %xmm1
; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-NEXT: vmovaps 96(%rsi), %xmm3
; AVX2-NEXT: vmovaps 96(%rdi), %xmm4
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 128(%rcx), %xmm0
; AVX2-NEXT: vmovaps 128(%rdx), %xmm1
; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-NEXT: vmovaps 128(%rsi), %xmm3
; AVX2-NEXT: vmovaps 128(%rdi), %xmm4
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 160(%rcx), %xmm0
; AVX2-NEXT: vmovaps 160(%rdx), %xmm1
; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-NEXT: vmovaps 160(%rsi), %xmm3
; AVX2-NEXT: vmovaps 160(%rdi), %xmm4
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 192(%rcx), %xmm0
; AVX2-NEXT: vmovaps 192(%rdx), %xmm1
; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-NEXT: vmovaps 192(%rsi), %xmm3
; AVX2-NEXT: vmovaps 192(%rdi), %xmm4
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 224(%rcx), %xmm0
; AVX2-NEXT: vmovaps 224(%rdx), %xmm1
; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-NEXT: vmovaps 224(%rsi), %xmm3
; AVX2-NEXT: vmovaps 224(%rdi), %xmm4
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vmovaps (%rdx), %ymm0
; AVX2-NEXT: vmovaps (%rcx), %ymm1
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovaps (%rdi), %ymm3
; AVX2-NEXT: vmovaps (%rsi), %ymm4
; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 32(%rdx), %ymm0
; AVX2-NEXT: vmovaps 32(%rcx), %ymm1
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovaps 32(%rdi), %ymm3
; AVX2-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps 64(%rdx), %ymm0
; AVX2-NEXT: vmovaps 64(%rcx), %ymm1
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovaps 64(%rdi), %ymm3
; AVX2-NEXT: vmovaps 64(%rsi), %ymm4
; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovaps 96(%rdx), %ymm0
; AVX2-NEXT: vmovaps 96(%rcx), %ymm1
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovaps 96(%rdi), %ymm3
; AVX2-NEXT: vmovaps 96(%rsi), %ymm4
; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT: vmovaps 128(%rdx), %ymm3
; AVX2-NEXT: vmovaps 128(%rcx), %ymm1
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[4],ymm1[4],ymm3[5],ymm1[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[0,2,2,3]
; AVX2-NEXT: vmovaps 128(%rdi), %ymm2
; AVX2-NEXT: vmovaps 128(%rsi), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1],ymm4[2,3],ymm13[4,5],ymm4[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[6],ymm1[6],ymm3[7],ymm1[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vmovaps 160(%rdx), %ymm2
; AVX2-NEXT: vmovaps 160(%rcx), %ymm3
; AVX2-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovaps 160(%rdi), %ymm13
; AVX2-NEXT: vmovaps 160(%rsi), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm15 = ymm13[0],ymm0[0],ymm13[1],ymm0[1],ymm13[4],ymm0[4],ymm13[5],ymm0[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm15[0,1],ymm1[2,3],ymm15[4,5],ymm1[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm0[2],ymm13[3],ymm0[3],ymm13[6],ymm0[6],ymm13[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-NEXT: vmovaps 192(%rdx), %ymm2
; AVX2-NEXT: vmovaps 192(%rcx), %ymm3
; AVX2-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,2,2,3]
; AVX2-NEXT: vmovaps 192(%rdi), %ymm15
; AVX2-NEXT: vmovaps 192(%rsi), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm14 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5],ymm13[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-NEXT: vmovaps 224(%rdx), %ymm2
; AVX2-NEXT: vmovaps 224(%rcx), %ymm3
; AVX2-NEXT: vunpcklps {{.*#+}} ymm14 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,2,2,3]
; AVX2-NEXT: vmovaps 224(%rdi), %ymm15
; AVX2-NEXT: vmovaps 224(%rsi), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm12 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm14[2,3],ymm12[4,5],ymm14[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-NEXT: vmovaps %ymm0, 992(%r8)
; AVX2-NEXT: vmovaps %ymm12, 960(%r8)
; AVX2-NEXT: vmovaps %ymm1, 864(%r8)
; AVX2-NEXT: vmovaps %ymm13, 832(%r8)
; AVX2-NEXT: vmovaps %ymm4, 736(%r8)
; AVX2-NEXT: vmovaps %ymm5, 704(%r8)
; AVX2-NEXT: vmovaps %ymm6, 608(%r8)
; AVX2-NEXT: vmovaps %ymm7, 576(%r8)
; AVX2-NEXT: vmovaps %ymm8, 480(%r8)
; AVX2-NEXT: vmovaps %ymm9, 448(%r8)
; AVX2-NEXT: vmovaps %ymm10, 352(%r8)
; AVX2-NEXT: vmovaps %ymm11, 320(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 224(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 192(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 96(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 64(%r8)
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 928(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 896(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 800(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 768(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 672(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 640(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 544(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 512(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 416(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 384(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 256(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, (%r8)
; AVX2-NEXT: addq $520, %rsp # imm = 0x208
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-FP-LABEL: store_i32_stride4_vf64:
; AVX2-FP: # %bb.0:
; AVX2-FP-NEXT: subq $520, %rsp # imm = 0x208
; AVX2-FP-NEXT: vmovaps (%rcx), %xmm4
; AVX2-FP-NEXT: vmovaps 32(%rcx), %xmm5
; AVX2-FP-NEXT: vmovaps 64(%rcx), %xmm0
; AVX2-FP-NEXT: vmovaps (%rdx), %xmm6
; AVX2-FP-NEXT: vmovaps 32(%rdx), %xmm7
; AVX2-FP-NEXT: vmovaps 64(%rdx), %xmm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm8 = ymm2[0,0,2,1]
; AVX2-FP-NEXT: vmovaps (%rsi), %xmm9
; AVX2-FP-NEXT: vmovaps 32(%rsi), %xmm10
; AVX2-FP-NEXT: vmovaps 64(%rsi), %xmm2
; AVX2-FP-NEXT: vmovaps (%rdi), %xmm11
; AVX2-FP-NEXT: vmovaps 32(%rdi), %xmm12
; AVX2-FP-NEXT: vmovaps 64(%rdi), %xmm3
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm13 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2,3],ymm13[4,5],ymm8[6,7]
; AVX2-FP-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm6 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm5 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 96(%rcx), %xmm0
; AVX2-FP-NEXT: vmovaps 96(%rdx), %xmm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FP-NEXT: vmovaps 96(%rsi), %xmm3
; AVX2-FP-NEXT: vmovaps 96(%rdi), %xmm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 128(%rcx), %xmm0
; AVX2-FP-NEXT: vmovaps 128(%rdx), %xmm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FP-NEXT: vmovaps 128(%rsi), %xmm3
; AVX2-FP-NEXT: vmovaps 128(%rdi), %xmm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 160(%rcx), %xmm0
; AVX2-FP-NEXT: vmovaps 160(%rdx), %xmm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FP-NEXT: vmovaps 160(%rsi), %xmm3
; AVX2-FP-NEXT: vmovaps 160(%rdi), %xmm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 192(%rcx), %xmm0
; AVX2-FP-NEXT: vmovaps 192(%rdx), %xmm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FP-NEXT: vmovaps 192(%rsi), %xmm3
; AVX2-FP-NEXT: vmovaps 192(%rdi), %xmm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 224(%rcx), %xmm0
; AVX2-FP-NEXT: vmovaps 224(%rdx), %xmm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FP-NEXT: vmovaps 224(%rsi), %xmm3
; AVX2-FP-NEXT: vmovaps 224(%rdi), %xmm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps (%rdx), %ymm0
; AVX2-FP-NEXT: vmovaps (%rcx), %ymm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vmovaps (%rdi), %ymm3
; AVX2-FP-NEXT: vmovaps (%rsi), %ymm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 32(%rdx), %ymm0
; AVX2-FP-NEXT: vmovaps 32(%rcx), %ymm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 32(%rdi), %ymm3
; AVX2-FP-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT: vmovaps 64(%rdx), %ymm0
; AVX2-FP-NEXT: vmovaps 64(%rcx), %ymm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 64(%rdi), %ymm3
; AVX2-FP-NEXT: vmovaps 64(%rsi), %ymm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm11 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovaps 96(%rdx), %ymm0
; AVX2-FP-NEXT: vmovaps 96(%rcx), %ymm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 96(%rdi), %ymm3
; AVX2-FP-NEXT: vmovaps 96(%rsi), %ymm4
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm9 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT: vmovaps 128(%rdx), %ymm3
; AVX2-FP-NEXT: vmovaps 128(%rcx), %ymm1
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[4],ymm1[4],ymm3[5],ymm1[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 128(%rdi), %ymm2
; AVX2-FP-NEXT: vmovaps 128(%rsi), %ymm0
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1],ymm4[2,3],ymm13[4,5],ymm4[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[6],ymm1[6],ymm3[7],ymm1[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vmovaps 160(%rdx), %ymm2
; AVX2-FP-NEXT: vmovaps 160(%rcx), %ymm3
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 160(%rdi), %ymm13
; AVX2-FP-NEXT: vmovaps 160(%rsi), %ymm0
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm15 = ymm13[0],ymm0[0],ymm13[1],ymm0[1],ymm13[4],ymm0[4],ymm13[5],ymm0[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm5 = ymm15[0,1],ymm1[2,3],ymm15[4,5],ymm1[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm0[2],ymm13[3],ymm0[3],ymm13[6],ymm0[6],ymm13[7],ymm0[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovaps 192(%rdx), %ymm2
; AVX2-FP-NEXT: vmovaps 192(%rcx), %ymm3
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 192(%rdi), %ymm15
; AVX2-FP-NEXT: vmovaps 192(%rsi), %ymm0
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm14 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5],ymm13[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovaps 224(%rdx), %ymm2
; AVX2-FP-NEXT: vmovaps 224(%rcx), %ymm3
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm14 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,2,2,3]
; AVX2-FP-NEXT: vmovaps 224(%rdi), %ymm15
; AVX2-FP-NEXT: vmovaps 224(%rsi), %ymm0
; AVX2-FP-NEXT: vunpcklps {{.*#+}} ymm12 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm14[2,3],ymm12[4,5],ymm14[6,7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FP-NEXT: vmovaps %ymm0, 992(%r8)
; AVX2-FP-NEXT: vmovaps %ymm12, 960(%r8)
; AVX2-FP-NEXT: vmovaps %ymm1, 864(%r8)
; AVX2-FP-NEXT: vmovaps %ymm13, 832(%r8)
; AVX2-FP-NEXT: vmovaps %ymm4, 736(%r8)
; AVX2-FP-NEXT: vmovaps %ymm5, 704(%r8)
; AVX2-FP-NEXT: vmovaps %ymm6, 608(%r8)
; AVX2-FP-NEXT: vmovaps %ymm7, 576(%r8)
; AVX2-FP-NEXT: vmovaps %ymm8, 480(%r8)
; AVX2-FP-NEXT: vmovaps %ymm9, 448(%r8)
; AVX2-FP-NEXT: vmovaps %ymm10, 352(%r8)
; AVX2-FP-NEXT: vmovaps %ymm11, 320(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 224(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 192(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 96(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 64(%r8)
; AVX2-FP-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 928(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 896(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 800(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 768(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 672(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 640(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 544(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 512(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 416(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 384(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 256(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-FP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT: vmovaps %ymm0, (%r8)
; AVX2-FP-NEXT: addq $520, %rsp # imm = 0x208
; AVX2-FP-NEXT: vzeroupper
; AVX2-FP-NEXT: retq
;
; AVX2-FCP-LABEL: store_i32_stride4_vf64:
; AVX2-FCP: # %bb.0:
; AVX2-FCP-NEXT: subq $520, %rsp # imm = 0x208
; AVX2-FCP-NEXT: vmovaps (%rcx), %xmm4
; AVX2-FCP-NEXT: vmovaps 32(%rcx), %xmm5
; AVX2-FCP-NEXT: vmovaps 64(%rcx), %xmm0
; AVX2-FCP-NEXT: vmovaps (%rdx), %xmm6
; AVX2-FCP-NEXT: vmovaps 32(%rdx), %xmm7
; AVX2-FCP-NEXT: vmovaps 64(%rdx), %xmm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm8 = ymm2[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps (%rsi), %xmm9
; AVX2-FCP-NEXT: vmovaps 32(%rsi), %xmm10
; AVX2-FCP-NEXT: vmovaps 64(%rsi), %xmm2
; AVX2-FCP-NEXT: vmovaps (%rdi), %xmm11
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %xmm12
; AVX2-FCP-NEXT: vmovaps 64(%rdi), %xmm3
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm13 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm13[0,1],ymm8[2,3],ymm13[4,5],ymm8[6,7]
; AVX2-FCP-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm4 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm6 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm6 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm4 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm5 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 96(%rcx), %xmm0
; AVX2-FCP-NEXT: vmovaps 96(%rdx), %xmm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps 96(%rsi), %xmm3
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %xmm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 128(%rcx), %xmm0
; AVX2-FCP-NEXT: vmovaps 128(%rdx), %xmm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps 128(%rsi), %xmm3
; AVX2-FCP-NEXT: vmovaps 128(%rdi), %xmm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 160(%rcx), %xmm0
; AVX2-FCP-NEXT: vmovaps 160(%rdx), %xmm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps 160(%rsi), %xmm3
; AVX2-FCP-NEXT: vmovaps 160(%rdi), %xmm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 192(%rcx), %xmm0
; AVX2-FCP-NEXT: vmovaps 192(%rdx), %xmm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps 192(%rsi), %xmm3
; AVX2-FCP-NEXT: vmovaps 192(%rdi), %xmm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 224(%rcx), %xmm0
; AVX2-FCP-NEXT: vmovaps 224(%rdx), %xmm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,0,2,1]
; AVX2-FCP-NEXT: vmovaps 224(%rsi), %xmm3
; AVX2-FCP-NEXT: vmovaps 224(%rdi), %xmm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} xmm1 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps (%rdx), %ymm0
; AVX2-FCP-NEXT: vmovaps (%rcx), %ymm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps (%rdi), %ymm3
; AVX2-FCP-NEXT: vmovaps (%rsi), %ymm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 32(%rdx), %ymm0
; AVX2-FCP-NEXT: vmovaps 32(%rcx), %ymm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 32(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT: vmovaps 64(%rdx), %ymm0
; AVX2-FCP-NEXT: vmovaps 64(%rcx), %ymm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 64(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovaps 64(%rsi), %ymm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm11 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm10 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovaps 96(%rdx), %ymm0
; AVX2-FCP-NEXT: vmovaps 96(%rcx), %ymm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 96(%rdi), %ymm3
; AVX2-FCP-NEXT: vmovaps 96(%rsi), %ymm4
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm5 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[4],ymm4[4],ymm3[5],ymm4[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm9 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[6],ymm4[6],ymm3[7],ymm4[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT: vmovaps 128(%rdx), %ymm3
; AVX2-FCP-NEXT: vmovaps 128(%rcx), %ymm1
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm2 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[4],ymm1[4],ymm3[5],ymm1[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 128(%rdi), %ymm2
; AVX2-FCP-NEXT: vmovaps 128(%rsi), %ymm0
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm7 = ymm13[0,1],ymm4[2,3],ymm13[4,5],ymm4[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm1 = ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[6],ymm1[6],ymm3[7],ymm1[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[6],ymm0[6],ymm2[7],ymm0[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm6 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vmovaps 160(%rdx), %ymm2
; AVX2-FCP-NEXT: vmovaps 160(%rcx), %ymm3
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 160(%rdi), %ymm13
; AVX2-FCP-NEXT: vmovaps 160(%rsi), %ymm0
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm15 = ymm13[0],ymm0[0],ymm13[1],ymm0[1],ymm13[4],ymm0[4],ymm13[5],ymm0[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm15 = ymm15[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm5 = ymm15[0,1],ymm1[2,3],ymm15[4,5],ymm1[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm13[2],ymm0[2],ymm13[3],ymm0[3],ymm13[6],ymm0[6],ymm13[7],ymm0[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm4 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovaps 192(%rdx), %ymm2
; AVX2-FCP-NEXT: vmovaps 192(%rcx), %ymm3
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm13 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 192(%rdi), %ymm15
; AVX2-FCP-NEXT: vmovaps 192(%rsi), %ymm0
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm14 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3],ymm14[4,5],ymm13[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovaps 224(%rdx), %ymm2
; AVX2-FCP-NEXT: vmovaps 224(%rcx), %ymm3
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm14 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm14 = ymm14[0,2,2,3]
; AVX2-FCP-NEXT: vmovaps 224(%rdi), %ymm15
; AVX2-FCP-NEXT: vmovaps 224(%rsi), %ymm0
; AVX2-FCP-NEXT: vunpcklps {{.*#+}} ymm12 = ymm15[0],ymm0[0],ymm15[1],ymm0[1],ymm15[4],ymm0[4],ymm15[5],ymm0[5]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm14[2,3],ymm12[4,5],ymm14[6,7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7]
; AVX2-FCP-NEXT: vunpckhps {{.*#+}} ymm0 = ymm15[2],ymm0[2],ymm15[3],ymm0[3],ymm15[6],ymm0[6],ymm15[7],ymm0[7]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-FCP-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-FCP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
; AVX2-FCP-NEXT: vmovaps %ymm0, 992(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm12, 960(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm1, 864(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm13, 832(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm4, 736(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm5, 704(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm6, 608(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm7, 576(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm8, 480(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm9, 448(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm10, 352(%r8)
; AVX2-FCP-NEXT: vmovaps %ymm11, 320(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 224(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 192(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 96(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 64(%r8)
; AVX2-FCP-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 928(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 896(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 800(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 768(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 672(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 640(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 544(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 512(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 416(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 384(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 256(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-FCP-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT: vmovaps %ymm0, (%r8)
; AVX2-FCP-NEXT: addq $520, %rsp # imm = 0x208
; AVX2-FCP-NEXT: vzeroupper
; AVX2-FCP-NEXT: retq
;
; AVX512-LABEL: store_i32_stride4_vf64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512-NEXT: movb $-86, %al
; AVX512-NEXT: kmovw %eax, %k1
; AVX512-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i32_stride4_vf64:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512-FCP-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512-FCP-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512-FCP-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512-FCP-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512-FCP-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512-FCP-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512-FCP-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512-FCP-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512-FCP-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512-FCP-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512-FCP-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512-FCP-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512-FCP-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512-FCP-NEXT: movb $-86, %al
; AVX512-FCP-NEXT: kmovw %eax, %k1
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512-FCP-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512-FCP-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512-FCP-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512-FCP-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512-FCP-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512-FCP-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512-FCP-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512-FCP-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512-FCP-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512-FCP-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512-FCP-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512-FCP-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512-FCP-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512-FCP-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512-FCP-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512-FCP-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512-FCP-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512-FCP-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512-FCP-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512-FCP-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512-FCP-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512-FCP-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512-FCP-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512-FCP-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512-FCP-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512-FCP-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512-FCP-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512-FCP-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512-FCP-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512-FCP-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512-FCP-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512-FCP-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i32_stride4_vf64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512DQ-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512DQ-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512DQ-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512DQ-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512DQ-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512DQ-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512DQ-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512DQ-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512DQ-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512DQ-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512DQ-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512DQ-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512DQ-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512DQ-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512DQ-NEXT: movb $-86, %al
; AVX512DQ-NEXT: kmovw %eax, %k1
; AVX512DQ-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512DQ-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512DQ-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512DQ-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512DQ-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512DQ-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512DQ-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512DQ-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512DQ-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512DQ-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512DQ-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512DQ-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512DQ-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512DQ-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512DQ-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512DQ-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512DQ-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512DQ-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512DQ-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512DQ-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512DQ-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512DQ-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512DQ-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512DQ-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512DQ-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512DQ-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512DQ-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512DQ-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512DQ-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512DQ-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512DQ-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512DQ-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512DQ-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512DQ-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512DQ-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512DQ-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512DQ-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512DQ-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512DQ-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512DQ-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512DQ-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i32_stride4_vf64:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512DQ-FCP-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512DQ-FCP-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512DQ-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512DQ-FCP-NEXT: movb $-86, %al
; AVX512DQ-FCP-NEXT: kmovw %eax, %k1
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512DQ-FCP-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512DQ-FCP-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512DQ-FCP-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512DQ-FCP-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-FCP-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-FCP-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512DQ-FCP-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512DQ-FCP-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512DQ-FCP-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512DQ-FCP-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512DQ-FCP-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512DQ-FCP-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512DQ-FCP-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512DQ-FCP-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-FCP-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-FCP-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-FCP-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512DQ-FCP-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512DQ-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512DQ-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512DQ-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512DQ-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512DQ-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512DQ-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512DQ-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
; AVX512BW-LABEL: store_i32_stride4_vf64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512BW-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512BW-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512BW-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512BW-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512BW-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512BW-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512BW-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512BW-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512BW-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512BW-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512BW-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512BW-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
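; zmm14 is the vpermt2d index vector: entries 0-15 select dwords from the
; destination (here a copy of the rdx data) and entries 16-31 from the other
; source (the rcx data), so [0,0,4,20,...] places rdx[i],rcx[i] pairs in the
; upper half of each 4-dword group; the 0 entries are don't-cares that the
; masked merge below overwrites.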
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512BW-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512BW-NEXT: movb $-86, %al
; AVX512BW-NEXT: kmovd %eax, %k1
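; $-86 is 0xAA (0b10101010). At qword granularity, k1 makes each merge below
; take the odd 64-bit lanes from the rdx/rcx permute while keeping the even
; lanes of the rdi/rsi permute, assembling the a,b,c,d groups of the output.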
; AVX512BW-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512BW-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512BW-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512BW-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512BW-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512BW-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512BW-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512BW-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512BW-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512BW-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512BW-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512BW-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512BW-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512BW-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512BW-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512BW-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512BW-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512BW-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512BW-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512BW-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512BW-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512BW-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512BW-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512BW-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512BW-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512BW-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512BW-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512BW-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512BW-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512BW-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512BW-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512BW-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512BW-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512BW-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512BW-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512BW-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512BW-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512BW-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512BW-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512BW-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512BW-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512BW-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-FCP-LABEL: store_i32_stride4_vf64:
; AVX512BW-FCP: # %bb.0:
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512BW-FCP-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512BW-FCP-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512BW-FCP-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512BW-FCP-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512BW-FCP-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512BW-FCP-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512BW-FCP-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512BW-FCP-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512BW-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512BW-FCP-NEXT: movb $-86, %al
; AVX512BW-FCP-NEXT: kmovd %eax, %k1
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512BW-FCP-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512BW-FCP-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512BW-FCP-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512BW-FCP-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512BW-FCP-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512BW-FCP-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512BW-FCP-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512BW-FCP-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512BW-FCP-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512BW-FCP-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512BW-FCP-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512BW-FCP-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512BW-FCP-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512BW-FCP-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512BW-FCP-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512BW-FCP-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512BW-FCP-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512BW-FCP-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512BW-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512BW-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512BW-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512BW-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512BW-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512BW-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512BW-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
; AVX512DQ-BW-LABEL: store_i32_stride4_vf64:
; AVX512DQ-BW: # %bb.0:
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512DQ-BW-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512DQ-BW-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512DQ-BW-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512DQ-BW-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512DQ-BW-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512DQ-BW-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512DQ-BW-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512DQ-BW-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512DQ-BW-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512DQ-BW-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512DQ-BW-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512DQ-BW-NEXT: movb $-86, %al
; AVX512DQ-BW-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512DQ-BW-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512DQ-BW-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512DQ-BW-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512DQ-BW-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-BW-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512DQ-BW-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-BW-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512DQ-BW-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512DQ-BW-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512DQ-BW-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512DQ-BW-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512DQ-BW-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512DQ-BW-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512DQ-BW-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512DQ-BW-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-BW-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-BW-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-BW-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512DQ-BW-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512DQ-BW-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512DQ-BW-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512DQ-BW-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512DQ-BW-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512DQ-BW-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512DQ-BW-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512DQ-BW-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
; AVX512DQ-BW-FCP-LABEL: store_i32_stride4_vf64:
; AVX512DQ-BW-FCP: # %bb.0:
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdi), %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rsi), %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rsi), %zmm23
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rsi), %zmm12
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rsi), %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rdx), %zmm22
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rdx), %zmm25
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rdx), %zmm13
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rdx), %zmm6
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 (%rcx), %zmm21
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 64(%rcx), %zmm26
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 128(%rcx), %zmm19
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 192(%rcx), %zmm9
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm14 = [0,0,4,20,0,0,5,21,0,0,6,22,0,0,7,23]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm21, %zmm14, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [4,20,0,0,5,21,0,0,6,22,0,0,7,23,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm17, %zmm7, %zmm4
; AVX512DQ-BW-FCP-NEXT: movb $-86, %al
; AVX512DQ-BW-FCP-NEXT: kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm16 = [0,0,0,16,0,0,1,17,0,0,2,18,0,0,3,19]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm21, %zmm16, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm11 = [0,16,0,0,1,17,0,0,2,18,0,0,3,19,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm8
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm17, %zmm11, %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, %zmm8 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm18 = [0,0,12,28,0,0,13,29,0,0,14,30,0,0,15,31]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm21, %zmm18, %zmm20
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [12,28,0,0,13,29,0,0,14,30,0,0,15,31,0,0]
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, %zmm10
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm17, %zmm15, %zmm10
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm20, %zmm10 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm20 = [0,0,8,24,0,0,9,25,0,0,10,26,0,0,11,27]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm21, %zmm20, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} zmm21 = [8,24,0,0,9,25,0,0,10,26,0,0,11,27,0,0]
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm17, %zmm21, %zmm0
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm0 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm26, %zmm14, %zmm22
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm17
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm23, %zmm7, %zmm17
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, %zmm17 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm24
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm26, %zmm16, %zmm24
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm22
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm23, %zmm11, %zmm22
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm24, %zmm22 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm27
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm26, %zmm18, %zmm27
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, %zmm24
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm23, %zmm15, %zmm24
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm27, %zmm24 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm26, %zmm20, %zmm25
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm23, %zmm21, %zmm1
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm25, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm19, %zmm14, %zmm23
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, %zmm25
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm7, %zmm25
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, %zmm25 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm19, %zmm16, %zmm23
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, %zmm26
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm11, %zmm26
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, %zmm26 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm23
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm19, %zmm18, %zmm23
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, %zmm27
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm15, %zmm27
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm23, %zmm27 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm19, %zmm20, %zmm13
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm12, %zmm21, %zmm2
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm13, %zmm2 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm14
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm14, %zmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm16
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm11
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm16, %zmm11 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm9, %zmm6, %zmm18
; AVX512DQ-BW-FCP-NEXT: vpermi2d %zmm5, %zmm3, %zmm15
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm18, %zmm15 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm9, %zmm20, %zmm6
; AVX512DQ-BW-FCP-NEXT: vpermt2d %zmm5, %zmm21, %zmm3
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm6, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm3, 896(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm15, 960(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm11, 768(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm7, 832(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm2, 640(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm27, 704(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm26, 512(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm25, 576(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm1, 384(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm24, 448(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm22, 256(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm17, 320(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 128(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm10, 192(%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm8, (%r8)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm4, 64(%r8)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
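; The IR below concatenates the four <64 x i32> inputs pairwise, then applies a
; single stride-4 interleave mask (<0, 64, 128, 192, 1, 65, ...>) so that the
; final <256 x i32> store writes one element from each input per group of four.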
%in.vec0 = load <64 x i32>, ptr %in.vecptr0, align 64
%in.vec1 = load <64 x i32>, ptr %in.vecptr1, align 64
%in.vec2 = load <64 x i32>, ptr %in.vecptr2, align 64
%in.vec3 = load <64 x i32>, ptr %in.vecptr3, align 64
%1 = shufflevector <64 x i32> %in.vec0, <64 x i32> %in.vec1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
%2 = shufflevector <64 x i32> %in.vec2, <64 x i32> %in.vec3, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
%3 = shufflevector <128 x i32> %1, <128 x i32> %2, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255>
%interleaved.vec = shufflevector <256 x i32> %3, <256 x i32> poison, <256 x i32> <i32 0, i32 64, i32 128, i32 192, i32 1, i32 65, i32 129, i32 193, i32 2, i32 66, i32 130, i32 194, i32 3, i32 67, i32 131, i32 195, i32 4, i32 68, i32 132, i32 196, i32 5, i32 69, i32 133, i32 197, i32 6, i32 70, i32 134, i32 198, i32 7, i32 71, i32 135, i32 199, i32 8, i32 72, i32 136, i32 200, i32 9, i32 73, i32 137, i32 201, i32 10, i32 74, i32 138, i32 202, i32 11, i32 75, i32 139, i32 203, i32 12, i32 76, i32 140, i32 204, i32 13, i32 77, i32 141, i32 205, i32 14, i32 78, i32 142, i32 206, i32 15, i32 79, i32 143, i32 207, i32 16, i32 80, i32 144, i32 208, i32 17, i32 81, i32 145, i32 209, i32 18, i32 82, i32 146, i32 210, i32 19, i32 83, i32 147, i32 211, i32 20, i32 84, i32 148, i32 212, i32 21, i32 85, i32 149, i32 213, i32 22, i32 86, i32 150, i32 214, i32 23, i32 87, i32 151, i32 215, i32 24, i32 88, i32 152, i32 216, i32 25, i32 89, i32 153, i32 217, i32 26, i32 90, i32 154, i32 218, i32 27, i32 91, i32 155, i32 219, i32 28, i32 92, i32 156, i32 220, i32 29, i32 93, i32 157, i32 221, i32 30, i32 94, i32 158, i32 222, i32 31, i32 95, i32 159, i32 223, i32 32, i32 96, i32 160, i32 224, i32 33, i32 97, i32 161, i32 225, i32 34, i32 98, i32 162, i32 226, i32 35, i32 99, i32 163, i32 227, i32 36, i32 100, i32 164, i32 228, i32 37, i32 101, i32 165, i32 229, i32 38, i32 102, i32 166, i32 230, i32 39, i32 103, i32 167, i32 231, i32 40, i32 104, i32 168, i32 232, i32 41, i32 105, i32 169, i32 233, i32 42, i32 106, i32 170, i32 234, i32 43, i32 107, i32 171, i32 235, i32 44, i32 108, i32 172, i32 236, i32 45, i32 109, i32 173, i32 237, i32 46, i32 110, i32 174, i32 238, i32 47, i32 111, i32 175, i32 239, i32 48, i32 112, i32 176, i32 240, i32 49, i32 113, i32 177, i32 241, i32 50, i32 114, i32 178, i32 242, i32 51, i32 115, i32 179, i32 243, i32 52, i32 116, i32 180, i32 244, i32 53, i32 117, i32 181, i32 245, i32 54, i32 118, i32 182, i32 246, i32 55, i32 119, i32 183, i32 247, i32 56, i32 120, i32 184, i32 248, i32 57, i32 121, i32 185, i32 249, i32 58, i32 122, i32 186, i32 250, i32 59, i32 123, i32 187, i32 251, i32 60, i32 124, i32 188, i32 252, i32 61, i32 125, i32 189, i32 253, i32 62, i32 126, i32 190, i32 254, i32 63, i32 127, i32 191, i32 255>
store <256 x i32> %interleaved.vec, ptr %out.vec, align 64
ret void
}