llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-5.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW-FCP
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX512DQ-BW
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512DQ-BW-FCP

; These patterns are produced by LoopVectorizer for interleaved stores.
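;
; For illustration only (not taken from any particular source), a scalar loop
; of roughly this shape is what the LoopVectorizer lowers into the wide
; shufflevector-and-store patterns tested below; the names store_stride5,
; out, and a..e are hypothetical:
;
;   /* Each iteration writes one element from each of five streams to
;      five consecutive slots of out, i.e. a stride-5 interleaved store. */
;   void store_stride5(long *out, const long *a, const long *b,
;                      const long *c, const long *d, const long *e,
;                      long n) {
;     for (long i = 0; i < n; ++i) {
;       out[5*i+0] = a[i];
;       out[5*i+1] = b[i];
;       out[5*i+2] = c[i];
;       out[5*i+3] = d[i];
;       out[5*i+4] = e[i];
;     }
;   }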

define void @store_i64_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind {
; SSE-LABEL: store_i64_stride5_vf2:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd (%rdi), %xmm0
; SSE-NEXT:    movapd (%rsi), %xmm1
; SSE-NEXT:    movapd (%rdx), %xmm2
; SSE-NEXT:    movapd (%rcx), %xmm3
; SSE-NEXT:    movapd (%r8), %xmm4
; SSE-NEXT:    movapd %xmm0, %xmm5
; SSE-NEXT:    unpcklpd {{.*#+}} xmm5 = xmm5[0],xmm1[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSE-NEXT:    unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSE-NEXT:    movapd %xmm2, 16(%r9)
; SSE-NEXT:    movapd %xmm0, 32(%r9)
; SSE-NEXT:    movapd %xmm1, 48(%r9)
; SSE-NEXT:    movapd %xmm3, 64(%r9)
; SSE-NEXT:    movapd %xmm5, (%r9)
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i64_stride5_vf2:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %xmm0
; AVX-NEXT:    vmovaps (%rsi), %xmm1
; AVX-NEXT:    vmovaps (%rcx), %xmm2
; AVX-NEXT:    vmovaps (%r8), %xmm3
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm4
; AVX-NEXT:    vinsertf128 $1, (%rdx), %ymm0, %ymm0
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm0[1],ymm4[3],ymm0[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
; AVX-NEXT:    vmovaps %xmm1, 64(%r9)
; AVX-NEXT:    vmovaps %ymm0, (%r9)
; AVX-NEXT:    vmovaps %ymm4, 32(%r9)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX2-LABEL: store_i64_stride5_vf2:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps (%rdi), %xmm0
; AVX2-NEXT:    vmovaps (%rdx), %xmm1
; AVX2-NEXT:    vmovaps (%rcx), %xmm2
; AVX2-NEXT:    vmovaps (%r8), %xmm3
; AVX2-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
; AVX2-NEXT:    vmovaps %xmm1, 64(%r9)
; AVX2-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-NEXT:    vmovaps %ymm4, 32(%r9)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX2-FP-LABEL: store_i64_stride5_vf2:
; AVX2-FP:       # %bb.0:
; AVX2-FP-NEXT:    vmovaps (%rdi), %xmm0
; AVX2-FP-NEXT:    vmovaps (%rdx), %xmm1
; AVX2-FP-NEXT:    vmovaps (%rcx), %xmm2
; AVX2-FP-NEXT:    vmovaps (%r8), %xmm3
; AVX2-FP-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
; AVX2-FP-NEXT:    vmovaps %xmm1, 64(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FP-NEXT:    vmovaps %ymm4, 32(%r9)
; AVX2-FP-NEXT:    vzeroupper
; AVX2-FP-NEXT:    retq
;
; AVX2-FCP-LABEL: store_i64_stride5_vf2:
; AVX2-FCP:       # %bb.0:
; AVX2-FCP-NEXT:    vmovaps (%rdi), %xmm0
; AVX2-FCP-NEXT:    vmovaps (%rdx), %xmm1
; AVX2-FCP-NEXT:    vmovaps (%rcx), %xmm2
; AVX2-FCP-NEXT:    vmovaps (%r8), %xmm3
; AVX2-FCP-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm3[0,1],ymm4[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm3[1]
; AVX2-FCP-NEXT:    vmovaps %xmm1, 64(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm4, 32(%r9)
; AVX2-FCP-NEXT:    vzeroupper
; AVX2-FCP-NEXT:    retq
;
; AVX512-LABEL: store_i64_stride5_vf2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512-NEXT:    vmovdqa (%r8), %xmm2
; AVX512-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX512-FCP-LABEL: store_i64_stride5_vf2:
; AVX512-FCP:       # %bb.0:
; AVX512-FCP-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512-FCP-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512-FCP-NEXT:    vmovdqa (%r8), %xmm2
; AVX512-FCP-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512-FCP-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512-FCP-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512-FCP-NEXT:    vzeroupper
; AVX512-FCP-NEXT:    retq
;
; AVX512DQ-LABEL: store_i64_stride5_vf2:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512DQ-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512DQ-NEXT:    vmovdqa (%r8), %xmm2
; AVX512DQ-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512DQ-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512DQ-NEXT:    vzeroupper
; AVX512DQ-NEXT:    retq
;
; AVX512DQ-FCP-LABEL: store_i64_stride5_vf2:
; AVX512DQ-FCP:       # %bb.0:
; AVX512DQ-FCP-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512DQ-FCP-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512DQ-FCP-NEXT:    vmovdqa (%r8), %xmm2
; AVX512DQ-FCP-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-FCP-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512DQ-FCP-NEXT:    vzeroupper
; AVX512DQ-FCP-NEXT:    retq
;
; AVX512BW-LABEL: store_i64_stride5_vf2:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512BW-NEXT:    vmovdqa (%r8), %xmm2
; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512BW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512BW-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BW-FCP-LABEL: store_i64_stride5_vf2:
; AVX512BW-FCP:       # %bb.0:
; AVX512BW-FCP-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512BW-FCP-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512BW-FCP-NEXT:    vmovdqa (%r8), %xmm2
; AVX512BW-FCP-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512BW-FCP-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512BW-FCP-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512BW-FCP-NEXT:    vzeroupper
; AVX512BW-FCP-NEXT:    retq
;
; AVX512DQ-BW-LABEL: store_i64_stride5_vf2:
; AVX512DQ-BW:       # %bb.0:
; AVX512DQ-BW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512DQ-BW-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512DQ-BW-NEXT:    vmovdqa (%r8), %xmm2
; AVX512DQ-BW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-BW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512DQ-BW-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512DQ-BW-NEXT:    vzeroupper
; AVX512DQ-BW-NEXT:    retq
;
; AVX512DQ-BW-FCP-LABEL: store_i64_stride5_vf2:
; AVX512DQ-BW-FCP:       # %bb.0:
; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdx), %xmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%r8), %xmm2
; AVX512DQ-BW-FCP-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
; AVX512DQ-BW-FCP-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} xmm1 = [7,9]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,2,4,6,8,1,3,5]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, (%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa %xmm1, 64(%r9)
; AVX512DQ-BW-FCP-NEXT:    vzeroupper
; AVX512DQ-BW-FCP-NEXT:    retq
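; The IR below concatenates the five <2 x i64> inputs into a single
; <10 x i64> value, then applies the stride-5 transpose mask
; <0,2,4,6,8,1,3,5,7,9> so element i of input j lands at out[5*i+j].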
  %in.vec0 = load <2 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <2 x i64>, ptr %in.vecptr1, align 64
  %in.vec2 = load <2 x i64>, ptr %in.vecptr2, align 64
  %in.vec3 = load <2 x i64>, ptr %in.vecptr3, align 64
  %in.vec4 = load <2 x i64>, ptr %in.vecptr4, align 64
  %1 = shufflevector <2 x i64> %in.vec0, <2 x i64> %in.vec1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = shufflevector <2 x i64> %in.vec2, <2 x i64> %in.vec3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = shufflevector <4 x i64> %1, <4 x i64> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %4 = shufflevector <2 x i64> %in.vec4, <2 x i64> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %5 = shufflevector <8 x i64> %3, <8 x i64> %4, <10 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
  %interleaved.vec = shufflevector <10 x i64> %5, <10 x i64> poison, <10 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 1, i32 3, i32 5, i32 7, i32 9>
  store <10 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}

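; vf4: five <4 x i64> inputs interleaved into one <20 x i64> (160-byte)
; stride-5 store.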
define void @store_i64_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind {
; SSE-LABEL: store_i64_stride5_vf4:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm2
; SSE-NEXT:    movaps 16(%rdi), %xmm0
; SSE-NEXT:    movaps (%rsi), %xmm4
; SSE-NEXT:    movaps 16(%rsi), %xmm6
; SSE-NEXT:    movaps (%rdx), %xmm5
; SSE-NEXT:    movaps 16(%rdx), %xmm1
; SSE-NEXT:    movaps (%rcx), %xmm7
; SSE-NEXT:    movaps 16(%rcx), %xmm8
; SSE-NEXT:    movaps (%r8), %xmm9
; SSE-NEXT:    movaps 16(%r8), %xmm3
; SSE-NEXT:    movaps %xmm6, %xmm10
; SSE-NEXT:    unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm1[1]
; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm8[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm3[1]
; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3]
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE-NEXT:    movaps %xmm4, %xmm6
; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1]
; SSE-NEXT:    movlhps {{.*#+}} xmm5 = xmm5[0],xmm7[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm9[1]
; SSE-NEXT:    shufps {{.*#+}} xmm9 = xmm9[0,1],xmm2[2,3]
; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT:    movaps %xmm2, (%r9)
; SSE-NEXT:    movaps %xmm5, 16(%r9)
; SSE-NEXT:    movaps %xmm9, 32(%r9)
; SSE-NEXT:    movaps %xmm6, 48(%r9)
; SSE-NEXT:    movaps %xmm7, 64(%r9)
; SSE-NEXT:    movaps %xmm0, 80(%r9)
; SSE-NEXT:    movaps %xmm1, 96(%r9)
; SSE-NEXT:    movaps %xmm3, 112(%r9)
; SSE-NEXT:    movaps %xmm10, 128(%r9)
; SSE-NEXT:    movaps %xmm8, 144(%r9)
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i64_stride5_vf4:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovapd (%rdi), %ymm0
; AVX-NEXT:    vmovapd (%r8), %ymm1
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
; AVX-NEXT:    vmovapd 16(%rdx), %xmm3
; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[2],ymm2[3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3]
; AVX-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0],ymm1[1],ymm4[2,3]
; AVX-NEXT:    vmovlpd {{.*#+}} xmm3 = mem[0],xmm3[1]
; AVX-NEXT:    vbroadcastsd 24(%rcx), %ymm5
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm1[3]
; AVX-NEXT:    vmovaps (%rdx), %xmm5
; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm6[2],ymm0[3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm5[0],mem[0]
; AVX-NEXT:    vmovaps (%rdi), %xmm5
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm5 = xmm5[0],mem[0]
; AVX-NEXT:    vmovaps %xmm5, (%r9)
; AVX-NEXT:    vmovaps %xmm1, 16(%r9)
; AVX-NEXT:    vmovapd %ymm4, 64(%r9)
; AVX-NEXT:    vmovapd %ymm0, 32(%r9)
; AVX-NEXT:    vmovapd %ymm2, 96(%r9)
; AVX-NEXT:    vmovapd %ymm3, 128(%r9)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX2-LABEL: store_i64_stride5_vf4:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-NEXT:    vmovaps (%rsi), %ymm1
; AVX2-NEXT:    vmovaps (%rdx), %ymm2
; AVX2-NEXT:    vmovaps (%r8), %ymm3
; AVX2-NEXT:    vmovaps (%rdx), %xmm4
; AVX2-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm5
; AVX2-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm3[0,1],ymm5[2,3,4,5,6,7]
; AVX2-NEXT:    vmovaps (%rsi), %xmm6
; AVX2-NEXT:    vinsertf128 $1, (%rcx), %ymm6, %ymm6
; AVX2-NEXT:    vmovaps (%rdi), %xmm7
; AVX2-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
; AVX2-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm3[2,3],ymm6[4,5,6,7]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 16(%rcx), %ymm7
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm7[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
; AVX2-NEXT:    vbroadcastsd 24(%rcx), %ymm2
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
; AVX2-NEXT:    vmovaps %ymm6, 64(%r9)
; AVX2-NEXT:    vmovaps %ymm1, 128(%r9)
; AVX2-NEXT:    vmovaps %ymm4, (%r9)
; AVX2-NEXT:    vmovaps %ymm5, 32(%r9)
; AVX2-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX2-FP-LABEL: store_i64_stride5_vf4:
; AVX2-FP:       # %bb.0:
; AVX2-FP-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-FP-NEXT:    vmovaps (%rsi), %ymm1
; AVX2-FP-NEXT:    vmovaps (%rdx), %ymm2
; AVX2-FP-NEXT:    vmovaps (%r8), %ymm3
; AVX2-FP-NEXT:    vmovaps (%rdx), %xmm4
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm5
; AVX2-FP-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm3[0,1],ymm5[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovaps (%rsi), %xmm6
; AVX2-FP-NEXT:    vinsertf128 $1, (%rcx), %ymm6, %ymm6
; AVX2-FP-NEXT:    vmovaps (%rdi), %xmm7
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm3[2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 16(%rcx), %ymm7
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm7[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
; AVX2-FP-NEXT:    vbroadcastsd 24(%rcx), %ymm2
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FP-NEXT:    vmovaps %ymm6, 64(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm1, 128(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm4, (%r9)
; AVX2-FP-NEXT:    vmovaps %ymm5, 32(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FP-NEXT:    vzeroupper
; AVX2-FP-NEXT:    retq
;
; AVX2-FCP-LABEL: store_i64_stride5_vf4:
; AVX2-FCP:       # %bb.0:
; AVX2-FCP-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-FCP-NEXT:    vmovaps (%rsi), %ymm1
; AVX2-FCP-NEXT:    vmovaps (%rdx), %ymm2
; AVX2-FCP-NEXT:    vmovaps (%r8), %ymm3
; AVX2-FCP-NEXT:    vmovaps (%rdx), %xmm4
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm5
; AVX2-FCP-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm3[0,1],ymm5[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps (%rsi), %xmm6
; AVX2-FCP-NEXT:    vinsertf128 $1, (%rcx), %ymm6, %ymm6
; AVX2-FCP-NEXT:    vmovaps (%rdi), %xmm7
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm6[0],ymm4[2],ymm6[2]
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm3[2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 16(%rcx), %ymm7
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm7[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
; AVX2-FCP-NEXT:    vbroadcastsd 24(%rcx), %ymm2
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FCP-NEXT:    vmovaps %ymm6, 64(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm1, 128(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm4, (%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm5, 32(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FCP-NEXT:    vzeroupper
; AVX2-FCP-NEXT:    retq
;
; AVX512-LABEL: store_i64_stride5_vf4:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512-NEXT:    vmovdqa (%r8), %ymm2
; AVX512-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX512-FCP-LABEL: store_i64_stride5_vf4:
; AVX512-FCP:       # %bb.0:
; AVX512-FCP-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512-FCP-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512-FCP-NEXT:    vmovdqa (%r8), %ymm2
; AVX512-FCP-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512-FCP-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512-FCP-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512-FCP-NEXT:    vzeroupper
; AVX512-FCP-NEXT:    retq
;
; AVX512DQ-LABEL: store_i64_stride5_vf4:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512DQ-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512DQ-NEXT:    vmovdqa (%r8), %ymm2
; AVX512DQ-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512DQ-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512DQ-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512DQ-NEXT:    vzeroupper
; AVX512DQ-NEXT:    retq
;
; AVX512DQ-FCP-LABEL: store_i64_stride5_vf4:
; AVX512DQ-FCP:       # %bb.0:
; AVX512DQ-FCP-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512DQ-FCP-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512DQ-FCP-NEXT:    vmovdqa (%r8), %ymm2
; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512DQ-FCP-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512DQ-FCP-NEXT:    vzeroupper
; AVX512DQ-FCP-NEXT:    retq
;
; AVX512BW-LABEL: store_i64_stride5_vf4:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512BW-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512BW-NEXT:    vmovdqa (%r8), %ymm2
; AVX512BW-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512BW-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BW-FCP-LABEL: store_i64_stride5_vf4:
; AVX512BW-FCP:       # %bb.0:
; AVX512BW-FCP-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512BW-FCP-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512BW-FCP-NEXT:    vmovdqa (%r8), %ymm2
; AVX512BW-FCP-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512BW-FCP-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512BW-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512BW-FCP-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512BW-FCP-NEXT:    vzeroupper
; AVX512BW-FCP-NEXT:    retq
;
; AVX512DQ-BW-LABEL: store_i64_stride5_vf4:
; AVX512DQ-BW:       # %bb.0:
; AVX512DQ-BW-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512DQ-BW-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-NEXT:    vmovdqa (%r8), %ymm2
; AVX512DQ-BW-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-BW-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512DQ-BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512DQ-BW-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512DQ-BW-NEXT:    vzeroupper
; AVX512DQ-BW-NEXT:    retq
;
; AVX512DQ-BW-FCP-LABEL: store_i64_stride5_vf4:
; AVX512DQ-BW-FCP:       # %bb.0:
; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%rdx), %ymm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa (%r8), %ymm2
; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} ymm3 = [15,3,7,0]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [0,4,8,12,0,1,5,9]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm5 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm4 = [13,0,2,6,10,14,0,3]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm0, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm0 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm4, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, 64(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm5, (%r9)
; AVX512DQ-BW-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa %ymm0, 128(%r9)
; AVX512DQ-BW-FCP-NEXT:    vzeroupper
; AVX512DQ-BW-FCP-NEXT:    retq
  %in.vec0 = load <4 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <4 x i64>, ptr %in.vecptr1, align 64
  %in.vec2 = load <4 x i64>, ptr %in.vecptr2, align 64
  %in.vec3 = load <4 x i64>, ptr %in.vecptr3, align 64
  %in.vec4 = load <4 x i64>, ptr %in.vecptr4, align 64
  %1 = shufflevector <4 x i64> %in.vec0, <4 x i64> %in.vec1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %2 = shufflevector <4 x i64> %in.vec2, <4 x i64> %in.vec3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %3 = shufflevector <8 x i64> %1, <8 x i64> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %4 = shufflevector <4 x i64> %in.vec4, <4 x i64> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %5 = shufflevector <16 x i64> %3, <16 x i64> %4, <20 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
  %interleaved.vec = shufflevector <20 x i64> %5, <20 x i64> poison, <20 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 1, i32 5, i32 9, i32 13, i32 17, i32 2, i32 6, i32 10, i32 14, i32 18, i32 3, i32 7, i32 11, i32 15, i32 19>
  store <20 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}

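; vf8: five <8 x i64> inputs interleaved into one <40 x i64> (320-byte)
; stride-5 store; the SSE lowering starts spilling XMM temporaries here.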
define void @store_i64_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind {
; SSE-LABEL: store_i64_stride5_vf8:
; SSE:       # %bb.0:
; SSE-NEXT:    movapd (%rdi), %xmm0
; SSE-NEXT:    movapd 16(%rdi), %xmm2
; SSE-NEXT:    movapd 32(%rdi), %xmm5
; SSE-NEXT:    movapd (%rsi), %xmm1
; SSE-NEXT:    movapd 16(%rsi), %xmm9
; SSE-NEXT:    movapd 32(%rsi), %xmm8
; SSE-NEXT:    movapd (%rdx), %xmm3
; SSE-NEXT:    movapd 16(%rdx), %xmm6
; SSE-NEXT:    movapd 32(%rdx), %xmm10
; SSE-NEXT:    movapd (%rcx), %xmm7
; SSE-NEXT:    movapd 16(%rcx), %xmm11
; SSE-NEXT:    movapd 32(%rcx), %xmm12
; SSE-NEXT:    movapd (%r8), %xmm13
; SSE-NEXT:    movapd 16(%r8), %xmm14
; SSE-NEXT:    movapd 32(%r8), %xmm15
; SSE-NEXT:    movapd %xmm0, %xmm4
; SSE-NEXT:    unpcklpd {{.*#+}} xmm4 = xmm4[0],xmm1[0]
; SSE-NEXT:    movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm13[0],xmm0[1]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm7[0]
; SSE-NEXT:    movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm13[1]
; SSE-NEXT:    movapd %xmm2, %xmm13
; SSE-NEXT:    unpcklpd {{.*#+}} xmm13 = xmm13[0],xmm9[0]
; SSE-NEXT:    movsd {{.*#+}} xmm2 = xmm14[0],xmm2[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm6[1]
; SSE-NEXT:    unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm11[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm14[1]
; SSE-NEXT:    movapd %xmm5, %xmm14
; SSE-NEXT:    unpcklpd {{.*#+}} xmm14 = xmm14[0],xmm8[0]
; SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm15[0],xmm5[1]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm10[1]
; SSE-NEXT:    unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm12[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm15[1]
; SSE-NEXT:    movapd 48(%rdi), %xmm15
; SSE-NEXT:    movapd 48(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm15, %xmm3
; SSE-NEXT:    unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSE-NEXT:    movapd 48(%r8), %xmm4
; SSE-NEXT:    movsd {{.*#+}} xmm15 = xmm4[0],xmm15[1]
; SSE-NEXT:    movapd 48(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd 48(%rcx), %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
; SSE-NEXT:    movapd %xmm0, 304(%r9)
; SSE-NEXT:    movapd %xmm2, 288(%r9)
; SSE-NEXT:    movapd %xmm15, 272(%r9)
; SSE-NEXT:    movapd %xmm1, 256(%r9)
; SSE-NEXT:    movapd %xmm3, 240(%r9)
; SSE-NEXT:    movapd %xmm12, 224(%r9)
; SSE-NEXT:    movapd %xmm8, 208(%r9)
; SSE-NEXT:    movapd %xmm5, 192(%r9)
; SSE-NEXT:    movapd %xmm10, 176(%r9)
; SSE-NEXT:    movapd %xmm14, 160(%r9)
; SSE-NEXT:    movapd %xmm11, 144(%r9)
; SSE-NEXT:    movapd %xmm9, 128(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%r9)
; SSE-NEXT:    movapd %xmm6, 96(%r9)
; SSE-NEXT:    movapd %xmm13, 80(%r9)
; SSE-NEXT:    movapd %xmm7, 64(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, (%r9)
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i64_stride5_vf8:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovapd 32(%rdi), %ymm9
; AVX-NEXT:    vmovaps (%rdi), %ymm1
; AVX-NEXT:    vmovaps (%rcx), %ymm0
; AVX-NEXT:    vmovaps (%r8), %ymm3
; AVX-NEXT:    vmovapd 32(%r8), %ymm5
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 16(%rdx), %xmm7
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm7[1],ymm0[3],ymm7[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
; AVX-NEXT:    vmovaps (%rdx), %xmm4
; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm2
; AVX-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3],ymm6[4,5,6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX-NEXT:    vpermilps {{.*#+}} xmm6 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm8 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0],ymm5[1],ymm6[2,3]
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm8 = mem[2,3],ymm9[2,3]
; AVX-NEXT:    vmovapd 48(%rdx), %xmm10
; AVX-NEXT:    vshufpd {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[2],ymm8[3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm5[2],ymm8[3]
; AVX-NEXT:    vmovaps 32(%rdx), %xmm11
; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm12
; AVX-NEXT:    vbroadcastsd 40(%rsi), %ymm13
; AVX-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm13[2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm12[3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm9 = ymm5[0],ymm9[1,2,3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm12 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm13 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm3[2,3],ymm12[4,5,6,7]
; AVX-NEXT:    vmovapd 48(%rsi), %xmm13
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm13[1],xmm10[1]
; AVX-NEXT:    vbroadcastsd 56(%rcx), %ymm13
; AVX-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm10[0,1,2],ymm5[3]
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm7 = xmm7[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5],ymm1[6,7]
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm11[0],mem[0]
; AVX-NEXT:    vmovaps (%rdi), %xmm7
; AVX-NEXT:    vmovaps 32(%rdi), %xmm10
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm10 = xmm10[0],mem[0]
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm4[0],mem[0]
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm7 = xmm7[0],mem[0]
; AVX-NEXT:    vmovaps %xmm7, (%r9)
; AVX-NEXT:    vmovaps %xmm4, 16(%r9)
; AVX-NEXT:    vmovaps %xmm10, 160(%r9)
; AVX-NEXT:    vmovaps %xmm3, 176(%r9)
; AVX-NEXT:    vmovaps %ymm12, 64(%r9)
; AVX-NEXT:    vmovapd %ymm9, 192(%r9)
; AVX-NEXT:    vmovapd %ymm8, 256(%r9)
; AVX-NEXT:    vmovapd %ymm6, 224(%r9)
; AVX-NEXT:    vmovaps %ymm2, 32(%r9)
; AVX-NEXT:    vmovaps %ymm1, 96(%r9)
; AVX-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX-NEXT:    vmovapd %ymm5, 288(%r9)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX2-LABEL: store_i64_stride5_vf8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm5
; AVX2-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-NEXT:    vmovaps (%rcx), %ymm4
; AVX2-NEXT:    vmovaps (%r8), %ymm2
; AVX2-NEXT:    vmovaps 32(%r8), %ymm6
; AVX2-NEXT:    vmovaps (%rdx), %xmm7
; AVX2-NEXT:    vmovaps 32(%rdx), %xmm10
; AVX2-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm3
; AVX2-NEXT:    vbroadcastsd 8(%rsi), %ymm8
; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2,3,4,5,6,7]
; AVX2-NEXT:    vmovaps (%rsi), %xmm8
; AVX2-NEXT:    vmovaps 32(%rsi), %xmm11
; AVX2-NEXT:    vinsertf128 $1, (%rcx), %ymm8, %ymm8
; AVX2-NEXT:    vmovaps (%rdi), %xmm9
; AVX2-NEXT:    vmovaps 32(%rdi), %xmm12
; AVX2-NEXT:    vinsertf128 $1, %xmm7, %ymm9, %ymm7
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
; AVX2-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm9
; AVX2-NEXT:    vbroadcastsd 40(%rsi), %ymm13
; AVX2-NEXT:    vblendps {{.*#+}} ymm13 = ymm5[0,1,2,3],ymm13[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm13[0,1,2,3,4,5],ymm9[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm6[0,1],ymm9[2,3,4,5,6,7]
; AVX2-NEXT:    vinsertf128 $1, 32(%rcx), %ymm11, %ymm11
; AVX2-NEXT:    vinsertf128 $1, %xmm10, %ymm12, %ymm10
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
; AVX2-NEXT:    vmovddup {{.*#+}} xmm11 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm2[2,3],ymm11[4,5,6,7]
; AVX2-NEXT:    vbroadcastsd 56(%rsi), %ymm12
; AVX2-NEXT:    vblendps {{.*#+}} xmm12 = xmm12[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 56(%rcx), %ymm13
; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm6[6,7]
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 24(%rsi), %ymm14
; AVX2-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm2[6,7]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3],ymm5[2,3]
; AVX2-NEXT:    vbroadcastsd 48(%rcx), %ymm14
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3],ymm5[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovaps %ymm11, 64(%r9)
; AVX2-NEXT:    vmovaps %ymm10, 160(%r9)
; AVX2-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-NEXT:    vmovaps %ymm9, 192(%r9)
; AVX2-NEXT:    vmovaps %ymm5, 256(%r9)
; AVX2-NEXT:    vmovaps %ymm8, 224(%r9)
; AVX2-NEXT:    vmovaps %ymm7, (%r9)
; AVX2-NEXT:    vmovaps %ymm3, 32(%r9)
; AVX2-NEXT:    vmovaps %ymm13, 128(%r9)
; AVX2-NEXT:    vmovaps %ymm12, 288(%r9)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX2-FP-LABEL: store_i64_stride5_vf8:
; AVX2-FP:       # %bb.0:
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %ymm5
; AVX2-FP-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-FP-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-FP-NEXT:    vmovaps (%rcx), %ymm4
; AVX2-FP-NEXT:    vmovaps (%r8), %ymm2
; AVX2-FP-NEXT:    vmovaps 32(%r8), %ymm6
; AVX2-FP-NEXT:    vmovaps (%rdx), %xmm7
; AVX2-FP-NEXT:    vmovaps 32(%rdx), %xmm10
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm3
; AVX2-FP-NEXT:    vbroadcastsd 8(%rsi), %ymm8
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovaps (%rsi), %xmm8
; AVX2-FP-NEXT:    vmovaps 32(%rsi), %xmm11
; AVX2-FP-NEXT:    vinsertf128 $1, (%rcx), %ymm8, %ymm8
; AVX2-FP-NEXT:    vmovaps (%rdi), %xmm9
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %xmm12
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm7, %ymm9, %ymm7
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm9
; AVX2-FP-NEXT:    vbroadcastsd 40(%rsi), %ymm13
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm13 = ymm5[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm9 = ymm13[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm9 = ymm6[0,1],ymm9[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm11, %ymm11
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm10, %ymm12, %ymm10
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm11 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm2[2,3],ymm11[4,5,6,7]
; AVX2-FP-NEXT:    vbroadcastsd 56(%rsi), %ymm12
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm12 = xmm12[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 56(%rcx), %ymm13
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm6[6,7]
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 24(%rsi), %ymm14
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3],ymm5[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 48(%rcx), %ymm14
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovaps %ymm11, 64(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm10, 160(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm9, 192(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm5, 256(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm8, 224(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm7, (%r9)
; AVX2-FP-NEXT:    vmovaps %ymm3, 32(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm13, 128(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm12, 288(%r9)
; AVX2-FP-NEXT:    vzeroupper
; AVX2-FP-NEXT:    retq
;
; AVX2-FCP-LABEL: store_i64_stride5_vf8:
; AVX2-FCP:       # %bb.0:
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %ymm5
; AVX2-FCP-NEXT:    vmovaps (%rdi), %ymm0
; AVX2-FCP-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-FCP-NEXT:    vmovaps (%rcx), %ymm4
; AVX2-FCP-NEXT:    vmovaps (%r8), %ymm2
; AVX2-FCP-NEXT:    vmovaps 32(%r8), %ymm6
; AVX2-FCP-NEXT:    vmovaps (%rdx), %xmm7
; AVX2-FCP-NEXT:    vmovaps 32(%rdx), %xmm10
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm3
; AVX2-FCP-NEXT:    vbroadcastsd 8(%rsi), %ymm8
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps (%rsi), %xmm8
; AVX2-FCP-NEXT:    vmovaps 32(%rsi), %xmm11
; AVX2-FCP-NEXT:    vinsertf128 $1, (%rcx), %ymm8, %ymm8
; AVX2-FCP-NEXT:    vmovaps (%rdi), %xmm9
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %xmm12
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm7, %ymm9, %ymm7
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm9
; AVX2-FCP-NEXT:    vbroadcastsd 40(%rsi), %ymm13
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm13 = ymm5[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm9 = ymm13[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm9 = ymm6[0,1],ymm9[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm11, %ymm11
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm10, %ymm12, %ymm10
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm11 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm2[2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rsi), %ymm12
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm12 = xmm12[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rcx), %ymm13
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm6[6,7]
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm13 = ymm13[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 24(%rsi), %ymm14
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3],ymm5[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 48(%rcx), %ymm14
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovaps %ymm11, 64(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm10, 160(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm9, 192(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm5, 256(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm8, 224(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm7, (%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm3, 32(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm13, 128(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm12, 288(%r9)
; AVX2-FCP-NEXT:    vzeroupper
; AVX2-FCP-NEXT:    retq
;
; AVX512-LABEL: store_i64_stride5_vf8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512-NEXT:    movb $49, %al
; AVX512-NEXT:    kmovw %eax, %k1
; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512-NEXT:    movb $8, %al
; AVX512-NEXT:    kmovw %eax, %k2
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512-NEXT:    movb $-116, %al
; AVX512-NEXT:    kmovw %eax, %k2
; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512-NEXT:    movb $24, %al
; AVX512-NEXT:    kmovw %eax, %k1
; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX512-FCP-LABEL: store_i64_stride5_vf8:
; AVX512-FCP:       # %bb.0:
; AVX512-FCP-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512-FCP-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512-FCP-NEXT:    movb $49, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512-FCP-NEXT:    movb $8, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512-FCP-NEXT:    movb $-116, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512-FCP-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512-FCP-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512-FCP-NEXT:    movb $24, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512-FCP-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512-FCP-NEXT:    vzeroupper
; AVX512-FCP-NEXT:    retq
;
; AVX512DQ-LABEL: store_i64_stride5_vf8:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512DQ-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512DQ-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512DQ-NEXT:    movb $49, %al
; AVX512DQ-NEXT:    kmovw %eax, %k1
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512DQ-NEXT:    movb $8, %al
; AVX512DQ-NEXT:    kmovw %eax, %k2
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512DQ-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512DQ-NEXT:    movb $-116, %al
; AVX512DQ-NEXT:    kmovw %eax, %k2
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512DQ-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512DQ-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512DQ-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512DQ-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-NEXT:    movb $24, %al
; AVX512DQ-NEXT:    kmovw %eax, %k1
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512DQ-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512DQ-NEXT:    vzeroupper
; AVX512DQ-NEXT:    retq
;
; AVX512DQ-FCP-LABEL: store_i64_stride5_vf8:
; AVX512DQ-FCP:       # %bb.0:
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512DQ-FCP-NEXT:    movb $49, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT:    movb $8, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512DQ-FCP-NEXT:    movb $-116, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512DQ-FCP-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-FCP-NEXT:    movb $24, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512DQ-FCP-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512DQ-FCP-NEXT:    vzeroupper
; AVX512DQ-FCP-NEXT:    retq
;
; AVX512BW-LABEL: store_i64_stride5_vf8:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512BW-NEXT:    movb $49, %al
; AVX512BW-NEXT:    kmovd %eax, %k1
; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512BW-NEXT:    movb $8, %al
; AVX512BW-NEXT:    kmovd %eax, %k2
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512BW-NEXT:    movb $-116, %al
; AVX512BW-NEXT:    kmovd %eax, %k2
; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512BW-NEXT:    movb $24, %al
; AVX512BW-NEXT:    kmovd %eax, %k1
; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512BW-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BW-FCP-LABEL: store_i64_stride5_vf8:
; AVX512BW-FCP:       # %bb.0:
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512BW-FCP-NEXT:    movb $49, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512BW-FCP-NEXT:    movb $8, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512BW-FCP-NEXT:    movb $-116, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512BW-FCP-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512BW-FCP-NEXT:    movb $24, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512BW-FCP-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512BW-FCP-NEXT:    vzeroupper
; AVX512BW-FCP-NEXT:    retq
;
; AVX512DQ-BW-LABEL: store_i64_stride5_vf8:
; AVX512DQ-BW:       # %bb.0:
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512DQ-BW-NEXT:    movb $49, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512DQ-BW-NEXT:    movb $8, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512DQ-BW-NEXT:    movb $-116, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-BW-NEXT:    movb $24, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512DQ-BW-NEXT:    vzeroupper
; AVX512DQ-BW-NEXT:    retq
;
; AVX512DQ-BW-FCP-LABEL: store_i64_stride5_vf8:
; AVX512DQ-BW-FCP:       # %bb.0:
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm6
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm1
; AVX512DQ-BW-FCP-NEXT:    movb $49, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT:    movb $8, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm6
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm7
; AVX512DQ-BW-FCP-NEXT:    movb $-116, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm7 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm7, %zmm6
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm4, %zmm5, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm8 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm8, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm8 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-FCP-NEXT:    # zmm8 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm9 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-BW-FCP-NEXT:    movb $24, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm9 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm9, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm9 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-FCP-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm9
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm4, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm2 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm2, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, 256(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, 192(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, 128(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, 64(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, (%r9)
; AVX512DQ-BW-FCP-NEXT:    vzeroupper
; AVX512DQ-BW-FCP-NEXT:    retq
  %in.vec0 = load <8 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <8 x i64>, ptr %in.vecptr1, align 64
  %in.vec2 = load <8 x i64>, ptr %in.vecptr2, align 64
  %in.vec3 = load <8 x i64>, ptr %in.vecptr3, align 64
  %in.vec4 = load <8 x i64>, ptr %in.vecptr4, align 64
  %1 = shufflevector <8 x i64> %in.vec0, <8 x i64> %in.vec1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %2 = shufflevector <8 x i64> %in.vec2, <8 x i64> %in.vec3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %3 = shufflevector <16 x i64> %1, <16 x i64> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %4 = shufflevector <8 x i64> %in.vec4, <8 x i64> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %5 = shufflevector <32 x i64> %3, <32 x i64> %4, <40 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39>
  %interleaved.vec = shufflevector <40 x i64> %5, <40 x i64> poison, <40 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 1, i32 9, i32 17, i32 25, i32 33, i32 2, i32 10, i32 18, i32 26, i32 34, i32 3, i32 11, i32 19, i32 27, i32 35, i32 4, i32 12, i32 20, i32 28, i32 36, i32 5, i32 13, i32 21, i32 29, i32 37, i32 6, i32 14, i32 22, i32 30, i32 38, i32 7, i32 15, i32 23, i32 31, i32 39>
  store <40 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}

define void @store_i64_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind {
; SSE-LABEL: store_i64_stride5_vf16:
; SSE:       # %bb.0:
; SSE-NEXT:    subq $280, %rsp # imm = 0x118
; SSE-NEXT:    movapd (%rdi), %xmm3
; SSE-NEXT:    movapd 16(%rdi), %xmm4
; SSE-NEXT:    movapd 32(%rdi), %xmm6
; SSE-NEXT:    movapd (%rsi), %xmm5
; SSE-NEXT:    movapd 16(%rsi), %xmm8
; SSE-NEXT:    movapd 32(%rsi), %xmm10
; SSE-NEXT:    movapd (%rdx), %xmm7
; SSE-NEXT:    movapd 16(%rdx), %xmm9
; SSE-NEXT:    movapd 32(%rdx), %xmm12
; SSE-NEXT:    movapd (%rcx), %xmm11
; SSE-NEXT:    movapd 16(%rcx), %xmm13
; SSE-NEXT:    movapd 32(%rcx), %xmm15
; SSE-NEXT:    movapd (%r8), %xmm0
; SSE-NEXT:    movapd 16(%r8), %xmm1
; SSE-NEXT:    movapd 32(%r8), %xmm2
; SSE-NEXT:    movapd %xmm3, %xmm14
; SSE-NEXT:    unpcklpd {{.*#+}} xmm14 = xmm14[0],xmm5[0]
; SSE-NEXT:    movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
; SSE-NEXT:    movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm7[1]
; SSE-NEXT:    movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm7 = xmm7[0],xmm11[0]
; SSE-NEXT:    movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm0[1]
; SSE-NEXT:    movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd %xmm4, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm8[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
; SSE-NEXT:    movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm9[1]
; SSE-NEXT:    movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm9 = xmm9[0],xmm13[0]
; SSE-NEXT:    movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm1[1]
; SSE-NEXT:    movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd %xmm6, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm10[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm6 = xmm2[0],xmm6[1]
; SSE-NEXT:    movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm12[1]
; SSE-NEXT:    movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm12 = xmm12[0],xmm15[0]
; SSE-NEXT:    movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm2[1]
; SSE-NEXT:    movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rdi), %xmm1
; SSE-NEXT:    movapd 48(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rdi), %xmm1
; SSE-NEXT:    movapd 64(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rcx), %xmm15
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm15[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1]
; SSE-NEXT:    movapd 80(%rdi), %xmm14
; SSE-NEXT:    movapd 80(%rsi), %xmm13
; SSE-NEXT:    movapd %xmm14, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm13[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
; SSE-NEXT:    movapd 80(%rdx), %xmm10
; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm10[1]
; SSE-NEXT:    movapd 80(%rcx), %xmm9
; SSE-NEXT:    unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm9[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1]
; SSE-NEXT:    movapd 96(%rdi), %xmm11
; SSE-NEXT:    movapd 96(%rsi), %xmm8
; SSE-NEXT:    movapd %xmm11, %xmm12
; SSE-NEXT:    unpcklpd {{.*#+}} xmm12 = xmm12[0],xmm8[0]
; SSE-NEXT:    movapd 96(%r8), %xmm2
; SSE-NEXT:    movsd {{.*#+}} xmm11 = xmm2[0],xmm11[1]
; SSE-NEXT:    movapd 96(%rdx), %xmm6
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm6[1]
; SSE-NEXT:    movapd 96(%rcx), %xmm3
; SSE-NEXT:    unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm3[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSE-NEXT:    movapd 112(%rdi), %xmm4
; SSE-NEXT:    movapd 112(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm4, %xmm5
; SSE-NEXT:    unpcklpd {{.*#+}} xmm5 = xmm5[0],xmm2[0]
; SSE-NEXT:    movapd 112(%r8), %xmm7
; SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm7[0],xmm4[1]
; SSE-NEXT:    movapd 112(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd 112(%rcx), %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
; SSE-NEXT:    movapd %xmm0, 624(%r9)
; SSE-NEXT:    movapd %xmm2, 608(%r9)
; SSE-NEXT:    movapd %xmm4, 592(%r9)
; SSE-NEXT:    movapd %xmm1, 576(%r9)
; SSE-NEXT:    movapd %xmm5, 560(%r9)
; SSE-NEXT:    movapd %xmm3, 544(%r9)
; SSE-NEXT:    movapd %xmm8, 528(%r9)
; SSE-NEXT:    movapd %xmm11, 512(%r9)
; SSE-NEXT:    movapd %xmm6, 496(%r9)
; SSE-NEXT:    movapd %xmm12, 480(%r9)
; SSE-NEXT:    movapd %xmm9, 464(%r9)
; SSE-NEXT:    movapd %xmm13, 448(%r9)
; SSE-NEXT:    movapd %xmm14, 432(%r9)
; SSE-NEXT:    movapd %xmm10, 416(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 400(%r9)
; SSE-NEXT:    movapd %xmm15, 384(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 368(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 352(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 336(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 320(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 304(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 288(%r9)
; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 272(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 256(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 240(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 224(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 208(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 192(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 176(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 160(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 144(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 128(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 96(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 80(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 64(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, (%r9)
; SSE-NEXT:    addq $280, %rsp # imm = 0x118
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i64_stride5_vf16:
; AVX:       # %bb.0:
; AVX-NEXT:    subq $216, %rsp
; AVX-NEXT:    vmovapd 32(%rdi), %ymm5
; AVX-NEXT:    vmovaps (%rdi), %ymm8
; AVX-NEXT:    vmovapd 96(%rdi), %ymm2
; AVX-NEXT:    vmovaps (%rcx), %ymm0
; AVX-NEXT:    vmovaps 64(%rcx), %ymm1
; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 16(%rdx), %xmm11
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm11[1],ymm0[3],ymm11[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 80(%rdx), %xmm7
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm7[1],ymm1[3],ymm7[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX-NEXT:    vmovapd 96(%rcx), %xmm1
; AVX-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps (%rdx), %xmm0
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT:    vbroadcastsd 8(%rsi), %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm8[0],mem[0],ymm8[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovaps 32(%rdx), %xmm0
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT:    vbroadcastsd 40(%rsi), %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm13 = ymm1[0,1,2],ymm0[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm15 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT:    vmovaps 64(%rdi), %ymm0
; AVX-NEXT:    vbroadcastsd 72(%rsi), %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovaps 64(%rdx), %xmm10
; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm3
; AVX-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3,4,5],ymm3[6,7]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
; AVX-NEXT:    vbroadcastsd 104(%rsi), %ymm3
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm2[0,1],ymm3[2,3]
; AVX-NEXT:    vmovaps 96(%rdx), %xmm14
; AVX-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm9
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm9[3]
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm9 = xmm11[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
; AVX-NEXT:    vmovaps (%r8), %ymm9
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm11 # 32-byte Folded Reload
; AVX-NEXT:    # ymm11 = mem[0,1,2,3,4,5],ymm9[6,7]
; AVX-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm9[0,1],ymm12[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm9[2,3],ymm6[4,5,6,7]
; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm9[4,5],ymm8[6,7]
; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3],ymm5[2,3]
; AVX-NEXT:    vmovapd 48(%rdx), %xmm6
; AVX-NEXT:    vshufpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[3]
; AVX-NEXT:    vmovapd 48(%rsi), %xmm8
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm8[1],xmm6[1]
; AVX-NEXT:    vbroadcastsd 56(%rcx), %ymm8
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3]
; AVX-NEXT:    vmovapd 32(%r8), %ymm8
; AVX-NEXT:    vblendpd {{.*#+}} ymm9 = ymm8[0],ymm13[1,2,3]
; AVX-NEXT:    vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm9 = ymm15[0],ymm8[1],ymm15[2,3]
; AVX-NEXT:    vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm11 = ymm5[0,1],ymm8[2],ymm5[3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1,2],ymm8[3]
; AVX-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm7[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 64(%r8), %ymm9
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1,2,3,4,5],ymm9[6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm4[2,3,4,5,6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm1[0,1],ymm9[2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm9[4,5],ymm0[6,7]
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3],ymm2[2,3]
; AVX-NEXT:    vmovapd 112(%rdx), %xmm2
; AVX-NEXT:    vshufpd {{.*#+}} ymm9 = ymm2[0],ymm1[0],ymm2[2],ymm1[3]
; AVX-NEXT:    vmovapd 112(%rsi), %xmm1
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; AVX-NEXT:    vbroadcastsd 120(%rcx), %ymm2
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2,3]
; AVX-NEXT:    vmovapd 96(%r8), %ymm0
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX-NEXT:    # ymm4 = mem[0],ymm0[1],mem[2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm0[0],ymm3[1,2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm0[2],ymm9[3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm0[3]
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
; AVX-NEXT:    # xmm1 = xmm14[0],mem[0]
; AVX-NEXT:    vmovaps 64(%rdi), %xmm14
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm14 = xmm14[0],mem[0]
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm10 = xmm10[0],mem[0]
; AVX-NEXT:    vmovaps 32(%rdi), %xmm12
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm12 = xmm12[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 96(%rdi), %xmm13
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm13 = xmm13[0],mem[0]
; AVX-NEXT:    vmovaps (%rdi), %xmm15
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm15 = xmm15[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm8 = xmm8[0],mem[0]
; AVX-NEXT:    vmovaps %xmm8, 16(%r9)
; AVX-NEXT:    vmovaps %xmm15, (%r9)
; AVX-NEXT:    vmovaps %xmm1, 496(%r9)
; AVX-NEXT:    vmovaps %xmm13, 480(%r9)
; AVX-NEXT:    vmovaps %xmm0, 176(%r9)
; AVX-NEXT:    vmovaps %xmm12, 160(%r9)
; AVX-NEXT:    vmovaps %xmm10, 336(%r9)
; AVX-NEXT:    vmovaps %xmm14, 320(%r9)
; AVX-NEXT:    vmovapd %ymm9, 576(%r9)
; AVX-NEXT:    vmovapd %ymm3, 512(%r9)
; AVX-NEXT:    vmovaps %ymm5, 384(%r9)
; AVX-NEXT:    vmovaps %ymm7, 352(%r9)
; AVX-NEXT:    vmovapd %ymm11, 256(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX-NEXT:    vmovapd %ymm2, 608(%r9)
; AVX-NEXT:    vmovapd %ymm4, 544(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX-NEXT:    vmovaps %ymm6, 416(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX-NEXT:    addq $216, %rsp
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX2-LABEL: store_i64_stride5_vf16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    subq $264, %rsp # imm = 0x108
; AVX2-NEXT:    vmovaps 96(%rdi), %ymm15
; AVX2-NEXT:    vmovaps 64(%rdi), %ymm14
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm11
; AVX2-NEXT:    vmovaps (%rdi), %ymm6
; AVX2-NEXT:    vmovaps (%rsi), %xmm1
; AVX2-NEXT:    vmovaps 32(%rsi), %xmm9
; AVX2-NEXT:    vmovaps 64(%rsi), %xmm0
; AVX2-NEXT:    vmovaps 96(%rsi), %xmm3
; AVX2-NEXT:    vinsertf128 $1, (%rcx), %ymm1, %ymm7
; AVX2-NEXT:    vmovaps (%rdi), %xmm8
; AVX2-NEXT:    vmovaps 32(%rdi), %xmm10
; AVX2-NEXT:    vmovaps 64(%rdi), %xmm1
; AVX2-NEXT:    vmovaps (%rdx), %xmm12
; AVX2-NEXT:    vmovaps 32(%rdx), %xmm13
; AVX2-NEXT:    vmovaps 64(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm8
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm7
; AVX2-NEXT:    vbroadcastsd 8(%rsi), %ymm8
; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm6[0,1,2,3],ymm8[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm7[6,7]
; AVX2-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm6[0],mem[0],ymm6[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm12[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 32(%rcx), %ymm9, %ymm9
; AVX2-NEXT:    vinsertf128 $1, %xmm13, %ymm10, %ymm10
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm9
; AVX2-NEXT:    vbroadcastsd 40(%rsi), %ymm10
; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3,4,5],ymm9[6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm10 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm11[0],mem[0],ymm11[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm12[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 64(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vbroadcastsd 72(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm14[0],mem[0],ymm14[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 96(%rcx), %ymm3, %ymm0
; AVX2-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm15[0],mem[0],ymm15[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vbroadcastsd 120(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 120(%rcx), %ymm2
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-NEXT:    vmovaps (%rcx), %ymm0
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 24(%rsi), %ymm8
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm7[2,3,4,5,6,7]
; AVX2-NEXT:    vmovaps 64(%rdx), %ymm3
; AVX2-NEXT:    vmovaps 64(%rcx), %ymm2
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 88(%rsi), %ymm8
; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm6[2,3]
; AVX2-NEXT:    vmovaps (%r8), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0,1],ymm5[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm12 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm11[2,3]
; AVX2-NEXT:    vbroadcastsd 48(%rcx), %ymm11
; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 32(%r8), %ymm0
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm8 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm14[2,3]
; AVX2-NEXT:    vmovaps 64(%r8), %ymm3
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm11 = ymm3[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, (%rsp), %ymm3, %ymm14 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm14 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm3[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3],ymm15[2,3]
; AVX2-NEXT:    vbroadcastsd 112(%rcx), %ymm15
; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm15[2,3],ymm3[4,5,6,7]
; AVX2-NEXT:    vmovaps 96(%r8), %ymm15
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm1 = ymm15[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm4 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm4 = mem[0,1,2,3,4,5],ymm15[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm15[4,5],ymm3[6,7]
; AVX2-NEXT:    vmovaps %ymm3, 576(%r9)
; AVX2-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-NEXT:    vmovaps %ymm1, 512(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-NEXT:    vmovaps %ymm2, 416(%r9)
; AVX2-NEXT:    vmovaps %ymm14, 384(%r9)
; AVX2-NEXT:    vmovaps %ymm11, 352(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-NEXT:    vmovaps %ymm5, 256(%r9)
; AVX2-NEXT:    vmovaps %ymm6, 224(%r9)
; AVX2-NEXT:    vmovaps %ymm8, 192(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-NEXT:    vmovaps %ymm10, 96(%r9)
; AVX2-NEXT:    vmovaps %ymm12, 64(%r9)
; AVX2-NEXT:    vmovaps %ymm13, 32(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-NEXT:    vmovaps %ymm7, 448(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-NEXT:    vmovaps %ymm4, 608(%r9)
; AVX2-NEXT:    vmovaps %ymm9, 288(%r9)
; AVX2-NEXT:    addq $264, %rsp # imm = 0x108
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX2-FP-LABEL: store_i64_stride5_vf16:
; AVX2-FP:       # %bb.0:
; AVX2-FP-NEXT:    subq $264, %rsp # imm = 0x108
; AVX2-FP-NEXT:    vmovaps 96(%rdi), %ymm15
; AVX2-FP-NEXT:    vmovaps 64(%rdi), %ymm14
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %ymm11
; AVX2-FP-NEXT:    vmovaps (%rdi), %ymm6
; AVX2-FP-NEXT:    vmovaps (%rsi), %xmm1
; AVX2-FP-NEXT:    vmovaps 32(%rsi), %xmm9
; AVX2-FP-NEXT:    vmovaps 64(%rsi), %xmm0
; AVX2-FP-NEXT:    vmovaps 96(%rsi), %xmm3
; AVX2-FP-NEXT:    vinsertf128 $1, (%rcx), %ymm1, %ymm7
; AVX2-FP-NEXT:    vmovaps (%rdi), %xmm8
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %xmm10
; AVX2-FP-NEXT:    vmovaps 64(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps (%rdx), %xmm12
; AVX2-FP-NEXT:    vmovaps 32(%rdx), %xmm13
; AVX2-FP-NEXT:    vmovaps 64(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm8
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm7
; AVX2-FP-NEXT:    vbroadcastsd 8(%rsi), %ymm8
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm8 = ymm6[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm7[6,7]
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm6[0],mem[0],ymm6[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm9, %ymm9
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm13, %ymm10, %ymm10
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm9
; AVX2-FP-NEXT:    vbroadcastsd 40(%rsi), %ymm10
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm10 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm11[0],mem[0],ymm11[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 64(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vbroadcastsd 72(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm14[0],mem[0],ymm14[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 96(%rcx), %ymm3, %ymm0
; AVX2-FP-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm15[0],mem[0],ymm15[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vbroadcastsd 120(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 120(%rcx), %ymm2
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-FP-NEXT:    vmovaps (%rcx), %ymm0
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 24(%rsi), %ymm8
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm7[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 64(%rdx), %ymm3
; AVX2-FP-NEXT:    vmovaps 64(%rcx), %ymm2
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 88(%rsi), %ymm8
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm6[2,3]
; AVX2-FP-NEXT:    vmovaps (%r8), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0,1],ymm5[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm12 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm11[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 48(%rcx), %ymm11
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 32(%r8), %ymm0
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm8 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm14[2,3]
; AVX2-FP-NEXT:    vmovaps 64(%r8), %ymm3
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm11 = ymm3[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, (%rsp), %ymm3, %ymm14 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm14 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3],ymm15[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 112(%rcx), %ymm15
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm15[2,3],ymm3[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 96(%r8), %ymm15
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm1 = ymm15[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm4 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm4 = mem[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm15[4,5],ymm3[6,7]
; AVX2-FP-NEXT:    vmovaps %ymm3, 576(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm1, 512(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm2, 416(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm14, 384(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm11, 352(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm5, 256(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm6, 224(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm8, 192(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm10, 96(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm12, 64(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm13, 32(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FP-NEXT:    vmovaps %ymm7, 448(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm4, 608(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm9, 288(%r9)
; AVX2-FP-NEXT:    addq $264, %rsp # imm = 0x108
; AVX2-FP-NEXT:    vzeroupper
; AVX2-FP-NEXT:    retq
;
; AVX2-FCP-LABEL: store_i64_stride5_vf16:
; AVX2-FCP:       # %bb.0:
; AVX2-FCP-NEXT:    subq $264, %rsp # imm = 0x108
; AVX2-FCP-NEXT:    vmovaps 96(%rdi), %ymm15
; AVX2-FCP-NEXT:    vmovaps 64(%rdi), %ymm14
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %ymm11
; AVX2-FCP-NEXT:    vmovaps (%rdi), %ymm6
; AVX2-FCP-NEXT:    vmovaps (%rsi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 32(%rsi), %xmm9
; AVX2-FCP-NEXT:    vmovaps 64(%rsi), %xmm0
; AVX2-FCP-NEXT:    vmovaps 96(%rsi), %xmm3
; AVX2-FCP-NEXT:    vinsertf128 $1, (%rcx), %ymm1, %ymm7
; AVX2-FCP-NEXT:    vmovaps (%rdi), %xmm8
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %xmm10
; AVX2-FCP-NEXT:    vmovaps 64(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps (%rdx), %xmm12
; AVX2-FCP-NEXT:    vmovaps 32(%rdx), %xmm13
; AVX2-FCP-NEXT:    vmovaps 64(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm12, %ymm8, %ymm8
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm8[0],ymm7[0],ymm8[2],ymm7[2]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm7
; AVX2-FCP-NEXT:    vbroadcastsd 8(%rsi), %ymm8
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm8 = ymm6[0,1,2,3],ymm8[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm7[6,7]
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm6[0],mem[0],ymm6[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm9, %ymm9
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm13, %ymm10, %ymm10
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm10[0],ymm9[0],ymm10[2],ymm9[2]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm9
; AVX2-FCP-NEXT:    vbroadcastsd 40(%rsi), %ymm10
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3,4,5],ymm9[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm10 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm11[0],mem[0],ymm11[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm12[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 64(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vbroadcastsd 72(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm14[0],mem[0],ymm14[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 96(%rcx), %ymm3, %ymm0
; AVX2-FCP-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm15[0],mem[0],ymm15[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vbroadcastsd 120(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 120(%rcx), %ymm2
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-FCP-NEXT:    vmovaps (%rcx), %ymm0
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 24(%rsi), %ymm8
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm8[0,1],ymm7[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 64(%rdx), %ymm3
; AVX2-FCP-NEXT:    vmovaps 64(%rcx), %ymm2
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm7 = ymm7[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 88(%rsi), %ymm8
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm6[2,3]
; AVX2-FCP-NEXT:    vmovaps (%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0,1],ymm5[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm12 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm11[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 48(%rcx), %ymm11
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 32(%r8), %ymm0
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm8 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm14[2,3]
; AVX2-FCP-NEXT:    vmovaps 64(%r8), %ymm3
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm11 = ymm3[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, (%rsp), %ymm3, %ymm14 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm14 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3],ymm15[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 112(%rcx), %ymm15
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm15[2,3],ymm3[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 96(%r8), %ymm15
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm1 = ymm15[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm4 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm4 = mem[0,1,2,3,4,5],ymm15[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm15[4,5],ymm3[6,7]
; AVX2-FCP-NEXT:    vmovaps %ymm3, 576(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm1, 512(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm2, 416(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm14, 384(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm11, 352(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm5, 256(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm6, 224(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm8, 192(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm10, 96(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm12, 64(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm13, 32(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm7, 448(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm4, 608(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm9, 288(%r9)
; AVX2-FCP-NEXT:    addq $264, %rsp # imm = 0x108
; AVX2-FCP-NEXT:    vzeroupper
; AVX2-FCP-NEXT:    retq
;
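; Unlike the AVX2 lowerings above, the AVX512 variants below need no stack
; spills: they build the stride-5 groups with vpermt2q/vpermi2q using small
; immediate index vectors, then merge the five sources in registers with
; kmask blends (movb + kmov into %k1-%k3 feeding masked vmovdqa64).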
; AVX512-LABEL: store_i64_stride5_vf16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512-NEXT:    movb $49, %al
; AVX512-NEXT:    kmovw %eax, %k1
; AVX512-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512-NEXT:    movb $8, %al
; AVX512-NEXT:    kmovw %eax, %k2
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512-NEXT:    movb $-116, %al
; AVX512-NEXT:    kmovw %eax, %k3
; AVX512-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512-NEXT:    movb $24, %al
; AVX512-NEXT:    kmovw %eax, %k2
; AVX512-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX512-FCP-LABEL: store_i64_stride5_vf16:
; AVX512-FCP:       # %bb.0:
; AVX512-FCP-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512-FCP-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512-FCP-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512-FCP-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512-FCP-NEXT:    movb $49, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512-FCP-NEXT:    movb $8, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512-FCP-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512-FCP-NEXT:    movb $-116, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k3
; AVX512-FCP-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512-FCP-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512-FCP-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512-FCP-NEXT:    movb $24, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512-FCP-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512-FCP-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512-FCP-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512-FCP-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512-FCP-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512-FCP-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512-FCP-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512-FCP-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512-FCP-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512-FCP-NEXT:    vzeroupper
; AVX512-FCP-NEXT:    retq
;
; AVX512DQ-LABEL: store_i64_stride5_vf16:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512DQ-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512DQ-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512DQ-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512DQ-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512DQ-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512DQ-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512DQ-NEXT:    movb $49, %al
; AVX512DQ-NEXT:    kmovw %eax, %k1
; AVX512DQ-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512DQ-NEXT:    movb $8, %al
; AVX512DQ-NEXT:    kmovw %eax, %k2
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512DQ-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512DQ-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512DQ-NEXT:    movb $-116, %al
; AVX512DQ-NEXT:    kmovw %eax, %k3
; AVX512DQ-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512DQ-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512DQ-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512DQ-NEXT:    movb $24, %al
; AVX512DQ-NEXT:    kmovw %eax, %k2
; AVX512DQ-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512DQ-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512DQ-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512DQ-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512DQ-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512DQ-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512DQ-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512DQ-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512DQ-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512DQ-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512DQ-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512DQ-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512DQ-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512DQ-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512DQ-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512DQ-NEXT:    vzeroupper
; AVX512DQ-NEXT:    retq
;
; AVX512DQ-FCP-LABEL: store_i64_stride5_vf16:
; AVX512DQ-FCP:       # %bb.0:
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512DQ-FCP-NEXT:    movb $49, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512DQ-FCP-NEXT:    movb $8, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512DQ-FCP-NEXT:    movb $-116, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k3
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512DQ-FCP-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512DQ-FCP-NEXT:    movb $24, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512DQ-FCP-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512DQ-FCP-NEXT:    vzeroupper
; AVX512DQ-FCP-NEXT:    retq
;
; AVX512BW-LABEL: store_i64_stride5_vf16:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512BW-NEXT:    movb $49, %al
; AVX512BW-NEXT:    kmovd %eax, %k1
; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512BW-NEXT:    movb $8, %al
; AVX512BW-NEXT:    kmovd %eax, %k2
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512BW-NEXT:    movb $-116, %al
; AVX512BW-NEXT:    kmovd %eax, %k3
; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512BW-NEXT:    movb $24, %al
; AVX512BW-NEXT:    kmovd %eax, %k2
; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512BW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512BW-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BW-FCP-LABEL: store_i64_stride5_vf16:
; AVX512BW-FCP:       # %bb.0:
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512BW-FCP-NEXT:    movb $49, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512BW-FCP-NEXT:    movb $8, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512BW-FCP-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512BW-FCP-NEXT:    movb $-116, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512BW-FCP-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512BW-FCP-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512BW-FCP-NEXT:    movb $24, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512BW-FCP-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512BW-FCP-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512BW-FCP-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512BW-FCP-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512BW-FCP-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512BW-FCP-NEXT:    vzeroupper
; AVX512BW-FCP-NEXT:    retq
;
; AVX512DQ-BW-LABEL: store_i64_stride5_vf16:
; AVX512DQ-BW:       # %bb.0:
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512DQ-BW-NEXT:    movb $49, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512DQ-BW-NEXT:    movb $8, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512DQ-BW-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512DQ-BW-NEXT:    movb $-116, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512DQ-BW-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512DQ-BW-NEXT:    movb $24, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512DQ-BW-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512DQ-BW-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512DQ-BW-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512DQ-BW-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512DQ-BW-NEXT:    vzeroupper
; AVX512DQ-BW-NEXT:    retq
;
; AVX512DQ-BW-FCP-LABEL: store_i64_stride5_vf16:
; AVX512DQ-BW-FCP:       # %bb.0:
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm9
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm6
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm10
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm12
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%r8), %zmm11
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm8, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm7 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm12, %zmm7, %zmm3
; AVX512DQ-BW-FCP-NEXT:    movb $49, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm13, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT:    movb $8, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm3 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm5, %zmm15, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm8
; AVX512DQ-BW-FCP-NEXT:    movb $-116, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm13, %zmm8 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm17 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm17, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm18 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-FCP-NEXT:    # zmm18 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm18, %zmm19
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm12, %zmm14, %zmm13
; AVX512DQ-BW-FCP-NEXT:    movb $24, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm13 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm11, %zmm19, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm20 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-FCP-NEXT:    # zmm20 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm21
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm12, %zmm20, %zmm21
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm22 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm22, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm21, %zmm23 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm11, %zmm21, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm24 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm12, %zmm25
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm10, %zmm24, %zmm25
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm26 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm26, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm27 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm25 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm11, %zmm25, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm12, %zmm15, %zmm10
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm16, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm0 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm11, %zmm17, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm18
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm14
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm14 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm19, %zmm14
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm6, %zmm20
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm22 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm6, %zmm24, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm26, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm5, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm25, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, 64(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, 128(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, 192(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm14, 256(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, 320(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm27, 384(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, 448(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm23, 512(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm13, 576(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, (%r9)
; AVX512DQ-BW-FCP-NEXT:    vzeroupper
; AVX512DQ-BW-FCP-NEXT:    retq
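; Reference IR: load the five 16 x i64 inputs, concatenate them into one
; 80 x i64 vector, and interleave so that output element 5*i+j is element i
; of input j -- the stride-5 store the lowerings above implement.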
  %in.vec0 = load <16 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <16 x i64>, ptr %in.vecptr1, align 64
  %in.vec2 = load <16 x i64>, ptr %in.vecptr2, align 64
  %in.vec3 = load <16 x i64>, ptr %in.vecptr3, align 64
  %in.vec4 = load <16 x i64>, ptr %in.vecptr4, align 64
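  ; Concatenate the five inputs into a single 80 x i64 vector; %in.vec4 is
  ; first widened to 64 elements with undef lanes so it can be appended to %3.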
  %1 = shufflevector <16 x i64> %in.vec0, <16 x i64> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %2 = shufflevector <16 x i64> %in.vec2, <16 x i64> %in.vec3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %3 = shufflevector <32 x i64> %1, <32 x i64> %2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
  %4 = shufflevector <16 x i64> %in.vec4, <16 x i64> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %5 = shufflevector <64 x i64> %3, <64 x i64> %4, <80 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79>
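  ; Final interleave: output element 5*i+j takes element i of input j.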
  %interleaved.vec = shufflevector <80 x i64> %5, <80 x i64> poison, <80 x i32> <i32 0, i32 16, i32 32, i32 48, i32 64, i32 1, i32 17, i32 33, i32 49, i32 65, i32 2, i32 18, i32 34, i32 50, i32 66, i32 3, i32 19, i32 35, i32 51, i32 67, i32 4, i32 20, i32 36, i32 52, i32 68, i32 5, i32 21, i32 37, i32 53, i32 69, i32 6, i32 22, i32 38, i32 54, i32 70, i32 7, i32 23, i32 39, i32 55, i32 71, i32 8, i32 24, i32 40, i32 56, i32 72, i32 9, i32 25, i32 41, i32 57, i32 73, i32 10, i32 26, i32 42, i32 58, i32 74, i32 11, i32 27, i32 43, i32 59, i32 75, i32 12, i32 28, i32 44, i32 60, i32 76, i32 13, i32 29, i32 45, i32 61, i32 77, i32 14, i32 30, i32 46, i32 62, i32 78, i32 15, i32 31, i32 47, i32 63, i32 79>
  store <80 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}

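; vf32 repeats the same pattern at twice the width: five 32 x i64 inputs
; producing a 160 x i64 (1280-byte) interleaved result.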
define void @store_i64_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind {
; SSE-LABEL: store_i64_stride5_vf32:
; SSE:       # %bb.0:
; SSE-NEXT:    subq $920, %rsp # imm = 0x398
; SSE-NEXT:    movapd (%rdi), %xmm3
; SSE-NEXT:    movapd 16(%rdi), %xmm4
; SSE-NEXT:    movapd 32(%rdi), %xmm5
; SSE-NEXT:    movapd (%rsi), %xmm6
; SSE-NEXT:    movapd 16(%rsi), %xmm7
; SSE-NEXT:    movapd 32(%rsi), %xmm8
; SSE-NEXT:    movapd (%rdx), %xmm9
; SSE-NEXT:    movapd 16(%rdx), %xmm10
; SSE-NEXT:    movapd 32(%rdx), %xmm11
; SSE-NEXT:    movapd (%rcx), %xmm12
; SSE-NEXT:    movapd 16(%rcx), %xmm13
; SSE-NEXT:    movapd 32(%rcx), %xmm14
; SSE-NEXT:    movapd (%r8), %xmm0
; SSE-NEXT:    movapd 16(%r8), %xmm1
; SSE-NEXT:    movapd 32(%r8), %xmm2
; SSE-NEXT:    movapd %xmm3, %xmm15
; SSE-NEXT:    unpcklpd {{.*#+}} xmm15 = xmm15[0],xmm6[0]
; SSE-NEXT:    movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1]
; SSE-NEXT:    movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm9[1]
; SSE-NEXT:    movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm9 = xmm9[0],xmm12[0]
; SSE-NEXT:    movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm0[1]
; SSE-NEXT:    movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd %xmm4, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm7[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
; SSE-NEXT:    movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm10[1]
; SSE-NEXT:    movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm13[0]
; SSE-NEXT:    movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm1[1]
; SSE-NEXT:    movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd %xmm5, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm8[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1]
; SSE-NEXT:    movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm11[1]
; SSE-NEXT:    movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm11 = xmm11[0],xmm14[0]
; SSE-NEXT:    movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm2[1]
; SSE-NEXT:    movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rdi), %xmm1
; SSE-NEXT:    movapd 48(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rdi), %xmm1
; SSE-NEXT:    movapd 64(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%rdi), %xmm1
; SSE-NEXT:    movapd 80(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%rdi), %xmm1
; SSE-NEXT:    movapd 96(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%rdi), %xmm1
; SSE-NEXT:    movapd 112(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%rdi), %xmm1
; SSE-NEXT:    movapd 128(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%rdi), %xmm1
; SSE-NEXT:    movapd 144(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%rdi), %xmm1
; SSE-NEXT:    movapd 160(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 176(%rdi), %xmm1
; SSE-NEXT:    movapd 176(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 176(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT:    movapd 176(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 176(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%rdi), %xmm1
; SSE-NEXT:    movapd 192(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%rcx), %xmm15
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm15[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1]
; SSE-NEXT:    movapd 208(%rdi), %xmm14
; SSE-NEXT:    movapd 208(%rsi), %xmm13
; SSE-NEXT:    movapd %xmm14, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm13[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 208(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
; SSE-NEXT:    movapd 208(%rdx), %xmm10
; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm10[1]
; SSE-NEXT:    movapd 208(%rcx), %xmm9
; SSE-NEXT:    unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm9[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1]
; SSE-NEXT:    movapd 224(%rdi), %xmm11
; SSE-NEXT:    movapd 224(%rsi), %xmm8
; SSE-NEXT:    movapd %xmm11, %xmm12
; SSE-NEXT:    unpcklpd {{.*#+}} xmm12 = xmm12[0],xmm8[0]
; SSE-NEXT:    movapd 224(%r8), %xmm2
; SSE-NEXT:    movsd {{.*#+}} xmm11 = xmm2[0],xmm11[1]
; SSE-NEXT:    movapd 224(%rdx), %xmm6
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm6[1]
; SSE-NEXT:    movapd 224(%rcx), %xmm3
; SSE-NEXT:    unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm3[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSE-NEXT:    movapd 240(%rdi), %xmm4
; SSE-NEXT:    movapd 240(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm4, %xmm5
; SSE-NEXT:    unpcklpd {{.*#+}} xmm5 = xmm5[0],xmm2[0]
; SSE-NEXT:    movapd 240(%r8), %xmm7
; SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm7[0],xmm4[1]
; SSE-NEXT:    movapd 240(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd 240(%rcx), %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
; SSE-NEXT:    movapd %xmm0, 1264(%r9)
; SSE-NEXT:    movapd %xmm2, 1248(%r9)
; SSE-NEXT:    movapd %xmm4, 1232(%r9)
; SSE-NEXT:    movapd %xmm1, 1216(%r9)
; SSE-NEXT:    movapd %xmm5, 1200(%r9)
; SSE-NEXT:    movapd %xmm3, 1184(%r9)
; SSE-NEXT:    movapd %xmm8, 1168(%r9)
; SSE-NEXT:    movapd %xmm11, 1152(%r9)
; SSE-NEXT:    movapd %xmm6, 1136(%r9)
; SSE-NEXT:    movapd %xmm12, 1120(%r9)
; SSE-NEXT:    movapd %xmm9, 1104(%r9)
; SSE-NEXT:    movapd %xmm13, 1088(%r9)
; SSE-NEXT:    movapd %xmm14, 1072(%r9)
; SSE-NEXT:    movapd %xmm10, 1056(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1040(%r9)
; SSE-NEXT:    movapd %xmm15, 1024(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1008(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 992(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 976(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 960(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 944(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 928(%r9)
; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 912(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 896(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 880(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 864(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 848(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 832(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 816(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 800(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 784(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 768(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 752(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 736(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 720(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 704(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 688(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 672(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 656(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 640(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 624(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 608(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 592(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 576(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 560(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 544(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 528(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 512(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 496(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 480(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 464(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 448(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 432(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 416(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 400(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 384(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 368(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 352(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 336(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 320(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 304(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 288(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 272(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 256(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 240(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 224(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 208(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 192(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 176(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 160(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 144(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 128(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 96(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 80(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 64(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, (%r9)
; SSE-NEXT:    addq $920, %rsp # imm = 0x398
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i64_stride5_vf32:
; AVX:       # %bb.0:
; AVX-NEXT:    subq $1048, %rsp # imm = 0x418
; AVX-NEXT:    vmovaps 192(%rdi), %ymm9
; AVX-NEXT:    vmovapd 160(%rdi), %ymm7
; AVX-NEXT:    vmovapd 96(%rdi), %ymm5
; AVX-NEXT:    vmovaps 128(%rcx), %ymm0
; AVX-NEXT:    vmovaps (%rcx), %ymm1
; AVX-NEXT:    vmovaps 64(%rcx), %ymm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 16(%rdx), %xmm6
; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm6[1],ymm1[3],ymm6[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 80(%rdx), %xmm3
; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX-NEXT:    vmovapd 96(%rcx), %xmm2
; AVX-NEXT:    vmovapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
; AVX-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 144(%rdx), %xmm2
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm7[0],mem[0],ymm7[2],mem[2]
; AVX-NEXT:    vmovapd 160(%rcx), %xmm1
; AVX-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX-NEXT:    vmovaps 192(%rcx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 192(%rcx), %ymm0
; AVX-NEXT:    vmovaps 208(%rdx), %xmm1
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps (%rdi), %ymm8
; AVX-NEXT:    vbroadcastsd 8(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps (%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm8[0],mem[0],ymm8[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovapd 32(%rdi), %ymm2
; AVX-NEXT:    vbroadcastsd 40(%rsi), %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
; AVX-NEXT:    vmovaps 32(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 64(%rdi), %ymm14
; AVX-NEXT:    vbroadcastsd 72(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 64(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm14[0],mem[0],ymm14[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 104(%rsi), %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3]
; AVX-NEXT:    vmovaps 96(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 128(%rdi), %ymm1
; AVX-NEXT:    vbroadcastsd 136(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 128(%rdx), %xmm3
; AVX-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm3[4,5,6,7]
; AVX-NEXT:    vbroadcastsd 168(%rsi), %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3]
; AVX-NEXT:    vmovaps 160(%rdx), %xmm4
; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm3
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 200(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 192(%rdx), %xmm3
; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm6
; AVX-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3,4,5],ymm6[6,7]
; AVX-NEXT:    vmovapd 224(%rdi), %ymm0
; AVX-NEXT:    vbroadcastsd 232(%rsi), %ymm6
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm0[0,1],ymm6[2,3]
; AVX-NEXT:    vmovaps 224(%rdx), %xmm11
; AVX-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm11
; AVX-NEXT:    vblendpd {{.*#+}} ymm11 = ymm6[0,1,2],ymm11[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm6 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm15 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm15[2,3]
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm15 = xmm13[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
; AVX-NEXT:    vmovaps (%r8), %ymm15
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0,1,2,3,4,5],ymm15[6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = ymm15[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm15[4,5],ymm8[6,7]
; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm2[2,3]
; AVX-NEXT:    vmovapd 48(%rdx), %xmm8
; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm8[0],ymm2[0],ymm8[2],ymm2[3]
; AVX-NEXT:    vmovapd 48(%rsi), %xmm15
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm15[1],xmm8[1]
; AVX-NEXT:    vbroadcastsd 56(%rcx), %ymm15
; AVX-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm15[2,3]
; AVX-NEXT:    vmovapd 32(%r8), %ymm15
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = ymm15[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0],ymm15[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm15[2],ymm2[3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm8[0,1,2],ymm15[3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
; AVX-NEXT:    vmovaps 64(%r8), %ymm8
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0,1,2,3,4,5],ymm8[6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, (%rsp), %ymm8, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = ymm8[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm13, (%rsp) # 32-byte Spill
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0,1],ymm8[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5],ymm2[6,7]
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm5[2,3]
; AVX-NEXT:    vmovapd 112(%rdx), %xmm5
; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm5[0],ymm2[0],ymm5[2],ymm2[3]
; AVX-NEXT:    vmovapd 112(%rsi), %xmm8
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm8[1],xmm5[1]
; AVX-NEXT:    vbroadcastsd 120(%rcx), %ymm8
; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm8[2,3]
; AVX-NEXT:    vmovapd 96(%r8), %ymm8
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0],ymm8[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = ymm8[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm8[2],ymm2[3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1,2],ymm8[3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovaps 128(%r8), %ymm2
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1,2,3,4,5],ymm2[6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = ymm2[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm10[0,1],ymm2[2,3],ymm10[4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = mem[2,3],ymm7[2,3]
; AVX-NEXT:    vmovapd 176(%rdx), %xmm2
; AVX-NEXT:    vshufpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[3]
; AVX-NEXT:    vmovapd 176(%rsi), %xmm5
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm5[1],xmm2[1]
; AVX-NEXT:    vbroadcastsd 184(%rcx), %ymm5
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3]
; AVX-NEXT:    vmovapd 160(%r8), %ymm5
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = mem[0],ymm5[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = ymm5[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2],ymm1[3]
; AVX-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm5[3]
; AVX-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
; AVX-NEXT:    vmovaps 192(%r8), %ymm2
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1,2,3,4,5],ymm2[6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm2[0,1],ymm12[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX-NEXT:    vmovapd 240(%rdx), %xmm1
; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[3]
; AVX-NEXT:    vmovapd 240(%rsi), %xmm2
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; AVX-NEXT:    vbroadcastsd 248(%rcx), %ymm2
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
; AVX-NEXT:    vmovapd 224(%r8), %ymm5
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0],ymm11[1,2,3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm6[0],ymm5[1],ymm6[2,3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm5[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
; AVX-NEXT:    # xmm7 = xmm0[0],mem[0]
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm5 # 16-byte Folded Reload
; AVX-NEXT:    # xmm5 = xmm4[0],mem[0]
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm2 # 16-byte Folded Reload
; AVX-NEXT:    # xmm2 = xmm3[0],mem[0]
; AVX-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm1[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 64(%rdi), %xmm14
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm14 = xmm14[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 32(%rdi), %xmm12
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm12 = xmm12[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 96(%rdi), %xmm15
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm15 = xmm15[0],mem[0]
; AVX-NEXT:    vmovaps 160(%rdi), %xmm13
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm13 = xmm13[0],mem[0]
; AVX-NEXT:    vmovaps 224(%rdi), %xmm9
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm9 = xmm9[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 192(%rdi), %xmm11
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm11 = xmm11[0],mem[0]
; AVX-NEXT:    vmovaps (%rdi), %xmm10
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm10 = xmm10[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm8 = xmm8[0],mem[0]
; AVX-NEXT:    vmovaps %xmm8, 16(%r9)
; AVX-NEXT:    vmovaps %xmm10, (%r9)
; AVX-NEXT:    vmovaps %xmm2, 976(%r9)
; AVX-NEXT:    vmovaps %xmm11, 960(%r9)
; AVX-NEXT:    vmovaps %xmm0, 1136(%r9)
; AVX-NEXT:    vmovaps %xmm9, 1120(%r9)
; AVX-NEXT:    vmovaps %xmm5, 816(%r9)
; AVX-NEXT:    vmovaps %xmm13, 800(%r9)
; AVX-NEXT:    vmovaps %xmm7, 496(%r9)
; AVX-NEXT:    vmovaps %xmm15, 480(%r9)
; AVX-NEXT:    vmovaps %xmm1, 176(%r9)
; AVX-NEXT:    vmovaps %xmm12, 160(%r9)
; AVX-NEXT:    vmovaps %xmm3, 336(%r9)
; AVX-NEXT:    vmovaps %xmm14, 320(%r9)
; AVX-NEXT:    vmovaps %xmm4, 656(%r9)
; AVX-NEXT:    vmovaps %xmm6, 640(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1184(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1152(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 992(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 896(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 832(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 704(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 672(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 576(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1248(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1088(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1056(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1024(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 864(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 736(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX-NEXT:    addq $1048, %rsp # imm = 0x418
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX2-LABEL: store_i64_stride5_vf32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    subq $1128, %rsp # imm = 0x468
; AVX2-NEXT:    vmovaps 96(%rdi), %ymm13
; AVX2-NEXT:    vmovaps 64(%rdi), %ymm7
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm9
; AVX2-NEXT:    vmovaps (%rdi), %ymm12
; AVX2-NEXT:    vmovaps (%rsi), %xmm2
; AVX2-NEXT:    vmovaps 32(%rsi), %xmm4
; AVX2-NEXT:    vmovaps 64(%rsi), %xmm1
; AVX2-NEXT:    vmovaps 96(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, (%rcx), %ymm2, %ymm5
; AVX2-NEXT:    vmovaps (%rdi), %xmm6
; AVX2-NEXT:    vmovaps 32(%rdi), %xmm8
; AVX2-NEXT:    vmovaps 64(%rdi), %xmm2
; AVX2-NEXT:    vmovaps (%rdx), %xmm10
; AVX2-NEXT:    vmovaps 32(%rdx), %xmm11
; AVX2-NEXT:    vmovaps 64(%rdx), %xmm3
; AVX2-NEXT:    vinsertf128 $1, %xmm10, %ymm6, %ymm6
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
; AVX2-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm5
; AVX2-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm12[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm12[0],mem[0],ymm12[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 32(%rcx), %ymm4, %ymm4
; AVX2-NEXT:    vinsertf128 $1, %xmm11, %ymm8, %ymm5
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm4
; AVX2-NEXT:    vbroadcastsd 40(%rsi), %ymm5
; AVX2-NEXT:    vmovups %ymm9, (%rsp) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 64(%rcx), %ymm1, %ymm1
; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
; AVX2-NEXT:    vbroadcastsd 72(%rsi), %ymm2
; AVX2-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm7[0],mem[0],ymm7[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 96(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm13[0],mem[0],ymm13[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 128(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 128(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 128(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 128(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 136(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 160(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 160(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 160(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 160(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 160(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 168(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 192(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 192(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 192(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 192(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 192(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 200(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 224(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 224(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 224(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 224(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 224(%rdi), %ymm9
; AVX2-NEXT:    vbroadcastsd 232(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vbroadcastsd 120(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 120(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 184(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 184(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 248(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 248(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-NEXT:    vmovaps (%rcx), %ymm0
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 24(%rsi), %ymm3
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-NEXT:    vmovaps 64(%rdx), %ymm14
; AVX2-NEXT:    vmovaps 64(%rcx), %ymm15
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm14[1],ymm15[1],ymm14[3],ymm15[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 88(%rsi), %ymm3
; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-NEXT:    vmovaps 128(%rdx), %ymm3
; AVX2-NEXT:    vmovaps 128(%rcx), %ymm2
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 152(%rsi), %ymm5
; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0,1],ymm4[2,3,4,5,6,7]
; AVX2-NEXT:    vmovaps 192(%rdx), %ymm5
; AVX2-NEXT:    vmovaps 192(%rcx), %ymm4
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 216(%rsi), %ymm11
; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm10[2,3,4,5,6,7]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps (%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm10 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm10 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 48(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 32(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm6, (%rsp) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 64(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm12[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm13[2,3]
; AVX2-NEXT:    vbroadcastsd 112(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 96(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm1 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 128(%r8), %ymm2
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm13 = ymm2[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm12 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm2[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 176(%rcx), %ymm3
; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-NEXT:    vmovaps 160(%r8), %ymm3
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm7 = ymm3[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm6 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm6 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm3[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm2[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 192(%r8), %ymm4
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm5 = ymm4[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm3 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm14 = ymm11[0,1,2,3,4,5],ymm4[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7]
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm9[2,3]
; AVX2-NEXT:    vbroadcastsd 240(%rcx), %ymm9
; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm2[0,1],ymm9[2,3],ymm2[4,5,6,7]
; AVX2-NEXT:    vmovaps 224(%r8), %ymm0
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm1 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm11 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5],ymm9[6,7]
; AVX2-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX2-NEXT:    vmovaps %ymm1, 1184(%r9)
; AVX2-NEXT:    vmovaps %ymm2, 1152(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1120(%r9)
; AVX2-NEXT:    vmovaps %ymm4, 1056(%r9)
; AVX2-NEXT:    vmovaps %ymm3, 1024(%r9)
; AVX2-NEXT:    vmovaps %ymm5, 992(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 960(%r9)
; AVX2-NEXT:    vmovaps %ymm8, 896(%r9)
; AVX2-NEXT:    vmovaps %ymm6, 864(%r9)
; AVX2-NEXT:    vmovaps %ymm7, 832(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 800(%r9)
; AVX2-NEXT:    vmovaps %ymm10, 736(%r9)
; AVX2-NEXT:    vmovaps %ymm12, 704(%r9)
; AVX2-NEXT:    vmovaps %ymm13, 672(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 640(%r9)
; AVX2-NEXT:    vmovaps %ymm15, 576(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX2-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-NEXT:    vmovaps %ymm14, 1088(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-NEXT:    vmovaps %ymm11, 1248(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX2-NEXT:    addq $1128, %rsp # imm = 0x468
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX2-FP-LABEL: store_i64_stride5_vf32:
; AVX2-FP:       # %bb.0:
; AVX2-FP-NEXT:    subq $1128, %rsp # imm = 0x468
; AVX2-FP-NEXT:    vmovaps 96(%rdi), %ymm13
; AVX2-FP-NEXT:    vmovaps 64(%rdi), %ymm7
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %ymm9
; AVX2-FP-NEXT:    vmovaps (%rdi), %ymm12
; AVX2-FP-NEXT:    vmovaps (%rsi), %xmm2
; AVX2-FP-NEXT:    vmovaps 32(%rsi), %xmm4
; AVX2-FP-NEXT:    vmovaps 64(%rsi), %xmm1
; AVX2-FP-NEXT:    vmovaps 96(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, (%rcx), %ymm2, %ymm5
; AVX2-FP-NEXT:    vmovaps (%rdi), %xmm6
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %xmm8
; AVX2-FP-NEXT:    vmovaps 64(%rdi), %xmm2
; AVX2-FP-NEXT:    vmovaps (%rdx), %xmm10
; AVX2-FP-NEXT:    vmovaps 32(%rdx), %xmm11
; AVX2-FP-NEXT:    vmovaps 64(%rdx), %xmm3
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm10, %ymm6, %ymm6
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
; AVX2-FP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm5
; AVX2-FP-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-FP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm12[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm12[0],mem[0],ymm12[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm4, %ymm4
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm11, %ymm8, %ymm5
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm4
; AVX2-FP-NEXT:    vbroadcastsd 40(%rsi), %ymm5
; AVX2-FP-NEXT:    vmovups %ymm9, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 64(%rcx), %ymm1, %ymm1
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
; AVX2-FP-NEXT:    vbroadcastsd 72(%rsi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm7[0],mem[0],ymm7[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 96(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm13[0],mem[0],ymm13[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 128(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 128(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 128(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 128(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 136(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 160(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 160(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 160(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 160(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 160(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 168(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 192(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 192(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 192(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 192(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 192(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 200(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 224(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 224(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 224(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 224(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 224(%rdi), %ymm9
; AVX2-FP-NEXT:    vbroadcastsd 232(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vbroadcastsd 120(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 120(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 184(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 184(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 248(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 248(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-FP-NEXT:    vmovaps (%rcx), %ymm0
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 24(%rsi), %ymm3
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 64(%rdx), %ymm14
; AVX2-FP-NEXT:    vmovaps 64(%rcx), %ymm15
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm14[1],ymm15[1],ymm14[3],ymm15[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 88(%rsi), %ymm3
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm12 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 128(%rdx), %ymm3
; AVX2-FP-NEXT:    vmovaps 128(%rcx), %ymm2
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 152(%rsi), %ymm5
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0,1],ymm4[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 192(%rdx), %ymm5
; AVX2-FP-NEXT:    vmovaps 192(%rcx), %ymm4
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 216(%rsi), %ymm11
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm10[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps (%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm10 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm10 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 48(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 32(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 64(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm12[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm13[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 112(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 96(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm1 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 128(%r8), %ymm2
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm13 = ymm2[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm12 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 176(%rcx), %ymm3
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 160(%r8), %ymm3
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm7 = ymm3[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm6 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm6 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm2[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 192(%r8), %ymm4
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm5 = ymm4[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm3 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm14 = ymm11[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7]
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm9[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 240(%rcx), %ymm9
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm9 = ymm2[0,1],ymm9[2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 224(%r8), %ymm0
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm1 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm11 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5],ymm9[6,7]
; AVX2-FP-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm1, 1184(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm2, 1152(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1120(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm4, 1056(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm3, 1024(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm5, 992(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 960(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm8, 896(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm6, 864(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm7, 832(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 800(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm10, 736(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm12, 704(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm13, 672(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 640(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm15, 576(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX2-FP-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FP-NEXT:    vmovaps %ymm14, 1088(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm11, 1248(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX2-FP-NEXT:    addq $1128, %rsp # imm = 0x468
; AVX2-FP-NEXT:    vzeroupper
; AVX2-FP-NEXT:    retq
;
; AVX2-FCP-LABEL: store_i64_stride5_vf32:
; AVX2-FCP:       # %bb.0:
; AVX2-FCP-NEXT:    subq $1128, %rsp # imm = 0x468
; AVX2-FCP-NEXT:    vmovaps 96(%rdi), %ymm13
; AVX2-FCP-NEXT:    vmovaps 64(%rdi), %ymm7
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %ymm9
; AVX2-FCP-NEXT:    vmovaps (%rdi), %ymm12
; AVX2-FCP-NEXT:    vmovaps (%rsi), %xmm2
; AVX2-FCP-NEXT:    vmovaps 32(%rsi), %xmm4
; AVX2-FCP-NEXT:    vmovaps 64(%rsi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 96(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, (%rcx), %ymm2, %ymm5
; AVX2-FCP-NEXT:    vmovaps (%rdi), %xmm6
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %xmm8
; AVX2-FCP-NEXT:    vmovaps 64(%rdi), %xmm2
; AVX2-FCP-NEXT:    vmovaps (%rdx), %xmm10
; AVX2-FCP-NEXT:    vmovaps 32(%rdx), %xmm11
; AVX2-FCP-NEXT:    vmovaps 64(%rdx), %xmm3
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm10, %ymm6, %ymm6
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
; AVX2-FCP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm5
; AVX2-FCP-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-FCP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm12[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm12[0],mem[0],ymm12[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm4, %ymm4
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm11, %ymm8, %ymm5
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm4
; AVX2-FCP-NEXT:    vbroadcastsd 40(%rsi), %ymm5
; AVX2-FCP-NEXT:    vmovups %ymm9, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 64(%rcx), %ymm1, %ymm1
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
; AVX2-FCP-NEXT:    vbroadcastsd 72(%rsi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm7[0],mem[0],ymm7[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 96(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm13[0],mem[0],ymm13[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 128(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 128(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 128(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 128(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 136(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 160(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 160(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 160(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 160(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 160(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 168(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 192(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 192(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 192(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 192(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 192(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 200(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 224(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 224(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 224(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 224(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 224(%rdi), %ymm9
; AVX2-FCP-NEXT:    vbroadcastsd 232(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm9[0],mem[0],ymm9[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vbroadcastsd 120(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 120(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 184(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 184(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 248(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 248(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps (%rdx), %ymm1
; AVX2-FCP-NEXT:    vmovaps (%rcx), %ymm0
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 24(%rsi), %ymm3
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 64(%rdx), %ymm14
; AVX2-FCP-NEXT:    vmovaps 64(%rcx), %ymm15
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm14[1],ymm15[1],ymm14[3],ymm15[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 88(%rsi), %ymm3
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm12 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 128(%rdx), %ymm3
; AVX2-FCP-NEXT:    vmovaps 128(%rcx), %ymm2
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm3[1],ymm2[1],ymm3[3],ymm2[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 152(%rsi), %ymm5
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm8 = ymm5[0,1],ymm4[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 192(%rdx), %ymm5
; AVX2-FCP-NEXT:    vmovaps 192(%rcx), %ymm4
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 216(%rsi), %ymm11
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm10[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps (%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm10 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm10 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 48(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 32(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm14[0],ymm15[0],ymm14[2],ymm15[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 64(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm12[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm13[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 112(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 96(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm1 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 128(%r8), %ymm2
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm13 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm13 = ymm2[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm12 = mem[0,1],ymm2[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm2[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm10 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 176(%rcx), %ymm3
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 160(%r8), %ymm3
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm7 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm7 = ymm3[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm6 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm6 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm3[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm2[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 192(%r8), %ymm4
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm5 = ymm4[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm3 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm14 = ymm11[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm4[4,5],ymm2[6,7]
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm9[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 240(%rcx), %ymm9
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm9 = ymm2[0,1],ymm9[2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 224(%r8), %ymm0
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm1 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm11 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5],ymm9[6,7]
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm1, 1184(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm2, 1152(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1120(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm4, 1056(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm3, 1024(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm5, 992(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 960(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm8, 896(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm6, 864(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm7, 832(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 800(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm10, 736(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm12, 704(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm13, 672(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 640(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm15, 576(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX2-FCP-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm14, 1088(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm11, 1248(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX2-FCP-NEXT:    addq $1128, %rsp # imm = 0x468
; AVX2-FCP-NEXT:    vzeroupper
; AVX2-FCP-NEXT:    retq
;
; AVX512-LABEL: store_i64_stride5_vf32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512-NEXT:    movb $49, %al
; AVX512-NEXT:    kmovw %eax, %k1
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512-NEXT:    movb $-116, %al
; AVX512-NEXT:    kmovw %eax, %k3
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512-NEXT:    movb $24, %al
; AVX512-NEXT:    kmovw %eax, %k2
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512-NEXT:    movb $8, %al
; AVX512-NEXT:    kmovw %eax, %k3
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX512-FCP-LABEL: store_i64_stride5_vf32:
; AVX512-FCP:       # %bb.0:
; AVX512-FCP-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512-FCP-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512-FCP-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512-FCP-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512-FCP-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512-FCP-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512-FCP-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512-FCP-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512-FCP-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512-FCP-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512-FCP-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512-FCP-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512-FCP-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512-FCP-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512-FCP-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512-FCP-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512-FCP-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512-FCP-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512-FCP-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512-FCP-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512-FCP-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512-FCP-NEXT:    movb $49, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k1
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512-FCP-NEXT:    movb $-116, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k3
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512-FCP-NEXT:    movb $24, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k2
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512-FCP-NEXT:    movb $8, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k3
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512-FCP-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512-FCP-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512-FCP-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512-FCP-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512-FCP-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512-FCP-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512-FCP-NEXT:    vzeroupper
; AVX512-FCP-NEXT:    retq
;
; AVX512DQ-LABEL: store_i64_stride5_vf32:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512DQ-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512DQ-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512DQ-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512DQ-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512DQ-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512DQ-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512DQ-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512DQ-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512DQ-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512DQ-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512DQ-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512DQ-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512DQ-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512DQ-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512DQ-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512DQ-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512DQ-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512DQ-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512DQ-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512DQ-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512DQ-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512DQ-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512DQ-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512DQ-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512DQ-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512DQ-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512DQ-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512DQ-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512DQ-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512DQ-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512DQ-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512DQ-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512DQ-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512DQ-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512DQ-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512DQ-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512DQ-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512DQ-NEXT:    movb $49, %al
; AVX512DQ-NEXT:    kmovw %eax, %k1
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512DQ-NEXT:    movb $-116, %al
; AVX512DQ-NEXT:    kmovw %eax, %k3
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512DQ-NEXT:    movb $24, %al
; AVX512DQ-NEXT:    kmovw %eax, %k2
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512DQ-NEXT:    movb $8, %al
; AVX512DQ-NEXT:    kmovw %eax, %k3
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512DQ-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512DQ-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512DQ-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512DQ-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512DQ-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512DQ-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512DQ-NEXT:    vzeroupper
; AVX512DQ-NEXT:    retq
;
; AVX512DQ-FCP-LABEL: store_i64_stride5_vf32:
; AVX512DQ-FCP:       # %bb.0:
; AVX512DQ-FCP-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512DQ-FCP-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512DQ-FCP-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512DQ-FCP-NEXT:    movb $49, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k1
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512DQ-FCP-NEXT:    movb $-116, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k3
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512DQ-FCP-NEXT:    movb $24, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k2
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512DQ-FCP-NEXT:    movb $8, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k3
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512DQ-FCP-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512DQ-FCP-NEXT:    vzeroupper
; AVX512DQ-FCP-NEXT:    retq
;
; AVX512BW-LABEL: store_i64_stride5_vf32:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512BW-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512BW-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512BW-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512BW-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512BW-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512BW-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512BW-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512BW-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512BW-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512BW-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512BW-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512BW-NEXT:    movb $49, %al
; AVX512BW-NEXT:    kmovd %eax, %k1
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512BW-NEXT:    movb $-116, %al
; AVX512BW-NEXT:    kmovd %eax, %k3
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512BW-NEXT:    movb $24, %al
; AVX512BW-NEXT:    kmovd %eax, %k2
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512BW-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512BW-NEXT:    movb $8, %al
; AVX512BW-NEXT:    kmovd %eax, %k3
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512BW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512BW-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512BW-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512BW-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512BW-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512BW-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BW-FCP-LABEL: store_i64_stride5_vf32:
; AVX512BW-FCP:       # %bb.0:
; AVX512BW-FCP-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512BW-FCP-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512BW-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512BW-FCP-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512BW-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512BW-FCP-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512BW-FCP-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512BW-FCP-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512BW-FCP-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512BW-FCP-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512BW-FCP-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512BW-FCP-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512BW-FCP-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512BW-FCP-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512BW-FCP-NEXT:    movb $49, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512BW-FCP-NEXT:    movb $-116, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512BW-FCP-NEXT:    movb $24, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512BW-FCP-NEXT:    movb $8, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512BW-FCP-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512BW-FCP-NEXT:    vzeroupper
; AVX512BW-FCP-NEXT:    retq
;
; AVX512DQ-BW-LABEL: store_i64_stride5_vf32:
; AVX512DQ-BW:       # %bb.0:
; AVX512DQ-BW-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512DQ-BW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512DQ-BW-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512DQ-BW-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512DQ-BW-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512DQ-BW-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512DQ-BW-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512DQ-BW-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512DQ-BW-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512DQ-BW-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512DQ-BW-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512DQ-BW-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512DQ-BW-NEXT:    movb $49, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512DQ-BW-NEXT:    movb $-116, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512DQ-BW-NEXT:    movb $24, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512DQ-BW-NEXT:    movb $8, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512DQ-BW-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512DQ-BW-NEXT:    vzeroupper
; AVX512DQ-BW-NEXT:    retq
;
; AVX512DQ-BW-FCP-LABEL: store_i64_stride5_vf32:
; AVX512DQ-BW-FCP:       # %bb.0:
; AVX512DQ-BW-FCP-NEXT:    subq $648, %rsp # imm = 0x288
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm15
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm20
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm16
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm17
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm24
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm22
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm27
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm23
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm25
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm18
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm16, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm20, %zmm11, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm19 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm18, %zmm19, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm25, %zmm19, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm23, %zmm19, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm19
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm17, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm28, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm29 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-FCP-NEXT:    # zmm29 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm16, %zmm29, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm26
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm26
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm21 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-FCP-NEXT:    # zmm21 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm18, %zmm21, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm31 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm27, %zmm31, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm18, %zmm28, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm18
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm18
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm25, %zmm21, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm22, %zmm31, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm25, %zmm28, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm25
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm23, %zmm30
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm24, %zmm31, %zmm30
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm23, %zmm28, %zmm24
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm28 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm23, %zmm12, %zmm25
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm23, %zmm21, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm16, %zmm28, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm17, %zmm21
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm17, %zmm31, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm10 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm17
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm16, %zmm10, %zmm17
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm16, %zmm14, %zmm20
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm6
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm15, %zmm11, %zmm6
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm16
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm16
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm15
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm11, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm31
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm31, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm9
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm14, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm29, %zmm9
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm28, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm31, %zmm1, %zmm11
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm29
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm31, %zmm28
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm10, %zmm31
; AVX512DQ-BW-FCP-NEXT:    movb $49, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm10 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm14 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm19 {%k1}
; AVX512DQ-BW-FCP-NEXT:    movb $-116, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k3}
; AVX512DQ-BW-FCP-NEXT:    movb $24, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm20 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm15 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm2 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%r8), %zmm0
; AVX512DQ-BW-FCP-NEXT:    movb $8, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm23 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm1 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm26
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm17 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm3 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm17
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm11 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm20
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm13, %zmm18 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm16 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm8 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%r8), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm10 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm18
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm16
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm15
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm25 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm30, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%r8), %zmm9
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm11, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm14 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm1, %zmm25
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm3, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm6, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm29, %zmm12 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm1, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm21, %zmm28 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm3, %zmm28
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm31 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm19 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm31, 64(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm19, 128(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm28, 192(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm12, 256(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, 320(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm4, 384(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm14, 448(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm5, 512(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm25, 576(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm15, 640(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, 704(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, 768(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm16, 832(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, 896(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, 960(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm17, 1024(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, 1088(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm23, 1152(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm26, 1216(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, (%r9)
; AVX512DQ-BW-FCP-NEXT:    addq $648, %rsp # imm = 0x288
; AVX512DQ-BW-FCP-NEXT:    vzeroupper
; AVX512DQ-BW-FCP-NEXT:    retq
  %in.vec0 = load <32 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <32 x i64>, ptr %in.vecptr1, align 64
  %in.vec2 = load <32 x i64>, ptr %in.vecptr2, align 64
  %in.vec3 = load <32 x i64>, ptr %in.vecptr3, align 64
  %in.vec4 = load <32 x i64>, ptr %in.vecptr4, align 64
  %1 = shufflevector <32 x i64> %in.vec0, <32 x i64> %in.vec1, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
  %2 = shufflevector <32 x i64> %in.vec2, <32 x i64> %in.vec3, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
  %3 = shufflevector <64 x i64> %1, <64 x i64> %2, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
  %4 = shufflevector <32 x i64> %in.vec4, <32 x i64> poison, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %5 = shufflevector <128 x i64> %3, <128 x i64> %4, <160 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159>
  %interleaved.vec = shufflevector <160 x i64> %5, <160 x i64> poison, <160 x i32> <i32 0, i32 32, i32 64, i32 96, i32 128, i32 1, i32 33, i32 65, i32 97, i32 129, i32 2, i32 34, i32 66, i32 98, i32 130, i32 3, i32 35, i32 67, i32 99, i32 131, i32 4, i32 36, i32 68, i32 100, i32 132, i32 5, i32 37, i32 69, i32 101, i32 133, i32 6, i32 38, i32 70, i32 102, i32 134, i32 7, i32 39, i32 71, i32 103, i32 135, i32 8, i32 40, i32 72, i32 104, i32 136, i32 9, i32 41, i32 73, i32 105, i32 137, i32 10, i32 42, i32 74, i32 106, i32 138, i32 11, i32 43, i32 75, i32 107, i32 139, i32 12, i32 44, i32 76, i32 108, i32 140, i32 13, i32 45, i32 77, i32 109, i32 141, i32 14, i32 46, i32 78, i32 110, i32 142, i32 15, i32 47, i32 79, i32 111, i32 143, i32 16, i32 48, i32 80, i32 112, i32 144, i32 17, i32 49, i32 81, i32 113, i32 145, i32 18, i32 50, i32 82, i32 114, i32 146, i32 19, i32 51, i32 83, i32 115, i32 147, i32 20, i32 52, i32 84, i32 116, i32 148, i32 21, i32 53, i32 85, i32 117, i32 149, i32 22, i32 54, i32 86, i32 118, i32 150, i32 23, i32 55, i32 87, i32 119, i32 151, i32 24, i32 56, i32 88, i32 120, i32 152, i32 25, i32 57, i32 89, i32 121, i32 153, i32 26, i32 58, i32 90, i32 122, i32 154, i32 27, i32 59, i32 91, i32 123, i32 155, i32 28, i32 60, i32 92, i32 124, i32 156, i32 29, i32 61, i32 93, i32 125, i32 157, i32 30, i32 62, i32 94, i32 126, i32 158, i32 31, i32 63, i32 95, i32 127, i32 159>
  store <160 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}

define void @store_i64_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind {
; SSE-LABEL: store_i64_stride5_vf64:
; SSE:       # %bb.0:
; SSE-NEXT:    subq $2200, %rsp # imm = 0x898
; SSE-NEXT:    movapd (%rdi), %xmm3
; SSE-NEXT:    movapd 16(%rdi), %xmm4
; SSE-NEXT:    movapd 32(%rdi), %xmm5
; SSE-NEXT:    movapd (%rsi), %xmm6
; SSE-NEXT:    movapd 16(%rsi), %xmm7
; SSE-NEXT:    movapd 32(%rsi), %xmm8
; SSE-NEXT:    movapd (%rdx), %xmm9
; SSE-NEXT:    movapd 16(%rdx), %xmm10
; SSE-NEXT:    movapd 32(%rdx), %xmm11
; SSE-NEXT:    movapd (%rcx), %xmm12
; SSE-NEXT:    movapd 16(%rcx), %xmm13
; SSE-NEXT:    movapd 32(%rcx), %xmm14
; SSE-NEXT:    movapd 16(%r8), %xmm1
; SSE-NEXT:    movapd 32(%r8), %xmm0
; SSE-NEXT:    movapd (%r8), %xmm2
; SSE-NEXT:    movapd %xmm3, %xmm15
; SSE-NEXT:    unpcklpd {{.*#+}} xmm15 = xmm15[0],xmm6[0]
; SSE-NEXT:    movapd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1]
; SSE-NEXT:    movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm9[1]
; SSE-NEXT:    movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm9 = xmm9[0],xmm12[0]
; SSE-NEXT:    movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm2[1]
; SSE-NEXT:    movapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd %xmm4, %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm7[0]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
; SSE-NEXT:    movapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm10[1]
; SSE-NEXT:    movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm13[0]
; SSE-NEXT:    movapd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm1[1]
; SSE-NEXT:    movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd %xmm5, %xmm1
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm8[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movsd {{.*#+}} xmm5 = xmm0[0],xmm5[1]
; SSE-NEXT:    movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm11[1]
; SSE-NEXT:    movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpcklpd {{.*#+}} xmm11 = xmm11[0],xmm14[0]
; SSE-NEXT:    movapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm0[1]
; SSE-NEXT:    movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rdi), %xmm1
; SSE-NEXT:    movapd 48(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 48(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rdi), %xmm1
; SSE-NEXT:    movapd 64(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 64(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%rdi), %xmm1
; SSE-NEXT:    movapd 80(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 80(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%rdi), %xmm1
; SSE-NEXT:    movapd 96(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 96(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%rdi), %xmm1
; SSE-NEXT:    movapd 112(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 112(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%rdi), %xmm1
; SSE-NEXT:    movapd 128(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 128(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%rdi), %xmm1
; SSE-NEXT:    movapd 144(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 144(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%rdi), %xmm1
; SSE-NEXT:    movapd 160(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 160(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 176(%rdi), %xmm1
; SSE-NEXT:    movapd 176(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 176(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 176(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 176(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%rdi), %xmm1
; SSE-NEXT:    movapd 192(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 192(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 208(%rdi), %xmm1
; SSE-NEXT:    movapd 208(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 208(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 208(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 208(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 224(%rdi), %xmm1
; SSE-NEXT:    movapd 224(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 224(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 224(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 224(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 240(%rdi), %xmm1
; SSE-NEXT:    movapd 240(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 240(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 240(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 240(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 256(%rdi), %xmm1
; SSE-NEXT:    movapd 256(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 256(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 256(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 256(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 272(%rdi), %xmm1
; SSE-NEXT:    movapd 272(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 272(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 272(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 272(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 288(%rdi), %xmm1
; SSE-NEXT:    movapd 288(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 288(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 288(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 288(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 304(%rdi), %xmm1
; SSE-NEXT:    movapd 304(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 304(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 304(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 304(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 320(%rdi), %xmm1
; SSE-NEXT:    movapd 320(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 320(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 320(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 320(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 336(%rdi), %xmm1
; SSE-NEXT:    movapd 336(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 336(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 336(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 336(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 352(%rdi), %xmm1
; SSE-NEXT:    movapd 352(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 352(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 352(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 352(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 368(%rdi), %xmm1
; SSE-NEXT:    movapd 368(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 368(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 368(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 368(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 384(%rdi), %xmm1
; SSE-NEXT:    movapd 384(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 384(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 384(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 384(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 400(%rdi), %xmm1
; SSE-NEXT:    movapd 400(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 400(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 400(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 400(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 416(%rdi), %xmm1
; SSE-NEXT:    movapd 416(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 416(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 416(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 416(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 432(%rdi), %xmm1
; SSE-NEXT:    movapd 432(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 432(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, (%rsp) # 16-byte Spill
; SSE-NEXT:    movapd 432(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 432(%rcx), %xmm2
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 448(%rdi), %xmm1
; SSE-NEXT:    movapd 448(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm1, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 448(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 448(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 448(%rcx), %xmm15
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm15[0]
; SSE-NEXT:    movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm0[1]
; SSE-NEXT:    movapd 464(%rdi), %xmm14
; SSE-NEXT:    movapd 464(%rsi), %xmm13
; SSE-NEXT:    movapd %xmm14, %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm13[0]
; SSE-NEXT:    movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movapd 464(%r8), %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm14 = xmm0[0],xmm14[1]
; SSE-NEXT:    movapd 464(%rdx), %xmm10
; SSE-NEXT:    unpckhpd {{.*#+}} xmm13 = xmm13[1],xmm10[1]
; SSE-NEXT:    movapd 464(%rcx), %xmm9
; SSE-NEXT:    unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm9[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm0[1]
; SSE-NEXT:    movapd 480(%rdi), %xmm11
; SSE-NEXT:    movapd 480(%rsi), %xmm8
; SSE-NEXT:    movapd %xmm11, %xmm12
; SSE-NEXT:    unpcklpd {{.*#+}} xmm12 = xmm12[0],xmm8[0]
; SSE-NEXT:    movapd 480(%r8), %xmm2
; SSE-NEXT:    movsd {{.*#+}} xmm11 = xmm2[0],xmm11[1]
; SSE-NEXT:    movapd 480(%rdx), %xmm6
; SSE-NEXT:    unpckhpd {{.*#+}} xmm8 = xmm8[1],xmm6[1]
; SSE-NEXT:    movapd 480(%rcx), %xmm3
; SSE-NEXT:    unpcklpd {{.*#+}} xmm6 = xmm6[0],xmm3[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSE-NEXT:    movapd 496(%rdi), %xmm4
; SSE-NEXT:    movapd 496(%rsi), %xmm2
; SSE-NEXT:    movapd %xmm4, %xmm5
; SSE-NEXT:    unpcklpd {{.*#+}} xmm5 = xmm5[0],xmm2[0]
; SSE-NEXT:    movapd 496(%r8), %xmm7
; SSE-NEXT:    movsd {{.*#+}} xmm4 = xmm7[0],xmm4[1]
; SSE-NEXT:    movapd 496(%rdx), %xmm1
; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movapd 496(%rcx), %xmm0
; SSE-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
; SSE-NEXT:    movapd %xmm0, 2544(%r9)
; SSE-NEXT:    movapd %xmm2, 2528(%r9)
; SSE-NEXT:    movapd %xmm4, 2512(%r9)
; SSE-NEXT:    movapd %xmm1, 2496(%r9)
; SSE-NEXT:    movapd %xmm5, 2480(%r9)
; SSE-NEXT:    movapd %xmm3, 2464(%r9)
; SSE-NEXT:    movapd %xmm8, 2448(%r9)
; SSE-NEXT:    movapd %xmm11, 2432(%r9)
; SSE-NEXT:    movapd %xmm6, 2416(%r9)
; SSE-NEXT:    movapd %xmm12, 2400(%r9)
; SSE-NEXT:    movapd %xmm9, 2384(%r9)
; SSE-NEXT:    movapd %xmm13, 2368(%r9)
; SSE-NEXT:    movapd %xmm14, 2352(%r9)
; SSE-NEXT:    movapd %xmm10, 2336(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2320(%r9)
; SSE-NEXT:    movapd %xmm15, 2304(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2288(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2272(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2256(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2240(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2224(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2208(%r9)
; SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2192(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2176(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2160(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2144(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2128(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2112(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2096(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2080(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2064(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2048(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2032(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2016(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 2000(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1984(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1968(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1952(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1936(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1920(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1904(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1888(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1872(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1856(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1840(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1824(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1808(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1792(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1776(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1760(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1744(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1728(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1712(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1696(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1680(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1664(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1648(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1632(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1616(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1600(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1584(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1568(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1552(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1536(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1520(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1504(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1488(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1472(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1456(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1440(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1424(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1408(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1392(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1376(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1360(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1344(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1328(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1312(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1296(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1280(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1264(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1248(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1232(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1216(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1200(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1184(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1168(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1152(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1136(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1120(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1104(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1088(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1072(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1056(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1040(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1024(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 1008(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 992(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 976(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 960(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 944(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 928(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 912(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 896(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 880(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 864(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 848(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 832(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 816(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 800(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 784(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 768(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 752(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 736(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 720(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 704(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 688(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 672(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 656(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 640(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 624(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 608(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 592(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 576(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 560(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 544(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 528(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 512(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 496(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 480(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 464(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 448(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 432(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 416(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 400(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 384(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 368(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 352(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 336(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 320(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 304(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 288(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 272(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 256(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 240(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 224(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 208(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 192(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 176(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 160(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 144(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 128(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 112(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 96(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 80(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 64(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 48(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 32(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, 16(%r9)
; SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT:    movaps %xmm0, (%r9)
; SSE-NEXT:    addq $2200, %rsp # imm = 0x898
; SSE-NEXT:    retq
;
; AVX-LABEL: store_i64_stride5_vf64:
; AVX:       # %bb.0:
; AVX-NEXT:    subq $2264, %rsp # imm = 0x8D8
; AVX-NEXT:    vmovaps 192(%rdi), %ymm14
; AVX-NEXT:    vmovaps 160(%rdi), %ymm4
; AVX-NEXT:    vmovaps 96(%rdi), %ymm5
; AVX-NEXT:    vmovaps 64(%rcx), %ymm1
; AVX-NEXT:    vmovaps 128(%rcx), %ymm0
; AVX-NEXT:    vmovaps (%rcx), %ymm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm3 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 16(%rdx), %xmm6
; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm6[1],ymm2[3],ymm6[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm2 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 80(%rdx), %xmm3
; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm3[1],ymm1[3],ymm3[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 96(%rcx), %xmm2
; AVX-NEXT:    vmovaps %xmm2, (%rsp) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vmovaps 144(%rdx), %xmm2
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm4[0],mem[0],ymm4[2],mem[2]
; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 160(%rcx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm14[0],mem[0],ymm14[2],mem[2]
; AVX-NEXT:    vmovaps %ymm14, %ymm2
; AVX-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 192(%rcx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 192(%rcx), %ymm0
; AVX-NEXT:    vmovaps 208(%rdx), %xmm1
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 256(%rcx), %ymm0
; AVX-NEXT:    vmovaps 272(%rdx), %xmm1
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovapd 288(%rdi), %ymm14
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm14[0],mem[0],ymm14[2],mem[2]
; AVX-NEXT:    vmovapd 288(%rcx), %xmm1
; AVX-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 320(%rdi), %ymm1
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX-NEXT:    vmovaps %ymm1, %ymm10
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 320(%rcx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 320(%rcx), %ymm0
; AVX-NEXT:    vmovaps 336(%rdx), %xmm1
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 384(%rdi), %ymm1
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX-NEXT:    vmovaps %ymm1, %ymm9
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 384(%rcx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 384(%rcx), %ymm0
; AVX-NEXT:    vmovaps 400(%rdx), %xmm1
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 448(%rcx), %ymm0
; AVX-NEXT:    vmovaps 464(%rdx), %xmm1
; AVX-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovapd 480(%rdi), %ymm1
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX-NEXT:    vmovapd %ymm1, %ymm13
; AVX-NEXT:    vmovapd 480(%rcx), %xmm1
; AVX-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps (%rdi), %ymm8
; AVX-NEXT:    vbroadcastsd 8(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps (%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm8[0],mem[0],ymm8[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovapd 32(%rdi), %ymm6
; AVX-NEXT:    vbroadcastsd 40(%rsi), %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
; AVX-NEXT:    vmovaps 32(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm6[0],mem[0],ymm6[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 64(%rdi), %ymm7
; AVX-NEXT:    vbroadcastsd 72(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 64(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm7[0],mem[0],ymm7[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 104(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 96(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 128(%rdi), %ymm5
; AVX-NEXT:    vbroadcastsd 136(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 128(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 168(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 160(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 200(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 192(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovapd 224(%rdi), %ymm3
; AVX-NEXT:    vbroadcastsd 232(%rsi), %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
; AVX-NEXT:    vmovaps 224(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm3[0],mem[0],ymm3[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 256(%rdi), %ymm4
; AVX-NEXT:    vbroadcastsd 264(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 256(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm4[0],mem[0],ymm4[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 296(%rsi), %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2,3]
; AVX-NEXT:    vmovaps 288(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 328(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 320(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovapd 352(%rdi), %ymm2
; AVX-NEXT:    vbroadcastsd 360(%rsi), %ymm0
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
; AVX-NEXT:    vmovaps 352(%rdx), %xmm1
; AVX-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vbroadcastsd 392(%rsi), %ymm0
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
; AVX-NEXT:    vmovaps 384(%rdx), %xmm15
; AVX-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm1
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovapd 416(%rdi), %ymm0
; AVX-NEXT:    vbroadcastsd 424(%rsi), %ymm1
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT:    vmovaps 416(%rdx), %xmm9
; AVX-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm9[3]
; AVX-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm9 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm9[2,3]
; AVX-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps 448(%rdi), %ymm1
; AVX-NEXT:    vbroadcastsd 456(%rsi), %ymm9
; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm9[4,5,6,7]
; AVX-NEXT:    vmovaps 448(%rdx), %xmm10
; AVX-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
; AVX-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7]
; AVX-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vpermilps {{.*#+}} xmm9 = mem[2,3,2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm10 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX-NEXT:    vblendps {{.*#+}} ymm11 = ymm9[0,1,2,3],ymm10[4,5,6,7]
; AVX-NEXT:    vbroadcastsd 488(%rsi), %ymm10
; AVX-NEXT:    vblendpd {{.*#+}} ymm12 = ymm13[0,1],ymm10[2,3]
; AVX-NEXT:    vmovapd %ymm13, %ymm9
; AVX-NEXT:    vmovaps 480(%rdx), %xmm10
; AVX-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm13
; AVX-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3]
; AVX-NEXT:    vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm12 = xmm12[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm12[0,1,2,3],ymm8[4,5,6,7]
; AVX-NEXT:    vmovaps (%r8), %ymm12
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0,1,2,3,4,5],ymm12[6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = ymm12[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0,1],ymm12[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm12[4,5],ymm8[6,7]
; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = mem[2,3],ymm6[2,3]
; AVX-NEXT:    vmovapd 48(%rdx), %xmm8
; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[2],ymm6[3]
; AVX-NEXT:    vmovapd 48(%rsi), %xmm12
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm12[1],xmm8[1]
; AVX-NEXT:    vbroadcastsd 56(%rcx), %ymm12
; AVX-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3]
; AVX-NEXT:    vmovapd 32(%r8), %ymm12
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = ymm12[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm13 # 32-byte Folded Reload
; AVX-NEXT:    # ymm13 = mem[0],ymm12[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm12[2],ymm6[3]
; AVX-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm8[0,1,2],ymm12[3]
; AVX-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
; AVX-NEXT:    vmovaps 64(%r8), %ymm7
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
; AVX-NEXT:    # ymm8 = mem[0,1,2,3,4,5],ymm7[6,7]
; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
; AVX-NEXT:    # ymm8 = ymm7[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
; AVX-NEXT:    # ymm8 = mem[0,1],ymm7[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
; AVX-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm6 = mem[2,3],ymm6[2,3]
; AVX-NEXT:    vmovapd 112(%rdx), %xmm7
; AVX-NEXT:    vshufpd {{.*#+}} ymm6 = ymm7[0],ymm6[0],ymm7[2],ymm6[3]
; AVX-NEXT:    vmovapd 112(%rsi), %xmm8
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm8[1],xmm7[1]
; AVX-NEXT:    vbroadcastsd 120(%rcx), %ymm8
; AVX-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3]
; AVX-NEXT:    vmovapd 96(%r8), %ymm8
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm12 # 32-byte Folded Reload
; AVX-NEXT:    # ymm12 = mem[0],ymm8[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm12 # 32-byte Folded Reload
; AVX-NEXT:    # ymm12 = ymm8[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2],ymm6[3]
; AVX-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1,2],ymm8[3]
; AVX-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
; AVX-NEXT:    vmovaps 128(%r8), %ymm6
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = mem[0,1,2,3,4,5],ymm6[6,7]
; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = ymm6[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = mem[0,1],ymm6[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm5 = mem[2,3],ymm5[2,3]
; AVX-NEXT:    vmovapd 176(%rdx), %xmm6
; AVX-NEXT:    vshufpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[3]
; AVX-NEXT:    vmovapd 176(%rsi), %xmm7
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm6[1]
; AVX-NEXT:    vbroadcastsd 184(%rcx), %ymm7
; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3]
; AVX-NEXT:    vmovapd 160(%r8), %ymm7
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
; AVX-NEXT:    # ymm8 = mem[0],ymm7[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
; AVX-NEXT:    # ymm8 = ymm7[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3]
; AVX-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1,2],ymm7[3]
; AVX-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm5 = xmm5[0],mem[0]
; AVX-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = ymm5[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovaps 192(%r8), %ymm6
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = mem[0,1],ymm6[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = mem[0,1,2,3,4,5],ymm6[6,7]
; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = ymm6[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3],ymm3[2,3]
; AVX-NEXT:    vmovapd 240(%rdx), %xmm5
; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[2],ymm3[3]
; AVX-NEXT:    vmovapd 240(%rsi), %xmm6
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm6[1],xmm5[1]
; AVX-NEXT:    vbroadcastsd 248(%rcx), %ymm6
; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm6[2,3]
; AVX-NEXT:    vmovapd 224(%r8), %ymm6
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = ymm6[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm7 # 32-byte Folded Reload
; AVX-NEXT:    # ymm7 = mem[0],ymm6[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3]
; AVX-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1,2],ymm6[3]
; AVX-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
; AVX-NEXT:    vmovaps 256(%r8), %ymm4
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1,2,3,4,5],ymm4[6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = ymm4[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm3 = mem[2,3],ymm14[2,3]
; AVX-NEXT:    vmovapd 304(%rdx), %xmm4
; AVX-NEXT:    vshufpd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[2],ymm3[3]
; AVX-NEXT:    vmovapd 304(%rsi), %xmm5
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm5[1],xmm4[1]
; AVX-NEXT:    vbroadcastsd 312(%rcx), %ymm5
; AVX-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3]
; AVX-NEXT:    vmovapd 288(%r8), %ymm5
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm6 # 32-byte Folded Reload
; AVX-NEXT:    # ymm6 = mem[0],ymm5[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm6 # 32-byte Folded Reload
; AVX-NEXT:    # ymm6 = ymm5[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3]
; AVX-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm4[0,1,2],ymm5[3]
; AVX-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
; AVX-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
; AVX-NEXT:    # ymm3 = ymm3[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovaps 320(%r8), %ymm4
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0,1,2,3,4,5],ymm4[6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = ymm4[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
; AVX-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm2 = mem[2,3],ymm2[2,3]
; AVX-NEXT:    vmovapd 368(%rdx), %xmm3
; AVX-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[2],ymm2[3]
; AVX-NEXT:    vmovapd 368(%rsi), %xmm4
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm4[1],xmm3[1]
; AVX-NEXT:    vbroadcastsd 376(%rcx), %ymm4
; AVX-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3]
; AVX-NEXT:    vmovapd 352(%r8), %ymm4
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = ymm4[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
; AVX-NEXT:    # ymm5 = mem[0],ymm4[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1,2],ymm4[3]
; AVX-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0]
; AVX-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
; AVX-NEXT:    # ymm2 = ymm2[0,1,2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovaps 384(%r8), %ymm3
; AVX-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
; AVX-NEXT:    # ymm4 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
; AVX-NEXT:    # ymm4 = mem[0,1,2,3,4,5],ymm3[6,7]
; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
; AVX-NEXT:    # ymm4 = ymm3[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX-NEXT:    vmovapd 432(%rdx), %xmm2
; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[2],ymm0[3]
; AVX-NEXT:    vmovapd 432(%rsi), %xmm3
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm2[1]
; AVX-NEXT:    vbroadcastsd 440(%rcx), %ymm3
; AVX-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3]
; AVX-NEXT:    vmovapd 416(%r8), %ymm3
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
; AVX-NEXT:    # ymm4 = ymm3[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm4 # 32-byte Folded Reload
; AVX-NEXT:    # ymm4 = mem[0],ymm3[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1,2],ymm3[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT:    vmovaps 448(%r8), %ymm1
; AVX-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1],ymm1[2,3],ymm11[4,5,6,7]
; AVX-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm9[2,3]
; AVX-NEXT:    vmovapd 496(%rdx), %xmm1
; AVX-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[3]
; AVX-NEXT:    vmovapd 496(%rsi), %xmm2
; AVX-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; AVX-NEXT:    vbroadcastsd 504(%rcx), %ymm2
; AVX-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
; AVX-NEXT:    vmovapd 480(%r8), %ymm2
; AVX-NEXT:    vblendpd $13, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
; AVX-NEXT:    # ymm3 = mem[0],ymm2[1],mem[2,3]
; AVX-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd $14, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
; AVX-NEXT:    # ymm3 = ymm2[0],mem[1,2,3]
; AVX-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3]
; AVX-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT:    # xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT:    # xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT:    # xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT:    # xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT:    # xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm0 # 16-byte Folded Reload
; AVX-NEXT:    # xmm0 = xmm10[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm7 # 16-byte Folded Reload
; AVX-NEXT:    # xmm7 = xmm15[0],mem[0]
; AVX-NEXT:    vmovaps 256(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps 128(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps 64(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps 32(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps 96(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps 160(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT:    vmovaps 224(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm5 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 192(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 288(%rdi), %xmm0
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 352(%rdi), %xmm15
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm15 = xmm15[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 320(%rdi), %xmm13
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm13 = xmm13[0],mem[0]
; AVX-NEXT:    vmovaps 416(%rdi), %xmm14
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm14 = xmm14[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 480(%rdi), %xmm12
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm12 = xmm12[0],mem[0]
; AVX-NEXT:    vmovaps 448(%rdi), %xmm11
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm11 = xmm11[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vmovaps 384(%rdi), %xmm10
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm10 = xmm10[0],mem[0]
; AVX-NEXT:    vmovaps (%rdi), %xmm9
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm9 = xmm9[0],mem[0]
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; AVX-NEXT:    vunpcklpd {{.*#+}} xmm8 = xmm8[0],mem[0]
; AVX-NEXT:    vmovaps %xmm8, 16(%r9)
; AVX-NEXT:    vmovaps %xmm9, (%r9)
; AVX-NEXT:    vmovaps %xmm7, 1936(%r9)
; AVX-NEXT:    vmovaps %xmm10, 1920(%r9)
; AVX-NEXT:    vmovaps %xmm0, 2256(%r9)
; AVX-NEXT:    vmovaps %xmm11, 2240(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 2416(%r9)
; AVX-NEXT:    vmovaps %xmm12, 2400(%r9)
; AVX-NEXT:    vmovaps %xmm1, 2096(%r9)
; AVX-NEXT:    vmovaps %xmm14, 2080(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 1616(%r9)
; AVX-NEXT:    vmovaps %xmm13, 1600(%r9)
; AVX-NEXT:    vmovaps %xmm2, 1776(%r9)
; AVX-NEXT:    vmovaps %xmm15, 1760(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 1456(%r9)
; AVX-NEXT:    vmovaps %xmm3, 1440(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 976(%r9)
; AVX-NEXT:    vmovaps %xmm4, 960(%r9)
; AVX-NEXT:    vmovaps %xmm5, 1136(%r9)
; AVX-NEXT:    vmovaps %xmm6, 1120(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 816(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 800(%r9)
; AVX-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 496(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 480(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 176(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 160(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 336(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 320(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 656(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 640(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 1296(%r9)
; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT:    vmovaps %xmm0, 1280(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2496(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2432(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2304(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2272(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2176(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2144(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2112(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1952(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1856(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1824(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1792(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1632(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1536(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1472(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1344(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1312(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1184(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1152(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 992(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 896(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 832(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 704(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 672(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 576(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2528(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2464(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2368(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2336(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2208(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2048(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 2016(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1984(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1888(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1728(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1696(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1664(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1568(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1504(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1408(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1376(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1248(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1088(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1056(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 1024(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 864(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 736(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX-NEXT:    addq $2264, %rsp # imm = 0x8D8
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
;
; AVX2-LABEL: store_i64_stride5_vf64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    subq $2696, %rsp # imm = 0xA88
; AVX2-NEXT:    vmovaps 96(%rdi), %ymm10
; AVX2-NEXT:    vmovaps 64(%rdi), %ymm11
; AVX2-NEXT:    vmovaps 32(%rdi), %ymm12
; AVX2-NEXT:    vmovaps (%rdi), %ymm13
; AVX2-NEXT:    vmovaps (%rsi), %xmm2
; AVX2-NEXT:    vmovaps 32(%rsi), %xmm4
; AVX2-NEXT:    vmovaps 64(%rsi), %xmm1
; AVX2-NEXT:    vmovaps 96(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, (%rcx), %ymm2, %ymm5
; AVX2-NEXT:    vmovaps (%rdi), %xmm6
; AVX2-NEXT:    vmovaps 32(%rdi), %xmm7
; AVX2-NEXT:    vmovaps 64(%rdi), %xmm2
; AVX2-NEXT:    vmovaps (%rdx), %xmm8
; AVX2-NEXT:    vmovaps 32(%rdx), %xmm9
; AVX2-NEXT:    vmovaps 64(%rdx), %xmm3
; AVX2-NEXT:    vinsertf128 $1, %xmm8, %ymm6, %ymm6
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
; AVX2-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm5
; AVX2-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm13[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm13[0],mem[0],ymm13[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 32(%rcx), %ymm4, %ymm4
; AVX2-NEXT:    vinsertf128 $1, %xmm9, %ymm7, %ymm5
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm4
; AVX2-NEXT:    vbroadcastsd 40(%rsi), %ymm5
; AVX2-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3],ymm5[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm12[0],mem[0],ymm12[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 64(%rcx), %ymm1, %ymm1
; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
; AVX2-NEXT:    vbroadcastsd 72(%rsi), %ymm2
; AVX2-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm11[0],mem[0],ymm11[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, 96(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm10[0],mem[0],ymm10[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 128(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 128(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 128(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 128(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 136(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 160(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 160(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 160(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 160(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 160(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 168(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 192(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 192(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 192(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 192(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 192(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 200(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 224(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 224(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 224(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 224(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 224(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 232(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 256(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 256(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 256(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 256(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 256(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 264(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 288(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 288(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 288(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 288(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 288(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 296(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 320(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 320(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 320(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 320(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 320(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 328(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 352(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 352(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 352(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 352(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 352(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 360(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 384(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 384(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 384(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 384(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 384(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 392(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 416(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 416(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 416(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 416(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 416(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 424(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 448(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 448(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 448(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 448(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 448(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 456(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 480(%rsi), %xmm0
; AVX2-NEXT:    vinsertf128 $1, 480(%rcx), %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 480(%rdi), %xmm1
; AVX2-NEXT:    vmovaps 480(%rdx), %xmm2
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vmovaps 480(%rdi), %ymm2
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 488(%rsi), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 120(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 120(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 184(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 184(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 248(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 248(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 312(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 312(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 376(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 376(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 440(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 440(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vbroadcastsd 504(%rsi), %ymm0
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vbroadcastsd 504(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps (%rdx), %ymm9
; AVX2-NEXT:    vmovaps (%rcx), %ymm1
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm9[1],ymm1[1],ymm9[3],ymm1[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 24(%rsi), %ymm3
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 64(%rdx), %ymm13
; AVX2-NEXT:    vmovaps 64(%rcx), %ymm2
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm13[1],ymm2[1],ymm13[3],ymm2[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 88(%rsi), %ymm4
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm3[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 128(%rdx), %ymm3
; AVX2-NEXT:    vmovaps 128(%rcx), %ymm15
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm3[1],ymm15[1],ymm3[3],ymm15[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 152(%rsi), %ymm5
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm4[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 192(%rdx), %ymm5
; AVX2-NEXT:    vmovaps 192(%rcx), %ymm4
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 216(%rsi), %ymm7
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm6[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 256(%rdx), %ymm7
; AVX2-NEXT:    vmovaps 256(%rcx), %ymm6
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm7[1],ymm6[1],ymm7[3],ymm6[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 280(%rsi), %ymm10
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm8[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 320(%rdx), %ymm10
; AVX2-NEXT:    vmovaps 320(%rcx), %ymm8
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm11 = ymm10[1],ymm8[1],ymm10[3],ymm8[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 344(%rsi), %ymm12
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm11[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 384(%rdx), %ymm0
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 384(%rcx), %ymm11
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm14 = ymm0[1],ymm11[1],ymm0[3],ymm11[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm14 = ymm14[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 408(%rsi), %ymm12
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm14[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 448(%rdx), %ymm12
; AVX2-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovaps 448(%rcx), %ymm0
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm12[1],ymm0[1],ymm12[3],ymm0[3]
; AVX2-NEXT:    vpermpd {{.*#+}} ymm12 = ymm12[0,2,3,3]
; AVX2-NEXT:    vbroadcastsd 472(%rsi), %ymm14
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm9[0],ymm1[0],ymm9[2],ymm1[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps (%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm12 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm12 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm9 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 48(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 32(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm9 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm9 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm9 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm2[0],ymm13[2],ymm2[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 64(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 112(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 96(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, (%rsp), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm15[0],ymm3[2],ymm15[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 128(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 176(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 160(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 192(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 240(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 224(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm6[0],ymm7[2],ymm6[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 256(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 304(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 288(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 320(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 368(%rcx), %ymm1
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-NEXT:    vmovaps 352(%r8), %ymm1
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm15 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm11[0],ymm0[2],ymm11[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 384(%r8), %ymm11
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm13 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm13 = ymm11[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm10 = mem[0,1],ymm11[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm1 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm1 = mem[0,1,2,3,4,5],ymm11[6,7]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 432(%rcx), %ymm12
; AVX2-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
; AVX2-NEXT:    vmovaps 416(%r8), %ymm12
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm8 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm8 = ymm12[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm7 = mem[0,1],ymm12[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm12[6,7]
; AVX2-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm11 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm11 = ymm11[2,3],mem[2,3]
; AVX2-NEXT:    vmovaps 448(%r8), %ymm0
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm5 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm4 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm1 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3],ymm0[2,3]
; AVX2-NEXT:    vbroadcastsd 496(%rcx), %ymm12
; AVX2-NEXT:    vblendps {{.*#+}} ymm12 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
; AVX2-NEXT:    vmovaps 480(%r8), %ymm0
; AVX2-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm2 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm1 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-NEXT:    # ymm11 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5],ymm12[6,7]
; AVX2-NEXT:    vmovaps %ymm0, 2496(%r9)
; AVX2-NEXT:    vmovaps %ymm1, 2464(%r9)
; AVX2-NEXT:    vmovaps %ymm2, 2432(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 2400(%r9)
; AVX2-NEXT:    vmovaps %ymm3, 2336(%r9)
; AVX2-NEXT:    vmovaps %ymm4, 2304(%r9)
; AVX2-NEXT:    vmovaps %ymm5, 2272(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 2240(%r9)
; AVX2-NEXT:    vmovaps %ymm6, 2176(%r9)
; AVX2-NEXT:    vmovaps %ymm7, 2144(%r9)
; AVX2-NEXT:    vmovaps %ymm8, 2112(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 2080(%r9)
; AVX2-NEXT:    vmovaps %ymm9, 2016(%r9)
; AVX2-NEXT:    vmovaps %ymm10, 1984(%r9)
; AVX2-NEXT:    vmovaps %ymm13, 1952(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1920(%r9)
; AVX2-NEXT:    vmovaps %ymm14, 1856(%r9)
; AVX2-NEXT:    vmovaps %ymm15, 1824(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1792(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1760(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1696(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1664(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1632(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1600(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1536(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1504(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1472(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1440(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1376(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1344(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1312(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1280(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1184(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1152(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1120(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1056(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1024(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 992(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 960(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 896(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 864(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 832(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 800(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 736(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 704(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 672(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 640(%r9)
; AVX2-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 576(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 2368(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 2048(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1728(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1408(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1088(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-NEXT:    vmovaps %ymm11, 2528(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 2208(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1888(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1568(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 1248(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX2-NEXT:    addq $2696, %rsp # imm = 0xA88
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retq
;
; AVX2-FP-LABEL: store_i64_stride5_vf64:
; AVX2-FP:       # %bb.0:
; AVX2-FP-NEXT:    subq $2696, %rsp # imm = 0xA88
; AVX2-FP-NEXT:    vmovaps 96(%rdi), %ymm10
; AVX2-FP-NEXT:    vmovaps 64(%rdi), %ymm11
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %ymm12
; AVX2-FP-NEXT:    vmovaps (%rdi), %ymm13
; AVX2-FP-NEXT:    vmovaps (%rsi), %xmm2
; AVX2-FP-NEXT:    vmovaps 32(%rsi), %xmm4
; AVX2-FP-NEXT:    vmovaps 64(%rsi), %xmm1
; AVX2-FP-NEXT:    vmovaps 96(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, (%rcx), %ymm2, %ymm5
; AVX2-FP-NEXT:    vmovaps (%rdi), %xmm6
; AVX2-FP-NEXT:    vmovaps 32(%rdi), %xmm7
; AVX2-FP-NEXT:    vmovaps 64(%rdi), %xmm2
; AVX2-FP-NEXT:    vmovaps (%rdx), %xmm8
; AVX2-FP-NEXT:    vmovaps 32(%rdx), %xmm9
; AVX2-FP-NEXT:    vmovaps 64(%rdx), %xmm3
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm8, %ymm6, %ymm6
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
; AVX2-FP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm5
; AVX2-FP-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-FP-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm13[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm13[0],mem[0],ymm13[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm4, %ymm4
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm9, %ymm7, %ymm5
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm4
; AVX2-FP-NEXT:    vbroadcastsd 40(%rsi), %ymm5
; AVX2-FP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm12[0],mem[0],ymm12[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 64(%rcx), %ymm1, %ymm1
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
; AVX2-FP-NEXT:    vbroadcastsd 72(%rsi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm11[0],mem[0],ymm11[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, 96(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-FP-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm10[0],mem[0],ymm10[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 128(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 128(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 128(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 128(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 136(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 160(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 160(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 160(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 160(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 160(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 168(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 192(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 192(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 192(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 192(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 192(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 200(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 224(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 224(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 224(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 224(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 224(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 232(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 256(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 256(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 256(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 256(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 256(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 264(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 288(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 288(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 288(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 288(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 288(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 296(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 320(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 320(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 320(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 320(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 320(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 328(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 352(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 352(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 352(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 352(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 352(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 360(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 384(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 384(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 384(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 384(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 384(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 392(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 416(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 416(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 416(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 416(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 416(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 424(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 448(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 448(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 448(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 448(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 448(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 456(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 480(%rsi), %xmm0
; AVX2-FP-NEXT:    vinsertf128 $1, 480(%rcx), %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 480(%rdi), %xmm1
; AVX2-FP-NEXT:    vmovaps 480(%rdx), %xmm2
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FP-NEXT:    vmovaps 480(%rdi), %ymm2
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 488(%rsi), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 120(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 120(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 184(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 184(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 248(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 248(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 312(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 312(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 376(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 376(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 440(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 440(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vbroadcastsd 504(%rsi), %ymm0
; AVX2-FP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 504(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps (%rdx), %ymm9
; AVX2-FP-NEXT:    vmovaps (%rcx), %ymm1
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm9[1],ymm1[1],ymm9[3],ymm1[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 24(%rsi), %ymm3
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 64(%rdx), %ymm13
; AVX2-FP-NEXT:    vmovaps 64(%rcx), %ymm2
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm13[1],ymm2[1],ymm13[3],ymm2[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 88(%rsi), %ymm4
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm3[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 128(%rdx), %ymm3
; AVX2-FP-NEXT:    vmovaps 128(%rcx), %ymm15
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm3[1],ymm15[1],ymm3[3],ymm15[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 152(%rsi), %ymm5
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm4[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 192(%rdx), %ymm5
; AVX2-FP-NEXT:    vmovaps 192(%rcx), %ymm4
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 216(%rsi), %ymm7
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm6[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 256(%rdx), %ymm7
; AVX2-FP-NEXT:    vmovaps 256(%rcx), %ymm6
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm7[1],ymm6[1],ymm7[3],ymm6[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 280(%rsi), %ymm10
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm8[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 320(%rdx), %ymm10
; AVX2-FP-NEXT:    vmovaps 320(%rcx), %ymm8
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm11 = ymm10[1],ymm8[1],ymm10[3],ymm8[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 344(%rsi), %ymm12
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm11[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 384(%rdx), %ymm0
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 384(%rcx), %ymm11
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm14 = ymm0[1],ymm11[1],ymm0[3],ymm11[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm14 = ymm14[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 408(%rsi), %ymm12
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm14[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 448(%rdx), %ymm12
; AVX2-FP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovaps 448(%rcx), %ymm0
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm12[1],ymm0[1],ymm12[3],ymm0[3]
; AVX2-FP-NEXT:    vpermpd {{.*#+}} ymm12 = ymm12[0,2,3,3]
; AVX2-FP-NEXT:    vbroadcastsd 472(%rsi), %ymm14
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm9[0],ymm1[0],ymm9[2],ymm1[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps (%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm12 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm12 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm9 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 48(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 32(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm9 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm9 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm9 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm2[0],ymm13[2],ymm2[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 64(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 112(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 96(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, (%rsp), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm15[0],ymm3[2],ymm15[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 128(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 176(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 160(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 192(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 240(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 224(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm6[0],ymm7[2],ymm6[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 256(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 304(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 288(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 320(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 368(%rcx), %ymm1
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 352(%r8), %ymm1
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm15 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm11[0],ymm0[2],ymm11[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 384(%r8), %ymm11
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm13 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm13 = ymm11[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm10 = mem[0,1],ymm11[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm1 = mem[0,1,2,3,4,5],ymm11[6,7]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 432(%rcx), %ymm12
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 416(%r8), %ymm12
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm8 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm8 = ymm12[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm7 = mem[0,1],ymm12[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm12[6,7]
; AVX2-FP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm11 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-FP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm11 = ymm11[2,3],mem[2,3]
; AVX2-FP-NEXT:    vmovaps 448(%r8), %ymm0
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm5 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm4 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm1 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3],ymm0[2,3]
; AVX2-FP-NEXT:    vbroadcastsd 496(%rcx), %ymm12
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm12 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
; AVX2-FP-NEXT:    vmovaps 480(%r8), %ymm0
; AVX2-FP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm2 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm1 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-FP-NEXT:    # ymm11 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FP-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5],ymm12[6,7]
; AVX2-FP-NEXT:    vmovaps %ymm0, 2496(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm1, 2464(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm2, 2432(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 2400(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm3, 2336(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm4, 2304(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm5, 2272(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 2240(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm6, 2176(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm7, 2144(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm8, 2112(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 2080(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm9, 2016(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm10, 1984(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm13, 1952(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1920(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm14, 1856(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm15, 1824(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1792(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1760(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1696(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1664(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1632(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1600(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1536(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1504(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1472(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1440(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1376(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1344(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1312(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1280(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1184(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1152(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1120(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1056(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1024(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 992(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 960(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 896(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 864(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 832(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 800(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 736(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 704(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 672(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 640(%r9)
; AVX2-FP-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 576(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 2368(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 2048(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1728(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1408(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1088(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-FP-NEXT:    vmovaps %ymm11, 2528(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 2208(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1888(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1568(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 1248(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX2-FP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FP-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX2-FP-NEXT:    addq $2696, %rsp # imm = 0xA88
; AVX2-FP-NEXT:    vzeroupper
; AVX2-FP-NEXT:    retq
;
; AVX2-FCP-LABEL: store_i64_stride5_vf64:
; AVX2-FCP:       # %bb.0:
; AVX2-FCP-NEXT:    subq $2696, %rsp # imm = 0xA88
; AVX2-FCP-NEXT:    vmovaps 96(%rdi), %ymm10
; AVX2-FCP-NEXT:    vmovaps 64(%rdi), %ymm11
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %ymm12
; AVX2-FCP-NEXT:    vmovaps (%rdi), %ymm13
; AVX2-FCP-NEXT:    vmovaps (%rsi), %xmm2
; AVX2-FCP-NEXT:    vmovaps 32(%rsi), %xmm4
; AVX2-FCP-NEXT:    vmovaps 64(%rsi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 96(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, (%rcx), %ymm2, %ymm5
; AVX2-FCP-NEXT:    vmovaps (%rdi), %xmm6
; AVX2-FCP-NEXT:    vmovaps 32(%rdi), %xmm7
; AVX2-FCP-NEXT:    vmovaps 64(%rdi), %xmm2
; AVX2-FCP-NEXT:    vmovaps (%rdx), %xmm8
; AVX2-FCP-NEXT:    vmovaps 32(%rdx), %xmm9
; AVX2-FCP-NEXT:    vmovaps 64(%rdx), %xmm3
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm8, %ymm6, %ymm6
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[2],ymm5[2]
; AVX2-FCP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm5
; AVX2-FCP-NEXT:    vbroadcastsd 8(%rsi), %ymm6
; AVX2-FCP-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm13[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm13[0],mem[0],ymm13[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 32(%rcx), %ymm4, %ymm4
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm9, %ymm7, %ymm5
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm4
; AVX2-FCP-NEXT:    vbroadcastsd 40(%rsi), %ymm5
; AVX2-FCP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm5 = ymm12[0],mem[0],ymm12[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 64(%rcx), %ymm1, %ymm1
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[2],ymm1[2]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
; AVX2-FCP-NEXT:    vbroadcastsd 72(%rsi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm11[0],mem[0],ymm11[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, 96(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 96(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 96(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vbroadcastsd 104(%rsi), %ymm1
; AVX2-FCP-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm10[0],mem[0],ymm10[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 128(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 128(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 128(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 128(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 128(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 136(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 160(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 160(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 160(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 160(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 160(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 168(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 192(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 192(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 192(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 192(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 192(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 200(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 224(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 224(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 224(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 224(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 224(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 232(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 256(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 256(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 256(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 256(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 256(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 264(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 288(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 288(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 288(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 288(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 288(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 296(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 320(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 320(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 320(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 320(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 320(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 328(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 352(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 352(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 352(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 352(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 352(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 360(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 384(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 384(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 384(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 384(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 384(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 392(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 416(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 416(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 416(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 416(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 416(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 424(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 448(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 448(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 448(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 448(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 448(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 456(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 480(%rsi), %xmm0
; AVX2-FCP-NEXT:    vinsertf128 $1, 480(%rcx), %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 480(%rdi), %xmm1
; AVX2-FCP-NEXT:    vmovaps 480(%rdx), %xmm2
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-FCP-NEXT:    vmovaps 480(%rdi), %ymm2
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 488(%rsi), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm1 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 56(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 120(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 120(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 184(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 184(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 248(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 248(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 312(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 312(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 376(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 376(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 440(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 440(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vbroadcastsd 504(%rsi), %ymm0
; AVX2-FCP-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 504(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps (%rdx), %ymm9
; AVX2-FCP-NEXT:    vmovaps (%rcx), %ymm1
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm9[1],ymm1[1],ymm9[3],ymm1[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 24(%rsi), %ymm3
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm2[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 64(%rdx), %ymm13
; AVX2-FCP-NEXT:    vmovaps 64(%rcx), %ymm2
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm13[1],ymm2[1],ymm13[3],ymm2[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm3 = ymm3[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 88(%rsi), %ymm4
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm3[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 128(%rdx), %ymm3
; AVX2-FCP-NEXT:    vmovaps 128(%rcx), %ymm15
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm3[1],ymm15[1],ymm3[3],ymm15[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 152(%rsi), %ymm5
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm4[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 192(%rdx), %ymm5
; AVX2-FCP-NEXT:    vmovaps 192(%rcx), %ymm4
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm5[1],ymm4[1],ymm5[3],ymm4[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 216(%rsi), %ymm7
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm6[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 256(%rdx), %ymm7
; AVX2-FCP-NEXT:    vmovaps 256(%rcx), %ymm6
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm7[1],ymm6[1],ymm7[3],ymm6[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 280(%rsi), %ymm10
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm8[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 320(%rdx), %ymm10
; AVX2-FCP-NEXT:    vmovaps 320(%rcx), %ymm8
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm11 = ymm10[1],ymm8[1],ymm10[3],ymm8[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 344(%rsi), %ymm12
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm11[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 384(%rdx), %ymm0
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 384(%rcx), %ymm11
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm14 = ymm0[1],ymm11[1],ymm0[3],ymm11[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm14 = ymm14[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 408(%rsi), %ymm12
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm14[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 448(%rdx), %ymm12
; AVX2-FCP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovaps 448(%rcx), %ymm0
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm12[1],ymm0[1],ymm12[3],ymm0[3]
; AVX2-FCP-NEXT:    vpermpd {{.*#+}} ymm12 = ymm12[0,2,3,3]
; AVX2-FCP-NEXT:    vbroadcastsd 472(%rsi), %ymm14
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1],ymm12[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm9[0],ymm1[0],ymm9[2],ymm1[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps (%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm12 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm12 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm9 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 48(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 32(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm9 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm9 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm9 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm2[0],ymm13[2],ymm2[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 64(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 112(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 96(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, (%rsp), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm15[0],ymm3[2],ymm15[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 128(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 176(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 160(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[2],ymm4[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 192(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 240(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 224(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm6[0],ymm7[2],ymm6[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 256(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 304(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 288(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm10[0],ymm8[0],ymm10[2],ymm8[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 320(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 368(%rcx), %ymm1
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 352(%r8), %ymm1
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm1[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm15 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm11[0],ymm0[2],ymm11[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = ymm0[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 384(%r8), %ymm11
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm13 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm13 = ymm11[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm10 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm10 = mem[0,1],ymm11[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm1 = mem[0,1,2,3,4,5],ymm11[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm11[4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 432(%rcx), %ymm12
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 416(%r8), %ymm12
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm8 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm8 = ymm12[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm7 = mem[0,1],ymm12[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm12[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm11 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-FCP-NEXT:    vperm2f128 $49, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm11 = ymm11[2,3],mem[2,3]
; AVX2-FCP-NEXT:    vmovaps 448(%r8), %ymm0
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm5 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm4 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm1 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm0[4,5],ymm11[6,7]
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vperm2f128 {{.*#+}} ymm11 = mem[2,3],ymm0[2,3]
; AVX2-FCP-NEXT:    vbroadcastsd 496(%rcx), %ymm12
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm12 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
; AVX2-FCP-NEXT:    vmovaps 480(%r8), %ymm0
; AVX2-FCP-NEXT:    vblendps $252, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm2 = ymm0[0,1],mem[2,3,4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm1 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
; AVX2-FCP-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
; AVX2-FCP-NEXT:    # ymm11 = mem[0,1,2,3,4,5],ymm0[6,7]
; AVX2-FCP-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5],ymm12[6,7]
; AVX2-FCP-NEXT:    vmovaps %ymm0, 2496(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm1, 2464(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm2, 2432(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 2400(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm3, 2336(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm4, 2304(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm5, 2272(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 2240(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm6, 2176(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm7, 2144(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm8, 2112(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 2080(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm9, 2016(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm10, 1984(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm13, 1952(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1920(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm14, 1856(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm15, 1824(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1792(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1760(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1696(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1664(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1632(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1600(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1536(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1504(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1472(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1440(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1376(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1344(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1312(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1280(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1216(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1184(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1152(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1120(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1056(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1024(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 992(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 960(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 896(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 864(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 832(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 800(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 736(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 704(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 672(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 640(%r9)
; AVX2-FCP-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 576(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 544(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 512(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 480(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 416(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 384(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 352(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 320(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 256(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 224(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 192(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 160(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 96(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 64(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 32(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, (%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 2368(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 2048(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1728(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1408(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1088(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 768(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 448(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 128(%r9)
; AVX2-FCP-NEXT:    vmovaps %ymm11, 2528(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 2208(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1888(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1568(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 1248(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 928(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 608(%r9)
; AVX2-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-FCP-NEXT:    vmovaps %ymm0, 288(%r9)
; AVX2-FCP-NEXT:    addq $2696, %rsp # imm = 0xA88
; AVX2-FCP-NEXT:    vzeroupper
; AVX2-FCP-NEXT:    retq
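;
; A rough, illustrative sketch (comment only, not part of the test input or the
; generated checks) of the concat-and-shuffle IR shape a stride-5 interleaved
; i64 store takes, shown at a small width for readability; the value names
; %a..%e and %out are hypothetical. At vf64 the same pattern would widen to a
; single <320 x i64> store (64 lanes x 5 streams), i.e. the 2560 bytes written
; through (%r9) in the block above:
;
;   %ab     = shufflevector <2 x i64> %a, <2 x i64> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;   %cd     = shufflevector <2 x i64> %c, <2 x i64> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;   %abcd   = shufflevector <4 x i64> %ab, <4 x i64> %cd, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
;   %e8     = shufflevector <2 x i64> %e, <2 x i64> poison, <8 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
;   %ileave = shufflevector <8 x i64> %abcd, <8 x i64> %e8, <10 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 1, i32 3, i32 5, i32 7, i32 9>
;   store <10 x i64> %ileave, ptr %out, align 8
;
; The AVX2-FCP lowering above realizes this with 128-bit inserts, unpck[lh]pd,
; and blends plus heavy stack spilling; the AVX512 lowering below instead
; drives vpermt2q with constant index vectors and merges lanes through the
; opmask registers (%k1-%k3).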
;
; AVX512-LABEL: store_i64_stride5_vf64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512-NEXT:    movb $49, %al
; AVX512-NEXT:    kmovw %eax, %k1
; AVX512-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512-NEXT:    movb $-116, %al
; AVX512-NEXT:    kmovw %eax, %k3
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512-NEXT:    movb $24, %al
; AVX512-NEXT:    kmovw %eax, %k2
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    movb $8, %al
; AVX512-NEXT:    kmovw %eax, %k3
; AVX512-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT:    vmovaps %zmm0, (%r9)
; AVX512-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
;
; AVX512-FCP-LABEL: store_i64_stride5_vf64:
; AVX512-FCP:       # %bb.0:
; AVX512-FCP-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512-FCP-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512-FCP-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512-FCP-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512-FCP-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512-FCP-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512-FCP-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512-FCP-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512-FCP-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512-FCP-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512-FCP-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512-FCP-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512-FCP-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512-FCP-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512-FCP-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512-FCP-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512-FCP-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512-FCP-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512-FCP-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512-FCP-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512-FCP-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512-FCP-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512-FCP-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512-FCP-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512-FCP-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512-FCP-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512-FCP-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512-FCP-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512-FCP-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512-FCP-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512-FCP-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512-FCP-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512-FCP-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512-FCP-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512-FCP-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512-FCP-NEXT:    movb $49, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k1
; AVX512-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512-FCP-NEXT:    movb $-116, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k3
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512-FCP-NEXT:    movb $24, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k2
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    movb $8, %al
; AVX512-FCP-NEXT:    kmovw %eax, %k3
; AVX512-FCP-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512-FCP-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512-FCP-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512-FCP-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512-FCP-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512-FCP-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512-FCP-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512-FCP-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512-FCP-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512-FCP-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-FCP-NEXT:    vmovaps %zmm0, (%r9)
; AVX512-FCP-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512-FCP-NEXT:    vzeroupper
; AVX512-FCP-NEXT:    retq
;
; AVX512DQ-LABEL: store_i64_stride5_vf64:
; AVX512DQ:       # %bb.0:
; AVX512DQ-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512DQ-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512DQ-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512DQ-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512DQ-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512DQ-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512DQ-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512DQ-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512DQ-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512DQ-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512DQ-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512DQ-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512DQ-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512DQ-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512DQ-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512DQ-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512DQ-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512DQ-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512DQ-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512DQ-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512DQ-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512DQ-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512DQ-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512DQ-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512DQ-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512DQ-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512DQ-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512DQ-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512DQ-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512DQ-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512DQ-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512DQ-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512DQ-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512DQ-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512DQ-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512DQ-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512DQ-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512DQ-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512DQ-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512DQ-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512DQ-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512DQ-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512DQ-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512DQ-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512DQ-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512DQ-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512DQ-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512DQ-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512DQ-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512DQ-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512DQ-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512DQ-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512DQ-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512DQ-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512DQ-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512DQ-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512DQ-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512DQ-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512DQ-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512DQ-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512DQ-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512DQ-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512DQ-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512DQ-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512DQ-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512DQ-NEXT:    movb $49, %al
; AVX512DQ-NEXT:    kmovw %eax, %k1
; AVX512DQ-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512DQ-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512DQ-NEXT:    movb $-116, %al
; AVX512DQ-NEXT:    kmovw %eax, %k3
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512DQ-NEXT:    movb $24, %al
; AVX512DQ-NEXT:    kmovw %eax, %k2
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    movb $8, %al
; AVX512DQ-NEXT:    kmovw %eax, %k3
; AVX512DQ-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512DQ-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512DQ-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512DQ-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512DQ-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512DQ-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512DQ-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512DQ-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512DQ-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512DQ-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512DQ-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512DQ-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512DQ-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512DQ-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-NEXT:    vmovaps %zmm0, (%r9)
; AVX512DQ-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512DQ-NEXT:    vzeroupper
; AVX512DQ-NEXT:    retq
;
; AVX512DQ-FCP-LABEL: store_i64_stride5_vf64:
; AVX512DQ-FCP:       # %bb.0:
; AVX512DQ-FCP-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512DQ-FCP-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512DQ-FCP-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512DQ-FCP-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512DQ-FCP-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512DQ-FCP-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512DQ-FCP-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512DQ-FCP-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512DQ-FCP-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512DQ-FCP-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512DQ-FCP-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512DQ-FCP-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512DQ-FCP-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512DQ-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512DQ-FCP-NEXT:    movb $49, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k1
; AVX512DQ-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512DQ-FCP-NEXT:    movb $-116, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k3
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512DQ-FCP-NEXT:    movb $24, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k2
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    movb $8, %al
; AVX512DQ-FCP-NEXT:    kmovw %eax, %k3
; AVX512DQ-FCP-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512DQ-FCP-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512DQ-FCP-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512DQ-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512DQ-FCP-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-FCP-NEXT:    vmovaps %zmm0, (%r9)
; AVX512DQ-FCP-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512DQ-FCP-NEXT:    vzeroupper
; AVX512DQ-FCP-NEXT:    retq
;
; AVX512BW-LABEL: store_i64_stride5_vf64:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512BW-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512BW-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512BW-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512BW-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512BW-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512BW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512BW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512BW-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512BW-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512BW-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512BW-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512BW-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512BW-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512BW-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512BW-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512BW-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512BW-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512BW-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512BW-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512BW-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512BW-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512BW-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512BW-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512BW-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512BW-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512BW-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512BW-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512BW-NEXT:    movb $49, %al
; AVX512BW-NEXT:    kmovd %eax, %k1
; AVX512BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512BW-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512BW-NEXT:    movb $-116, %al
; AVX512BW-NEXT:    kmovd %eax, %k3
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512BW-NEXT:    movb $24, %al
; AVX512BW-NEXT:    kmovd %eax, %k2
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    movb $8, %al
; AVX512BW-NEXT:    kmovd %eax, %k3
; AVX512BW-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512BW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512BW-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512BW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512BW-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512BW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512BW-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512BW-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512BW-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512BW-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512BW-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512BW-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512BW-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512BW-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512BW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512BW-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512BW-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-NEXT:    vmovaps %zmm0, (%r9)
; AVX512BW-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BW-FCP-LABEL: store_i64_stride5_vf64:
; AVX512BW-FCP:       # %bb.0:
; AVX512BW-FCP-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512BW-FCP-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512BW-FCP-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512BW-FCP-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512BW-FCP-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512BW-FCP-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512BW-FCP-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512BW-FCP-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512BW-FCP-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512BW-FCP-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512BW-FCP-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512BW-FCP-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512BW-FCP-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512BW-FCP-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512BW-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512BW-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512BW-FCP-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512BW-FCP-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512BW-FCP-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512BW-FCP-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512BW-FCP-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512BW-FCP-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512BW-FCP-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512BW-FCP-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512BW-FCP-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512BW-FCP-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512BW-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512BW-FCP-NEXT:    movb $49, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512BW-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512BW-FCP-NEXT:    movb $-116, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512BW-FCP-NEXT:    movb $24, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    movb $8, %al
; AVX512BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512BW-FCP-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512BW-FCP-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512BW-FCP-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512BW-FCP-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512BW-FCP-NEXT:    vmovaps %zmm0, (%r9)
; AVX512BW-FCP-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512BW-FCP-NEXT:    vzeroupper
; AVX512BW-FCP-NEXT:    retq
;
; AVX512DQ-BW-LABEL: store_i64_stride5_vf64:
; AVX512DQ-BW:       # %bb.0:
; AVX512DQ-BW-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512DQ-BW-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512DQ-BW-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512DQ-BW-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512DQ-BW-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512DQ-BW-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512DQ-BW-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512DQ-BW-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512DQ-BW-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-BW-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-BW-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-BW-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512DQ-BW-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512DQ-BW-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512DQ-BW-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-BW-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512DQ-BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512DQ-BW-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512DQ-BW-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512DQ-BW-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512DQ-BW-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512DQ-BW-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512DQ-BW-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512DQ-BW-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512DQ-BW-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512DQ-BW-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512DQ-BW-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512DQ-BW-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512DQ-BW-NEXT:    movb $49, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512DQ-BW-NEXT:    movb $-116, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512DQ-BW-NEXT:    movb $24, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    movb $8, %al
; AVX512DQ-BW-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-BW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512DQ-BW-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512DQ-BW-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512DQ-BW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512DQ-BW-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512DQ-BW-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512DQ-BW-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512DQ-BW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-NEXT:    vmovaps %zmm0, (%r9)
; AVX512DQ-BW-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512DQ-BW-NEXT:    vzeroupper
; AVX512DQ-BW-NEXT:    retq
;
; AVX512DQ-BW-FCP-LABEL: store_i64_stride5_vf64:
; AVX512DQ-BW-FCP:       # %bb.0:
; AVX512DQ-BW-FCP-NEXT:    subq $3144, %rsp # imm = 0xC48
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 320(%rdi), %zmm6
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 256(%rdi), %zmm7
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdi), %zmm11
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm10
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm9
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rdi), %zmm8
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 256(%rsi), %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rsi), %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rsi), %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rsi), %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rsi), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm20 = [3,0,0,0,12,4,0,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm5, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm11, %zmm20, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm12, (%rsp) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm4, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm10, %zmm20, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm9, %zmm20, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm8, %zmm20, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm7, %zmm20, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,8,0,0,0,1,9,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm5, %zmm21, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm13 = [0,0,2,10,0,0,0,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm5, %zmm13, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm14 = [0,5,13,0,0,0,6,14]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm5, %zmm14, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [15,7,15,7,15,7,15,7]
; AVX512DQ-BW-FCP-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm5, %zmm0, %zmm11
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm21, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm13, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm14, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm4, %zmm0, %zmm10
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm3, %zmm21, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm3, %zmm13, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm3, %zmm14, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm3, %zmm0, %zmm9
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm13, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm14, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm0, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 320(%rsi), %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm6, %zmm20, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm6
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 384(%rdi), %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 384(%rsi), %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm3, %zmm20, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm21, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm13, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm14, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 448(%rdi), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 448(%rsi), %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm2, %zmm1, %zmm20
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm21
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm1, %zmm2, %zmm14
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rdx), %zmm25
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm6 = [0,3,11,0,0,0,4,12]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [0,0,0,8,0,0,0,1]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm12 = [1,0,0,0,10,2,0,0]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm25, %zmm12, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} zmm10 = [13,5,13,5,13,5,13,5]
; AVX512DQ-BW-FCP-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm25, %zmm28
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm28
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [6,14,0,0,0,7,15,0]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm25
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rdx), %zmm26
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm26, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm26, %zmm17
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm17
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm26, %zmm12, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm26, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm26
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rdx), %zmm14
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm14, %zmm15
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm15
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm14, %zmm9
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm9
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm14, %zmm12, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm14, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm14
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rdx), %zmm19
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm31
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm31
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm30
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm30
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm19, %zmm12, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm19, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm19
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 256(%rdx), %zmm18
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 256(%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm29
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm29
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm24
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm24
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm18, %zmm12, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm18
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 320(%rdx), %zmm7
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 320(%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm23
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm7, %zmm12, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 384(%rdx), %zmm11
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 384(%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm13
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm2, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm11, %zmm12, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm10, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm11
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 448(%rdx), %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 448(%rcx), %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm6
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm2
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm5, %zmm0, %zmm12
; AVX512DQ-BW-FCP-NEXT:    vpermi2q %zmm0, %zmm5, %zmm10
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm0, %zmm16, %zmm5
; AVX512DQ-BW-FCP-NEXT:    movb $49, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm15 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm31 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm29 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm23 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm13 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, %zmm6 {%k1}
; AVX512DQ-BW-FCP-NEXT:    movb $-116, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, %zmm16 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm27, %zmm0 {%k1}
; AVX512DQ-BW-FCP-NEXT:    movb $24, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm28, %zmm27 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm15, %zmm25 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm17, %zmm28 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm22 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm30, %zmm20 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm24, %zmm9 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm17 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, %zmm1 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm21 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    movb $8, %al
; AVX512DQ-BW-FCP-NEXT:    kmovd %eax, %k3
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 (%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm21 = [0,1,2,3,8,5,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm16
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm8 = [0,9,2,3,4,5,10,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm15 = [12,1,2,3,4,13,6,7]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpmovsxbq {{.*#+}} zmm16 = [0,1,14,3,4,5,6,15]
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm25
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm26 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 64(%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm4 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm28
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm26
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm14 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 128(%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm30 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm14
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm3 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm0 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm19 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 192(%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm31 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm20
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm19
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm20 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm28 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, %zmm18 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 256(%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm29 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm3
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm20
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm28
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm18
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm27 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm22 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm7 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 320(%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm23 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm17, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm1
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm27
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm22
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm7
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, %zmm9 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm0 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm11 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 384(%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm13 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm17
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm9
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm0
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm11
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 448(%r8), %zmm2
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm21, %zmm24
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm12, %zmm4 {%k1}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm8, %zmm4
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm8 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm15, %zmm8
; AVX512DQ-BW-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm10, %zmm5 {%k2}
; AVX512DQ-BW-FCP-NEXT:    vpermt2q %zmm2, %zmm16, %zmm5
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm2, %zmm6 {%k3}
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm5, 2496(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm8, 2432(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm6, 2368(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm4, 2304(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm24, 2240(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm11, 2176(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm0, 2112(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm13, 2048(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm9, 1984(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm17, 1920(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm7, 1856(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm22, 1792(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm23, 1728(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm27, 1664(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm1, 1600(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm18, 1536(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm28, 1472(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm29, 1408(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm20, 1344(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm3, 1280(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm19, 1216(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 1152(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm31, 1088(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 1024(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 960(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm14, 896(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 832(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm30, 768(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 704(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 640(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm26, 576(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 512(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 448(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 384(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 320(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovdqa64 %zmm25, 256(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 192(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 128(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, 64(%r9)
; AVX512DQ-BW-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512DQ-BW-FCP-NEXT:    vmovaps %zmm0, (%r9)
; AVX512DQ-BW-FCP-NEXT:    addq $3144, %rsp # imm = 0xC48
; AVX512DQ-BW-FCP-NEXT:    vzeroupper
; AVX512DQ-BW-FCP-NEXT:    retq
  %in.vec0 = load <64 x i64>, ptr %in.vecptr0, align 64
  %in.vec1 = load <64 x i64>, ptr %in.vecptr1, align 64
  %in.vec2 = load <64 x i64>, ptr %in.vecptr2, align 64
  %in.vec3 = load <64 x i64>, ptr %in.vecptr3, align 64
  %in.vec4 = load <64 x i64>, ptr %in.vecptr4, align 64
  %1 = shufflevector <64 x i64> %in.vec0, <64 x i64> %in.vec1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
  %2 = shufflevector <64 x i64> %in.vec2, <64 x i64> %in.vec3, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
  %3 = shufflevector <128 x i64> %1, <128 x i64> %2, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255>
  %4 = shufflevector <64 x i64> %in.vec4, <64 x i64> poison, <256 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %5 = shufflevector <256 x i64> %3, <256 x i64> %4, <320 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127, i32 128, i32 129, i32 130, i32 131, i32 132, i32 133, i32 134, i32 135, i32 136, i32 137, i32 138, i32 139, i32 140, i32 141, i32 142, i32 143, i32 144, i32 145, i32 146, i32 147, i32 148, i32 149, i32 150, i32 151, i32 152, i32 153, i32 154, i32 155, i32 156, i32 157, i32 158, i32 159, i32 160, i32 161, i32 162, i32 163, i32 164, i32 165, i32 166, i32 167, i32 168, i32 169, i32 170, i32 171, i32 172, i32 173, i32 174, i32 175, i32 176, i32 177, i32 178, i32 179, i32 180, i32 181, i32 182, i32 183, i32 184, i32 185, i32 186, i32 187, i32 188, i32 189, i32 190, i32 191, i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199, i32 200, i32 201, i32 202, i32 203, i32 204, i32 205, i32 206, i32 207, i32 208, i32 209, i32 210, i32 211, i32 212, i32 213, i32 214, i32 215, i32 216, i32 217, i32 218, i32 219, i32 220, i32 221, i32 222, i32 223, i32 224, i32 225, i32 226, i32 227, i32 228, i32 229, i32 230, i32 231, i32 232, i32 233, i32 234, i32 235, i32 236, i32 237, i32 238, i32 239, i32 240, i32 241, i32 242, i32 243, i32 244, i32 245, i32 246, i32 247, i32 248, i32 249, i32 250, i32 251, i32 252, i32 253, i32 254, i32 255, i32 256, i32 257, i32 258, i32 259, i32 260, i32 261, i32 262, i32 263, i32 264, i32 265, i32 266, i32 267, i32 268, i32 269, i32 270, i32 271, i32 272, i32 273, i32 274, i32 275, i32 276, i32 277, i32 278, i32 279, i32 280, i32 281, i32 282, i32 283, i32 284, i32 285, i32 286, i32 287, i32 288, i32 289, i32 290, i32 291, i32 292, i32 293, i32 294, i32 295, i32 296, i32 297, i32 298, i32 299, i32 300, i32 301, i32 302, i32 303, i32 304, i32 305, i32 306, i32 307, i32 308, i32 309, i32 310, i32 311, i32 312, i32 313, i32 314, i32 315, i32 316, i32 317, i32 318, i32 319>
  %interleaved.vec = shufflevector <320 x i64> %5, <320 x i64> poison, <320 x i32> <i32 0, i32 64, i32 128, i32 192, i32 256, i32 1, i32 65, i32 129, i32 193, i32 257, i32 2, i32 66, i32 130, i32 194, i32 258, i32 3, i32 67, i32 131, i32 195, i32 259, i32 4, i32 68, i32 132, i32 196, i32 260, i32 5, i32 69, i32 133, i32 197, i32 261, i32 6, i32 70, i32 134, i32 198, i32 262, i32 7, i32 71, i32 135, i32 199, i32 263, i32 8, i32 72, i32 136, i32 200, i32 264, i32 9, i32 73, i32 137, i32 201, i32 265, i32 10, i32 74, i32 138, i32 202, i32 266, i32 11, i32 75, i32 139, i32 203, i32 267, i32 12, i32 76, i32 140, i32 204, i32 268, i32 13, i32 77, i32 141, i32 205, i32 269, i32 14, i32 78, i32 142, i32 206, i32 270, i32 15, i32 79, i32 143, i32 207, i32 271, i32 16, i32 80, i32 144, i32 208, i32 272, i32 17, i32 81, i32 145, i32 209, i32 273, i32 18, i32 82, i32 146, i32 210, i32 274, i32 19, i32 83, i32 147, i32 211, i32 275, i32 20, i32 84, i32 148, i32 212, i32 276, i32 21, i32 85, i32 149, i32 213, i32 277, i32 22, i32 86, i32 150, i32 214, i32 278, i32 23, i32 87, i32 151, i32 215, i32 279, i32 24, i32 88, i32 152, i32 216, i32 280, i32 25, i32 89, i32 153, i32 217, i32 281, i32 26, i32 90, i32 154, i32 218, i32 282, i32 27, i32 91, i32 155, i32 219, i32 283, i32 28, i32 92, i32 156, i32 220, i32 284, i32 29, i32 93, i32 157, i32 221, i32 285, i32 30, i32 94, i32 158, i32 222, i32 286, i32 31, i32 95, i32 159, i32 223, i32 287, i32 32, i32 96, i32 160, i32 224, i32 288, i32 33, i32 97, i32 161, i32 225, i32 289, i32 34, i32 98, i32 162, i32 226, i32 290, i32 35, i32 99, i32 163, i32 227, i32 291, i32 36, i32 100, i32 164, i32 228, i32 292, i32 37, i32 101, i32 165, i32 229, i32 293, i32 38, i32 102, i32 166, i32 230, i32 294, i32 39, i32 103, i32 167, i32 231, i32 295, i32 40, i32 104, i32 168, i32 232, i32 296, i32 41, i32 105, i32 169, i32 233, i32 297, i32 42, i32 106, i32 170, i32 234, i32 298, i32 43, i32 107, i32 171, i32 235, i32 299, i32 44, i32 108, i32 172, i32 236, i32 300, i32 45, i32 109, i32 173, i32 237, i32 301, i32 46, i32 110, i32 174, i32 238, i32 302, i32 47, i32 111, i32 175, i32 239, i32 303, i32 48, i32 112, i32 176, i32 240, i32 304, i32 49, i32 113, i32 177, i32 241, i32 305, i32 50, i32 114, i32 178, i32 242, i32 306, i32 51, i32 115, i32 179, i32 243, i32 307, i32 52, i32 116, i32 180, i32 244, i32 308, i32 53, i32 117, i32 181, i32 245, i32 309, i32 54, i32 118, i32 182, i32 246, i32 310, i32 55, i32 119, i32 183, i32 247, i32 311, i32 56, i32 120, i32 184, i32 248, i32 312, i32 57, i32 121, i32 185, i32 249, i32 313, i32 58, i32 122, i32 186, i32 250, i32 314, i32 59, i32 123, i32 187, i32 251, i32 315, i32 60, i32 124, i32 188, i32 252, i32 316, i32 61, i32 125, i32 189, i32 253, i32 317, i32 62, i32 126, i32 190, i32 254, i32 318, i32 63, i32 127, i32 191, i32 255, i32 319>
  store <320 x i64> %interleaved.vec, ptr %out.vec, align 64
  ret void
}