; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_sp --no_x86_scrub_mem_shuffle
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vbmi | FileCheck %s --check-prefixes=AVX512,AVX512VBMI
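;
; Variable shuffles of 512-bit vectors with run-time (non-constant) indices,
; written as full extractelement/insertelement chains. Where the subtarget
; has a cross-lane variable permute for the element type, the chain should
; fold to a single instruction; otherwise the vector is spilled to an
; aligned stack slot and the result is gathered element by element.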
define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermq %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
%index1 = extractelement <8 x i64> %indices, i32 1
%index2 = extractelement <8 x i64> %indices, i32 2
%index3 = extractelement <8 x i64> %indices, i32 3
%index4 = extractelement <8 x i64> %indices, i32 4
%index5 = extractelement <8 x i64> %indices, i32 5
%index6 = extractelement <8 x i64> %indices, i32 6
%index7 = extractelement <8 x i64> %indices, i32 7
%v0 = extractelement <8 x i64> %v, i64 %index0
%v1 = extractelement <8 x i64> %v, i64 %index1
%v2 = extractelement <8 x i64> %v, i64 %index2
%v3 = extractelement <8 x i64> %v, i64 %index3
%v4 = extractelement <8 x i64> %v, i64 %index4
%v5 = extractelement <8 x i64> %v, i64 %index5
%v6 = extractelement <8 x i64> %v, i64 %index6
%v7 = extractelement <8 x i64> %v, i64 %index7
%ret0 = insertelement <8 x i64> undef, i64 %v0, i32 0
%ret1 = insertelement <8 x i64> %ret0, i64 %v1, i32 1
%ret2 = insertelement <8 x i64> %ret1, i64 %v2, i32 2
%ret3 = insertelement <8 x i64> %ret2, i64 %v3, i32 3
%ret4 = insertelement <8 x i64> %ret3, i64 %v4, i32 4
%ret5 = insertelement <8 x i64> %ret4, i64 %v5, i32 5
%ret6 = insertelement <8 x i64> %ret5, i64 %v6, i32 6
%ret7 = insertelement <8 x i64> %ret6, i64 %v7, i32 7
ret <8 x i64> %ret7
}
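; <16 x i32> indices also have a direct AVX512F lowering: a single vpermd.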
define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
%index1 = extractelement <16 x i32> %indices, i32 1
%index2 = extractelement <16 x i32> %indices, i32 2
%index3 = extractelement <16 x i32> %indices, i32 3
%index4 = extractelement <16 x i32> %indices, i32 4
%index5 = extractelement <16 x i32> %indices, i32 5
%index6 = extractelement <16 x i32> %indices, i32 6
%index7 = extractelement <16 x i32> %indices, i32 7
%index8 = extractelement <16 x i32> %indices, i32 8
%index9 = extractelement <16 x i32> %indices, i32 9
%index10 = extractelement <16 x i32> %indices, i32 10
%index11 = extractelement <16 x i32> %indices, i32 11
%index12 = extractelement <16 x i32> %indices, i32 12
%index13 = extractelement <16 x i32> %indices, i32 13
%index14 = extractelement <16 x i32> %indices, i32 14
%index15 = extractelement <16 x i32> %indices, i32 15
%v0 = extractelement <16 x i32> %v, i32 %index0
%v1 = extractelement <16 x i32> %v, i32 %index1
%v2 = extractelement <16 x i32> %v, i32 %index2
%v3 = extractelement <16 x i32> %v, i32 %index3
%v4 = extractelement <16 x i32> %v, i32 %index4
%v5 = extractelement <16 x i32> %v, i32 %index5
%v6 = extractelement <16 x i32> %v, i32 %index6
%v7 = extractelement <16 x i32> %v, i32 %index7
%v8 = extractelement <16 x i32> %v, i32 %index8
%v9 = extractelement <16 x i32> %v, i32 %index9
%v10 = extractelement <16 x i32> %v, i32 %index10
%v11 = extractelement <16 x i32> %v, i32 %index11
%v12 = extractelement <16 x i32> %v, i32 %index12
%v13 = extractelement <16 x i32> %v, i32 %index13
%v14 = extractelement <16 x i32> %v, i32 %index14
%v15 = extractelement <16 x i32> %v, i32 %index15
%ret0 = insertelement <16 x i32> undef, i32 %v0, i32 0
%ret1 = insertelement <16 x i32> %ret0, i32 %v1, i32 1
%ret2 = insertelement <16 x i32> %ret1, i32 %v2, i32 2
%ret3 = insertelement <16 x i32> %ret2, i32 %v3, i32 3
%ret4 = insertelement <16 x i32> %ret3, i32 %v4, i32 4
%ret5 = insertelement <16 x i32> %ret4, i32 %v5, i32 5
%ret6 = insertelement <16 x i32> %ret5, i32 %v6, i32 6
%ret7 = insertelement <16 x i32> %ret6, i32 %v7, i32 7
%ret8 = insertelement <16 x i32> %ret7, i32 %v8, i32 8
%ret9 = insertelement <16 x i32> %ret8, i32 %v9, i32 9
%ret10 = insertelement <16 x i32> %ret9, i32 %v10, i32 10
%ret11 = insertelement <16 x i32> %ret10, i32 %v11, i32 11
%ret12 = insertelement <16 x i32> %ret11, i32 %v12, i32 12
%ret13 = insertelement <16 x i32> %ret12, i32 %v13, i32 13
%ret14 = insertelement <16 x i32> %ret13, i32 %v14, i32 14
%ret15 = insertelement <16 x i32> %ret14, i32 %v15, i32 15
ret <16 x i32> %ret15
}
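; AVX512F has no cross-lane variable 16-bit permute, so the <32 x i16> case
; spills %v to the stack and gathers the result with vpextrw/vpinsrw;
; AVX512BW (and therefore AVX512VBMI) select a single vpermw instead.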
define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwind {
; AVX512F-LABEL: var_shuffle_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-64, %rsp
; AVX512F-NEXT: subq $128, %rsp
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512F-NEXT: vextracti32x4 $2, %zmm1, %xmm3
; AVX512F-NEXT: vextracti32x4 $3, %zmm1, %xmm4
; AVX512F-NEXT: vpextrw $0, %xmm4, %eax
; AVX512F-NEXT: vmovaps %zmm0, (%rsp)
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vpextrw $1, %xmm4, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
; AVX512F-NEXT: vpextrw $2, %xmm4, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
; AVX512F-NEXT: vpextrw $3, %xmm4, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
; AVX512F-NEXT: vpextrw $4, %xmm4, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
; AVX512F-NEXT: vpextrw $5, %xmm4, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
; AVX512F-NEXT: vpextrw $6, %xmm4, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
; AVX512F-NEXT: vpextrw $7, %xmm4, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
; AVX512F-NEXT: vpextrw $0, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512F-NEXT: vmovd %eax, %xmm4
; AVX512F-NEXT: vpextrw $1, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $2, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $3, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $4, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $5, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $6, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $7, %xmm3, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm4, %xmm3
; AVX512F-NEXT: vpextrw $0, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512F-NEXT: vmovd %eax, %xmm4
; AVX512F-NEXT: vpextrw $1, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $2, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $3, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $4, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $5, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512F-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $6, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512F-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $7, %xmm2, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512F-NEXT: vpinsrw $7, %eax, %xmm4, %xmm2
; AVX512F-NEXT: vpextrw $0, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512F-NEXT: vmovd %eax, %xmm4
; AVX512F-NEXT: vpextrw $1, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $2, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $3, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $4, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $5, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $6, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512F-NEXT: vpextrw $7, %xmm1, %eax
; AVX512F-NEXT: andl $31, %eax
; AVX512F-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm4, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: var_shuffle_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VBMI-LABEL: var_shuffle_v32i16:
; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpermw %zmm0, %zmm1, %zmm0
; AVX512VBMI-NEXT: retq
%index0 = extractelement <32 x i16> %indices, i32 0
%index1 = extractelement <32 x i16> %indices, i32 1
%index2 = extractelement <32 x i16> %indices, i32 2
%index3 = extractelement <32 x i16> %indices, i32 3
%index4 = extractelement <32 x i16> %indices, i32 4
%index5 = extractelement <32 x i16> %indices, i32 5
%index6 = extractelement <32 x i16> %indices, i32 6
%index7 = extractelement <32 x i16> %indices, i32 7
%index8 = extractelement <32 x i16> %indices, i32 8
%index9 = extractelement <32 x i16> %indices, i32 9
%index10 = extractelement <32 x i16> %indices, i32 10
%index11 = extractelement <32 x i16> %indices, i32 11
%index12 = extractelement <32 x i16> %indices, i32 12
%index13 = extractelement <32 x i16> %indices, i32 13
%index14 = extractelement <32 x i16> %indices, i32 14
%index15 = extractelement <32 x i16> %indices, i32 15
%index16 = extractelement <32 x i16> %indices, i32 16
%index17 = extractelement <32 x i16> %indices, i32 17
%index18 = extractelement <32 x i16> %indices, i32 18
%index19 = extractelement <32 x i16> %indices, i32 19
%index20 = extractelement <32 x i16> %indices, i32 20
%index21 = extractelement <32 x i16> %indices, i32 21
%index22 = extractelement <32 x i16> %indices, i32 22
%index23 = extractelement <32 x i16> %indices, i32 23
%index24 = extractelement <32 x i16> %indices, i32 24
%index25 = extractelement <32 x i16> %indices, i32 25
%index26 = extractelement <32 x i16> %indices, i32 26
%index27 = extractelement <32 x i16> %indices, i32 27
%index28 = extractelement <32 x i16> %indices, i32 28
%index29 = extractelement <32 x i16> %indices, i32 29
%index30 = extractelement <32 x i16> %indices, i32 30
%index31 = extractelement <32 x i16> %indices, i32 31
%v0 = extractelement <32 x i16> %v, i16 %index0
%v1 = extractelement <32 x i16> %v, i16 %index1
%v2 = extractelement <32 x i16> %v, i16 %index2
%v3 = extractelement <32 x i16> %v, i16 %index3
%v4 = extractelement <32 x i16> %v, i16 %index4
%v5 = extractelement <32 x i16> %v, i16 %index5
%v6 = extractelement <32 x i16> %v, i16 %index6
%v7 = extractelement <32 x i16> %v, i16 %index7
%v8 = extractelement <32 x i16> %v, i16 %index8
%v9 = extractelement <32 x i16> %v, i16 %index9
%v10 = extractelement <32 x i16> %v, i16 %index10
%v11 = extractelement <32 x i16> %v, i16 %index11
%v12 = extractelement <32 x i16> %v, i16 %index12
%v13 = extractelement <32 x i16> %v, i16 %index13
%v14 = extractelement <32 x i16> %v, i16 %index14
%v15 = extractelement <32 x i16> %v, i16 %index15
%v16 = extractelement <32 x i16> %v, i16 %index16
%v17 = extractelement <32 x i16> %v, i16 %index17
%v18 = extractelement <32 x i16> %v, i16 %index18
%v19 = extractelement <32 x i16> %v, i16 %index19
%v20 = extractelement <32 x i16> %v, i16 %index20
%v21 = extractelement <32 x i16> %v, i16 %index21
%v22 = extractelement <32 x i16> %v, i16 %index22
%v23 = extractelement <32 x i16> %v, i16 %index23
%v24 = extractelement <32 x i16> %v, i16 %index24
%v25 = extractelement <32 x i16> %v, i16 %index25
%v26 = extractelement <32 x i16> %v, i16 %index26
%v27 = extractelement <32 x i16> %v, i16 %index27
%v28 = extractelement <32 x i16> %v, i16 %index28
%v29 = extractelement <32 x i16> %v, i16 %index29
%v30 = extractelement <32 x i16> %v, i16 %index30
%v31 = extractelement <32 x i16> %v, i16 %index31
%ret0 = insertelement <32 x i16> undef, i16 %v0, i32 0
%ret1 = insertelement <32 x i16> %ret0, i16 %v1, i32 1
%ret2 = insertelement <32 x i16> %ret1, i16 %v2, i32 2
%ret3 = insertelement <32 x i16> %ret2, i16 %v3, i32 3
%ret4 = insertelement <32 x i16> %ret3, i16 %v4, i32 4
%ret5 = insertelement <32 x i16> %ret4, i16 %v5, i32 5
%ret6 = insertelement <32 x i16> %ret5, i16 %v6, i32 6
%ret7 = insertelement <32 x i16> %ret6, i16 %v7, i32 7
%ret8 = insertelement <32 x i16> %ret7, i16 %v8, i32 8
%ret9 = insertelement <32 x i16> %ret8, i16 %v9, i32 9
%ret10 = insertelement <32 x i16> %ret9, i16 %v10, i32 10
%ret11 = insertelement <32 x i16> %ret10, i16 %v11, i32 11
%ret12 = insertelement <32 x i16> %ret11, i16 %v12, i32 12
%ret13 = insertelement <32 x i16> %ret12, i16 %v13, i32 13
%ret14 = insertelement <32 x i16> %ret13, i16 %v14, i32 14
%ret15 = insertelement <32 x i16> %ret14, i16 %v15, i32 15
%ret16 = insertelement <32 x i16> %ret15, i16 %v16, i32 16
%ret17 = insertelement <32 x i16> %ret16, i16 %v17, i32 17
%ret18 = insertelement <32 x i16> %ret17, i16 %v18, i32 18
%ret19 = insertelement <32 x i16> %ret18, i16 %v19, i32 19
%ret20 = insertelement <32 x i16> %ret19, i16 %v20, i32 20
%ret21 = insertelement <32 x i16> %ret20, i16 %v21, i32 21
%ret22 = insertelement <32 x i16> %ret21, i16 %v22, i32 22
%ret23 = insertelement <32 x i16> %ret22, i16 %v23, i32 23
%ret24 = insertelement <32 x i16> %ret23, i16 %v24, i32 24
%ret25 = insertelement <32 x i16> %ret24, i16 %v25, i32 25
%ret26 = insertelement <32 x i16> %ret25, i16 %v26, i32 26
%ret27 = insertelement <32 x i16> %ret26, i16 %v27, i32 27
%ret28 = insertelement <32 x i16> %ret27, i16 %v28, i32 28
%ret29 = insertelement <32 x i16> %ret28, i16 %v29, i32 29
%ret30 = insertelement <32 x i16> %ret29, i16 %v30, i32 30
%ret31 = insertelement <32 x i16> %ret30, i16 %v31, i32 31
ret <32 x i16> %ret31
}
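; A variable byte permute needs AVX512VBMI's vpermb; both AVX512F and
; AVX512BW fall back to the stack-spill gather, this time with
; vpextrb/vpinsrb.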
define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
; AVX512F-LABEL: var_shuffle_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-64, %rsp
; AVX512F-NEXT: subq $128, %rsp
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512F-NEXT: vextracti32x4 $2, %zmm1, %xmm3
; AVX512F-NEXT: vextracti32x4 $3, %zmm1, %xmm4
; AVX512F-NEXT: vpextrb $0, %xmm4, %eax
; AVX512F-NEXT: vmovaps %zmm0, (%rsp)
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vpextrb $1, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $2, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $3, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $4, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $5, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $6, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $7, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $8, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $9, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $10, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $10, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $11, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $11, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $12, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $12, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $13, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $13, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $14, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $14, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $15, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $15, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrb $0, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm4
; AVX512F-NEXT: vpextrb $1, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $2, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $3, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $4, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $5, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $6, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $7, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $8, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $9, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $10, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $10, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $11, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $11, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $12, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $12, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $13, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $14, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $15, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $15, %eax, %xmm4, %xmm3
; AVX512F-NEXT: vpextrb $0, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm4
; AVX512F-NEXT: vpextrb $1, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $2, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $3, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $4, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $5, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $6, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $7, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $8, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $9, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $10, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $10, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $11, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $11, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $12, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $13, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $14, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $15, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $15, %eax, %xmm4, %xmm2
; AVX512F-NEXT: vpextrb $0, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm4
; AVX512F-NEXT: vpextrb $1, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $2, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $3, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $4, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $5, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $6, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $7, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $8, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $9, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $10, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $10, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $11, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $12, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $13, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $14, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrb $15, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $15, %eax, %xmm4, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: var_shuffle_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: pushq %rbp
; AVX512BW-NEXT: movq %rsp, %rbp
; AVX512BW-NEXT: andq $-64, %rsp
; AVX512BW-NEXT: subq $128, %rsp
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm3
; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
; AVX512BW-NEXT: vpextrb $0, %xmm4, %eax
; AVX512BW-NEXT: vmovaps %zmm0, (%rsp)
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm0
; AVX512BW-NEXT: vpextrb $1, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $2, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $3, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $4, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $5, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $6, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $7, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $8, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $9, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $10, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $10, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $11, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $11, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $12, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $12, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $13, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $13, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $14, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $14, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $15, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $15, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $0, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm4
; AVX512BW-NEXT: vpextrb $1, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $2, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $3, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $4, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $5, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $6, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $7, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $8, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $9, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $10, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $10, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $11, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $11, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $12, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $12, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $13, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $14, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $15, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm4, %xmm3
; AVX512BW-NEXT: vpextrb $0, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm4
; AVX512BW-NEXT: vpextrb $1, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $2, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $3, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $4, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $5, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $6, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $7, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $8, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $9, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $10, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $10, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $11, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $11, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $12, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $13, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $14, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $15, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm4, %xmm2
; AVX512BW-NEXT: vpextrb $0, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm4
; AVX512BW-NEXT: vpextrb $1, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $2, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $3, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $4, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $5, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $6, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $7, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $8, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $9, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $10, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $10, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $11, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $12, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $13, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $14, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $15, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm4, %xmm1
; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512BW-NEXT: movq %rbp, %rsp
; AVX512BW-NEXT: popq %rbp
; AVX512BW-NEXT: retq
;
; AVX512VBMI-LABEL: var_shuffle_v64i8:
; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm0
; AVX512VBMI-NEXT: retq
%index0 = extractelement <64 x i8> %indices, i32 0
%index1 = extractelement <64 x i8> %indices, i32 1
%index2 = extractelement <64 x i8> %indices, i32 2
%index3 = extractelement <64 x i8> %indices, i32 3
%index4 = extractelement <64 x i8> %indices, i32 4
%index5 = extractelement <64 x i8> %indices, i32 5
%index6 = extractelement <64 x i8> %indices, i32 6
%index7 = extractelement <64 x i8> %indices, i32 7
%index8 = extractelement <64 x i8> %indices, i32 8
%index9 = extractelement <64 x i8> %indices, i32 9
%index10 = extractelement <64 x i8> %indices, i32 10
%index11 = extractelement <64 x i8> %indices, i32 11
%index12 = extractelement <64 x i8> %indices, i32 12
%index13 = extractelement <64 x i8> %indices, i32 13
%index14 = extractelement <64 x i8> %indices, i32 14
%index15 = extractelement <64 x i8> %indices, i32 15
%index16 = extractelement <64 x i8> %indices, i32 16
%index17 = extractelement <64 x i8> %indices, i32 17
%index18 = extractelement <64 x i8> %indices, i32 18
%index19 = extractelement <64 x i8> %indices, i32 19
%index20 = extractelement <64 x i8> %indices, i32 20
%index21 = extractelement <64 x i8> %indices, i32 21
%index22 = extractelement <64 x i8> %indices, i32 22
%index23 = extractelement <64 x i8> %indices, i32 23
%index24 = extractelement <64 x i8> %indices, i32 24
%index25 = extractelement <64 x i8> %indices, i32 25
%index26 = extractelement <64 x i8> %indices, i32 26
%index27 = extractelement <64 x i8> %indices, i32 27
%index28 = extractelement <64 x i8> %indices, i32 28
%index29 = extractelement <64 x i8> %indices, i32 29
%index30 = extractelement <64 x i8> %indices, i32 30
%index31 = extractelement <64 x i8> %indices, i32 31
%index32 = extractelement <64 x i8> %indices, i32 32
%index33 = extractelement <64 x i8> %indices, i32 33
%index34 = extractelement <64 x i8> %indices, i32 34
%index35 = extractelement <64 x i8> %indices, i32 35
%index36 = extractelement <64 x i8> %indices, i32 36
%index37 = extractelement <64 x i8> %indices, i32 37
%index38 = extractelement <64 x i8> %indices, i32 38
%index39 = extractelement <64 x i8> %indices, i32 39
%index40 = extractelement <64 x i8> %indices, i32 40
%index41 = extractelement <64 x i8> %indices, i32 41
%index42 = extractelement <64 x i8> %indices, i32 42
%index43 = extractelement <64 x i8> %indices, i32 43
%index44 = extractelement <64 x i8> %indices, i32 44
%index45 = extractelement <64 x i8> %indices, i32 45
%index46 = extractelement <64 x i8> %indices, i32 46
%index47 = extractelement <64 x i8> %indices, i32 47
%index48 = extractelement <64 x i8> %indices, i32 48
%index49 = extractelement <64 x i8> %indices, i32 49
%index50 = extractelement <64 x i8> %indices, i32 50
%index51 = extractelement <64 x i8> %indices, i32 51
%index52 = extractelement <64 x i8> %indices, i32 52
%index53 = extractelement <64 x i8> %indices, i32 53
%index54 = extractelement <64 x i8> %indices, i32 54
%index55 = extractelement <64 x i8> %indices, i32 55
%index56 = extractelement <64 x i8> %indices, i32 56
%index57 = extractelement <64 x i8> %indices, i32 57
%index58 = extractelement <64 x i8> %indices, i32 58
%index59 = extractelement <64 x i8> %indices, i32 59
%index60 = extractelement <64 x i8> %indices, i32 60
%index61 = extractelement <64 x i8> %indices, i32 61
%index62 = extractelement <64 x i8> %indices, i32 62
%index63 = extractelement <64 x i8> %indices, i32 63
%v0 = extractelement <64 x i8> %v, i8 %index0
%v1 = extractelement <64 x i8> %v, i8 %index1
%v2 = extractelement <64 x i8> %v, i8 %index2
%v3 = extractelement <64 x i8> %v, i8 %index3
%v4 = extractelement <64 x i8> %v, i8 %index4
%v5 = extractelement <64 x i8> %v, i8 %index5
%v6 = extractelement <64 x i8> %v, i8 %index6
%v7 = extractelement <64 x i8> %v, i8 %index7
%v8 = extractelement <64 x i8> %v, i8 %index8
%v9 = extractelement <64 x i8> %v, i8 %index9
%v10 = extractelement <64 x i8> %v, i8 %index10
%v11 = extractelement <64 x i8> %v, i8 %index11
%v12 = extractelement <64 x i8> %v, i8 %index12
%v13 = extractelement <64 x i8> %v, i8 %index13
%v14 = extractelement <64 x i8> %v, i8 %index14
%v15 = extractelement <64 x i8> %v, i8 %index15
%v16 = extractelement <64 x i8> %v, i8 %index16
%v17 = extractelement <64 x i8> %v, i8 %index17
%v18 = extractelement <64 x i8> %v, i8 %index18
%v19 = extractelement <64 x i8> %v, i8 %index19
%v20 = extractelement <64 x i8> %v, i8 %index20
%v21 = extractelement <64 x i8> %v, i8 %index21
%v22 = extractelement <64 x i8> %v, i8 %index22
%v23 = extractelement <64 x i8> %v, i8 %index23
%v24 = extractelement <64 x i8> %v, i8 %index24
%v25 = extractelement <64 x i8> %v, i8 %index25
%v26 = extractelement <64 x i8> %v, i8 %index26
%v27 = extractelement <64 x i8> %v, i8 %index27
%v28 = extractelement <64 x i8> %v, i8 %index28
%v29 = extractelement <64 x i8> %v, i8 %index29
%v30 = extractelement <64 x i8> %v, i8 %index30
%v31 = extractelement <64 x i8> %v, i8 %index31
%v32 = extractelement <64 x i8> %v, i8 %index32
%v33 = extractelement <64 x i8> %v, i8 %index33
%v34 = extractelement <64 x i8> %v, i8 %index34
%v35 = extractelement <64 x i8> %v, i8 %index35
%v36 = extractelement <64 x i8> %v, i8 %index36
%v37 = extractelement <64 x i8> %v, i8 %index37
%v38 = extractelement <64 x i8> %v, i8 %index38
%v39 = extractelement <64 x i8> %v, i8 %index39
%v40 = extractelement <64 x i8> %v, i8 %index40
%v41 = extractelement <64 x i8> %v, i8 %index41
%v42 = extractelement <64 x i8> %v, i8 %index42
%v43 = extractelement <64 x i8> %v, i8 %index43
%v44 = extractelement <64 x i8> %v, i8 %index44
%v45 = extractelement <64 x i8> %v, i8 %index45
%v46 = extractelement <64 x i8> %v, i8 %index46
%v47 = extractelement <64 x i8> %v, i8 %index47
%v48 = extractelement <64 x i8> %v, i8 %index48
%v49 = extractelement <64 x i8> %v, i8 %index49
%v50 = extractelement <64 x i8> %v, i8 %index50
%v51 = extractelement <64 x i8> %v, i8 %index51
%v52 = extractelement <64 x i8> %v, i8 %index52
%v53 = extractelement <64 x i8> %v, i8 %index53
%v54 = extractelement <64 x i8> %v, i8 %index54
%v55 = extractelement <64 x i8> %v, i8 %index55
%v56 = extractelement <64 x i8> %v, i8 %index56
%v57 = extractelement <64 x i8> %v, i8 %index57
%v58 = extractelement <64 x i8> %v, i8 %index58
%v59 = extractelement <64 x i8> %v, i8 %index59
%v60 = extractelement <64 x i8> %v, i8 %index60
%v61 = extractelement <64 x i8> %v, i8 %index61
%v62 = extractelement <64 x i8> %v, i8 %index62
%v63 = extractelement <64 x i8> %v, i8 %index63
%ret0 = insertelement <64 x i8> undef, i8 %v0, i32 0
%ret1 = insertelement <64 x i8> %ret0, i8 %v1, i32 1
%ret2 = insertelement <64 x i8> %ret1, i8 %v2, i32 2
%ret3 = insertelement <64 x i8> %ret2, i8 %v3, i32 3
%ret4 = insertelement <64 x i8> %ret3, i8 %v4, i32 4
%ret5 = insertelement <64 x i8> %ret4, i8 %v5, i32 5
%ret6 = insertelement <64 x i8> %ret5, i8 %v6, i32 6
%ret7 = insertelement <64 x i8> %ret6, i8 %v7, i32 7
%ret8 = insertelement <64 x i8> %ret7, i8 %v8, i32 8
%ret9 = insertelement <64 x i8> %ret8, i8 %v9, i32 9
%ret10 = insertelement <64 x i8> %ret9, i8 %v10, i32 10
%ret11 = insertelement <64 x i8> %ret10, i8 %v11, i32 11
%ret12 = insertelement <64 x i8> %ret11, i8 %v12, i32 12
%ret13 = insertelement <64 x i8> %ret12, i8 %v13, i32 13
%ret14 = insertelement <64 x i8> %ret13, i8 %v14, i32 14
%ret15 = insertelement <64 x i8> %ret14, i8 %v15, i32 15
%ret16 = insertelement <64 x i8> %ret15, i8 %v16, i32 16
%ret17 = insertelement <64 x i8> %ret16, i8 %v17, i32 17
%ret18 = insertelement <64 x i8> %ret17, i8 %v18, i32 18
%ret19 = insertelement <64 x i8> %ret18, i8 %v19, i32 19
%ret20 = insertelement <64 x i8> %ret19, i8 %v20, i32 20
%ret21 = insertelement <64 x i8> %ret20, i8 %v21, i32 21
%ret22 = insertelement <64 x i8> %ret21, i8 %v22, i32 22
%ret23 = insertelement <64 x i8> %ret22, i8 %v23, i32 23
%ret24 = insertelement <64 x i8> %ret23, i8 %v24, i32 24
%ret25 = insertelement <64 x i8> %ret24, i8 %v25, i32 25
%ret26 = insertelement <64 x i8> %ret25, i8 %v26, i32 26
%ret27 = insertelement <64 x i8> %ret26, i8 %v27, i32 27
%ret28 = insertelement <64 x i8> %ret27, i8 %v28, i32 28
%ret29 = insertelement <64 x i8> %ret28, i8 %v29, i32 29
%ret30 = insertelement <64 x i8> %ret29, i8 %v30, i32 30
%ret31 = insertelement <64 x i8> %ret30, i8 %v31, i32 31
%ret32 = insertelement <64 x i8> %ret31, i8 %v32, i32 32
%ret33 = insertelement <64 x i8> %ret32, i8 %v33, i32 33
%ret34 = insertelement <64 x i8> %ret33, i8 %v34, i32 34
%ret35 = insertelement <64 x i8> %ret34, i8 %v35, i32 35
%ret36 = insertelement <64 x i8> %ret35, i8 %v36, i32 36
%ret37 = insertelement <64 x i8> %ret36, i8 %v37, i32 37
%ret38 = insertelement <64 x i8> %ret37, i8 %v38, i32 38
%ret39 = insertelement <64 x i8> %ret38, i8 %v39, i32 39
%ret40 = insertelement <64 x i8> %ret39, i8 %v40, i32 40
%ret41 = insertelement <64 x i8> %ret40, i8 %v41, i32 41
%ret42 = insertelement <64 x i8> %ret41, i8 %v42, i32 42
%ret43 = insertelement <64 x i8> %ret42, i8 %v43, i32 43
%ret44 = insertelement <64 x i8> %ret43, i8 %v44, i32 44
%ret45 = insertelement <64 x i8> %ret44, i8 %v45, i32 45
%ret46 = insertelement <64 x i8> %ret45, i8 %v46, i32 46
%ret47 = insertelement <64 x i8> %ret46, i8 %v47, i32 47
%ret48 = insertelement <64 x i8> %ret47, i8 %v48, i32 48
%ret49 = insertelement <64 x i8> %ret48, i8 %v49, i32 49
%ret50 = insertelement <64 x i8> %ret49, i8 %v50, i32 50
%ret51 = insertelement <64 x i8> %ret50, i8 %v51, i32 51
%ret52 = insertelement <64 x i8> %ret51, i8 %v52, i32 52
%ret53 = insertelement <64 x i8> %ret52, i8 %v53, i32 53
%ret54 = insertelement <64 x i8> %ret53, i8 %v54, i32 54
%ret55 = insertelement <64 x i8> %ret54, i8 %v55, i32 55
%ret56 = insertelement <64 x i8> %ret55, i8 %v56, i32 56
%ret57 = insertelement <64 x i8> %ret56, i8 %v57, i32 57
%ret58 = insertelement <64 x i8> %ret57, i8 %v58, i32 58
%ret59 = insertelement <64 x i8> %ret58, i8 %v59, i32 59
%ret60 = insertelement <64 x i8> %ret59, i8 %v60, i32 60
%ret61 = insertelement <64 x i8> %ret60, i8 %v61, i32 61
%ret62 = insertelement <64 x i8> %ret61, i8 %v62, i32 62
%ret63 = insertelement <64 x i8> %ret62, i8 %v63, i32 63
ret <64 x i8> %ret63
}
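; Floating-point variant of the <8 x i64> case: <8 x double> should select a
; single vpermpd.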
define <8 x double> @var_shuffle_v8f64(<8 x double> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
%index1 = extractelement <8 x i64> %indices, i32 1
%index2 = extractelement <8 x i64> %indices, i32 2
%index3 = extractelement <8 x i64> %indices, i32 3
%index4 = extractelement <8 x i64> %indices, i32 4
%index5 = extractelement <8 x i64> %indices, i32 5
%index6 = extractelement <8 x i64> %indices, i32 6
%index7 = extractelement <8 x i64> %indices, i32 7
%v0 = extractelement <8 x double> %v, i64 %index0
%v1 = extractelement <8 x double> %v, i64 %index1
%v2 = extractelement <8 x double> %v, i64 %index2
%v3 = extractelement <8 x double> %v, i64 %index3
%v4 = extractelement <8 x double> %v, i64 %index4
%v5 = extractelement <8 x double> %v, i64 %index5
%v6 = extractelement <8 x double> %v, i64 %index6
%v7 = extractelement <8 x double> %v, i64 %index7
%ret0 = insertelement <8 x double> undef, double %v0, i32 0
%ret1 = insertelement <8 x double> %ret0, double %v1, i32 1
%ret2 = insertelement <8 x double> %ret1, double %v2, i32 2
%ret3 = insertelement <8 x double> %ret2, double %v3, i32 3
%ret4 = insertelement <8 x double> %ret3, double %v4, i32 4
%ret5 = insertelement <8 x double> %ret4, double %v5, i32 5
%ret6 = insertelement <8 x double> %ret5, double %v6, i32 6
%ret7 = insertelement <8 x double> %ret6, double %v7, i32 7
ret <8 x double> %ret7
}
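; Likewise <16 x float> should select a single vpermps.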
define <16 x float> @var_shuffle_v16f32(<16 x float> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
%index1 = extractelement <16 x i32> %indices, i32 1
%index2 = extractelement <16 x i32> %indices, i32 2
%index3 = extractelement <16 x i32> %indices, i32 3
%index4 = extractelement <16 x i32> %indices, i32 4
%index5 = extractelement <16 x i32> %indices, i32 5
%index6 = extractelement <16 x i32> %indices, i32 6
%index7 = extractelement <16 x i32> %indices, i32 7
%index8 = extractelement <16 x i32> %indices, i32 8
%index9 = extractelement <16 x i32> %indices, i32 9
%index10 = extractelement <16 x i32> %indices, i32 10
%index11 = extractelement <16 x i32> %indices, i32 11
%index12 = extractelement <16 x i32> %indices, i32 12
%index13 = extractelement <16 x i32> %indices, i32 13
%index14 = extractelement <16 x i32> %indices, i32 14
%index15 = extractelement <16 x i32> %indices, i32 15
%v0 = extractelement <16 x float> %v, i32 %index0
%v1 = extractelement <16 x float> %v, i32 %index1
%v2 = extractelement <16 x float> %v, i32 %index2
%v3 = extractelement <16 x float> %v, i32 %index3
%v4 = extractelement <16 x float> %v, i32 %index4
%v5 = extractelement <16 x float> %v, i32 %index5
%v6 = extractelement <16 x float> %v, i32 %index6
%v7 = extractelement <16 x float> %v, i32 %index7
%v8 = extractelement <16 x float> %v, i32 %index8
%v9 = extractelement <16 x float> %v, i32 %index9
%v10 = extractelement <16 x float> %v, i32 %index10
%v11 = extractelement <16 x float> %v, i32 %index11
%v12 = extractelement <16 x float> %v, i32 %index12
%v13 = extractelement <16 x float> %v, i32 %index13
%v14 = extractelement <16 x float> %v, i32 %index14
%v15 = extractelement <16 x float> %v, i32 %index15
%ret0 = insertelement <16 x float> undef, float %v0, i32 0
%ret1 = insertelement <16 x float> %ret0, float %v1, i32 1
%ret2 = insertelement <16 x float> %ret1, float %v2, i32 2
%ret3 = insertelement <16 x float> %ret2, float %v3, i32 3
%ret4 = insertelement <16 x float> %ret3, float %v4, i32 4
%ret5 = insertelement <16 x float> %ret4, float %v5, i32 5
%ret6 = insertelement <16 x float> %ret5, float %v6, i32 6
%ret7 = insertelement <16 x float> %ret6, float %v7, i32 7
%ret8 = insertelement <16 x float> %ret7, float %v8, i32 8
%ret9 = insertelement <16 x float> %ret8, float %v9, i32 9
%ret10 = insertelement <16 x float> %ret9, float %v10, i32 10
%ret11 = insertelement <16 x float> %ret10, float %v11, i32 11
%ret12 = insertelement <16 x float> %ret11, float %v12, i32 12
%ret13 = insertelement <16 x float> %ret12, float %v13, i32 13
%ret14 = insertelement <16 x float> %ret13, float %v14, i32 14
%ret15 = insertelement <16 x float> %ret14, float %v15, i32 15
ret <16 x float> %ret15
}
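; Shuffle-then-convert: each index is %b plus a per-element constant (the
; vpaddd of a broadcast %b with constant-pool offset vectors); the gathered
; bytes are sign-extended to i32 and converted to float (vpmovsxbd +
; vcvtdq2ps), and the four <16 x float> results are stored to %dst.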
define void @var_cvt_shuffle_v64f32_v64i8_idx(ptr %dst, <64 x i8> %src, i32 %b) nounwind {
; AVX512F-LABEL: var_cvt_shuffle_v64f32_v64i8_idx:
; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-64, %rsp
; AVX512F-NEXT: subq $128, %rsp
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: vpbroadcastd %esi, %zmm2
; AVX512F-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm1
; AVX512F-NEXT: vmovd %xmm1, %eax
; AVX512F-NEXT: vmovaps %zmm0, (%rsp)
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm0
; AVX512F-NEXT: vpextrd $1, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $2, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $3, %xmm1, %eax
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vmovd %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $1, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $2, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $3, %xmm3, %eax
; AVX512F-NEXT: vextracti32x4 $2, %zmm1, %xmm5
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vmovd %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $1, %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
; AVX512F-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm3
; AVX512F-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512F-NEXT: andl $63, %esi
; AVX512F-NEXT: vpinsrb $10, (%rsp,%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $3, %xmm5, %eax
; AVX512F-NEXT: vextracti32x4 $3, %zmm1, %xmm1
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $11, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vmovd %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $12, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $1, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $13, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $2, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $14, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vpextrd $3, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $15, (%rsp,%rax), %xmm0, %xmm0
; AVX512F-NEXT: vmovd %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm1
; AVX512F-NEXT: vpextrd $1, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vpextrd $2, %xmm4, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vpextrd $3, %xmm4, %eax
; AVX512F-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vpextrd $1, %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vpextrd $2, %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vpextrd $3, %xmm5, %eax
; AVX512F-NEXT: vextracti32x4 $2, %zmm4, %xmm5
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vmovd %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vpextrd $1, %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm1, %xmm1
; AVX512F-NEXT: vpextrd $2, %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $10, (%rsp,%rax), %xmm1, %xmm6
; AVX512F-NEXT: vpextrd $3, %xmm5, %eax
; AVX512F-NEXT: vextracti32x4 $3, %zmm4, %xmm1
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $11, (%rsp,%rax), %xmm6, %xmm4
; AVX512F-NEXT: vmovd %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $12, (%rsp,%rax), %xmm4, %xmm4
; AVX512F-NEXT: vmovd %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm5
; AVX512F-NEXT: vpextrd $1, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm5, %xmm5
; AVX512F-NEXT: vpextrd $2, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm5, %xmm5
; AVX512F-NEXT: vpextrd $3, %xmm3, %eax
; AVX512F-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm5, %xmm5
; AVX512F-NEXT: vmovd %xmm6, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm5, %xmm5
; AVX512F-NEXT: vpextrd $1, %xmm6, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm5, %xmm5
; AVX512F-NEXT: vpextrd $2, %xmm6, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm5, %xmm7
; AVX512F-NEXT: vpextrd $3, %xmm6, %eax
; AVX512F-NEXT: vextracti32x4 $2, %zmm3, %xmm5
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm7, %xmm6
; AVX512F-NEXT: vmovd %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm6, %xmm6
; AVX512F-NEXT: vpextrd $1, %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm6, %xmm6
; AVX512F-NEXT: vpextrd $2, %xmm5, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $10, (%rsp,%rax), %xmm6, %xmm6
; AVX512F-NEXT: vmovd %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vmovd %eax, %xmm7
; AVX512F-NEXT: vpextrd $1, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $2, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $3, %xmm2, %eax
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm8
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vmovd %xmm8, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $1, %xmm8, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $2, %xmm8, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $3, %xmm8, %eax
; AVX512F-NEXT: vextracti32x4 $2, %zmm2, %xmm8
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vmovd %xmm8, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $1, %xmm8, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $2, %xmm8, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $10, %eax, %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $3, %xmm8, %eax
; AVX512F-NEXT: vextracti32x4 $3, %zmm2, %xmm2
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $11, %eax, %xmm7, %xmm7
; AVX512F-NEXT: vmovd %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $12, %eax, %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $1, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $13, %eax, %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $2, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $14, %eax, %xmm7, %xmm7
; AVX512F-NEXT: vpextrd $3, %xmm2, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $15, %eax, %xmm7, %xmm2
; AVX512F-NEXT: vpextrd $3, %xmm5, %eax
; AVX512F-NEXT: vextracti32x4 $3, %zmm3, %xmm3
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $11, %eax, %xmm6, %xmm5
; AVX512F-NEXT: vmovd %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $12, %eax, %xmm5, %xmm5
; AVX512F-NEXT: vpextrd $1, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $13, %eax, %xmm5, %xmm5
; AVX512F-NEXT: vpextrd $2, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $14, %eax, %xmm5, %xmm5
; AVX512F-NEXT: vpextrd $3, %xmm3, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $15, %eax, %xmm5, %xmm3
; AVX512F-NEXT: vpextrd $1, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrd $2, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512F-NEXT: vpextrd $3, %xmm1, %eax
; AVX512F-NEXT: andl $63, %eax
; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
; AVX512F-NEXT: vpinsrb $15, %eax, %xmm4, %xmm1
; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2
; AVX512F-NEXT: vcvtdq2ps %zmm2, %zmm2
; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3
; AVX512F-NEXT: vcvtdq2ps %zmm3, %zmm3
; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512F-NEXT: vcvtdq2ps %zmm1, %zmm1
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512F-NEXT: vmovaps %zmm0, 192(%rdi)
; AVX512F-NEXT: vmovaps %zmm1, 128(%rdi)
; AVX512F-NEXT: vmovaps %zmm3, 64(%rdi)
; AVX512F-NEXT: vmovaps %zmm2, (%rdi)
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: var_cvt_shuffle_v64f32_v64i8_idx:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: pushq %rbp
; AVX512BW-NEXT: movq %rsp, %rbp
; AVX512BW-NEXT: andq $-64, %rsp
; AVX512BW-NEXT: subq $128, %rsp
; AVX512BW-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512BW-NEXT: vpbroadcastd %esi, %zmm2
; AVX512BW-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm1
; AVX512BW-NEXT: vmovd %xmm1, %eax
; AVX512BW-NEXT: vmovaps %zmm0, (%rsp)
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm0
; AVX512BW-NEXT: vpextrd $1, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $2, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $3, %xmm1, %eax
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $1, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $2, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $3, %xmm3, %eax
; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm5
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $1, %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
; AVX512BW-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm3
; AVX512BW-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: andl $63, %esi
; AVX512BW-NEXT: vpinsrb $10, (%rsp,%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $3, %xmm5, %eax
; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm1
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $11, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $12, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $1, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $13, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $2, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $14, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vpextrd $3, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $15, (%rsp,%rax), %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm1
; AVX512BW-NEXT: vpextrd $1, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vpextrd $2, %xmm4, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vpextrd $3, %xmm4, %eax
; AVX512BW-NEXT: vextracti128 $1, %ymm4, %xmm5
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vmovd %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vpextrd $1, %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vpextrd $2, %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vpextrd $3, %xmm5, %eax
; AVX512BW-NEXT: vextracti32x4 $2, %zmm4, %xmm5
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vmovd %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vpextrd $1, %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm1, %xmm1
; AVX512BW-NEXT: vpextrd $2, %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $10, (%rsp,%rax), %xmm1, %xmm6
; AVX512BW-NEXT: vpextrd $3, %xmm5, %eax
; AVX512BW-NEXT: vextracti32x4 $3, %zmm4, %xmm1
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $11, (%rsp,%rax), %xmm6, %xmm4
; AVX512BW-NEXT: vmovd %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $12, (%rsp,%rax), %xmm4, %xmm4
; AVX512BW-NEXT: vmovd %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm5
; AVX512BW-NEXT: vpextrd $1, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm5, %xmm5
; AVX512BW-NEXT: vpextrd $2, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm5, %xmm5
; AVX512BW-NEXT: vpextrd $3, %xmm3, %eax
; AVX512BW-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm5, %xmm5
; AVX512BW-NEXT: vmovd %xmm6, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm5, %xmm5
; AVX512BW-NEXT: vpextrd $1, %xmm6, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm5, %xmm5
; AVX512BW-NEXT: vpextrd $2, %xmm6, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm5, %xmm7
; AVX512BW-NEXT: vpextrd $3, %xmm6, %eax
; AVX512BW-NEXT: vextracti32x4 $2, %zmm3, %xmm5
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm7, %xmm6
; AVX512BW-NEXT: vmovd %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm6, %xmm6
; AVX512BW-NEXT: vpextrd $1, %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm6, %xmm6
; AVX512BW-NEXT: vpextrd $2, %xmm5, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $10, (%rsp,%rax), %xmm6, %xmm6
; AVX512BW-NEXT: vmovd %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm7
; AVX512BW-NEXT: vpextrd $1, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $2, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $2, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $3, %xmm2, %eax
; AVX512BW-NEXT: vextracti128 $1, %ymm2, %xmm8
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $3, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vmovd %xmm8, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $1, %xmm8, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $5, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $2, %xmm8, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $6, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $3, %xmm8, %eax
; AVX512BW-NEXT: vextracti32x4 $2, %zmm2, %xmm8
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vmovd %xmm8, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $8, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $1, %xmm8, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $9, (%rsp,%rax), %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $2, %xmm8, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $10, %eax, %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $3, %xmm8, %eax
; AVX512BW-NEXT: vextracti32x4 $3, %zmm2, %xmm2
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm7, %xmm7
; AVX512BW-NEXT: vmovd %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $1, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $2, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm7, %xmm7
; AVX512BW-NEXT: vpextrd $3, %xmm2, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm7, %xmm2
; AVX512BW-NEXT: vpextrd $3, %xmm5, %eax
; AVX512BW-NEXT: vextracti32x4 $3, %zmm3, %xmm3
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm6, %xmm5
; AVX512BW-NEXT: vmovd %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm5, %xmm5
; AVX512BW-NEXT: vpextrd $1, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm5, %xmm5
; AVX512BW-NEXT: vpextrd $2, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm5, %xmm5
; AVX512BW-NEXT: vpextrd $3, %xmm3, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm5, %xmm3
; AVX512BW-NEXT: vpextrd $1, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrd $2, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrd $3, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: movzbl (%rsp,%rax), %eax
; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm4, %xmm1
; AVX512BW-NEXT: vpmovsxbd %xmm2, %zmm2
; AVX512BW-NEXT: vcvtdq2ps %zmm2, %zmm2
; AVX512BW-NEXT: vpmovsxbd %xmm3, %zmm3
; AVX512BW-NEXT: vcvtdq2ps %zmm3, %zmm3
; AVX512BW-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512BW-NEXT: vcvtdq2ps %zmm1, %zmm1
; AVX512BW-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512BW-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512BW-NEXT: vmovaps %zmm0, 192(%rdi)
; AVX512BW-NEXT: vmovaps %zmm1, 128(%rdi)
; AVX512BW-NEXT: vmovaps %zmm3, 64(%rdi)
; AVX512BW-NEXT: vmovaps %zmm2, (%rdi)
; AVX512BW-NEXT: movq %rbp, %rsp
; AVX512BW-NEXT: popq %rbp
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VBMI-LABEL: var_cvt_shuffle_v64f32_v64i8_idx:
; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: pushq %rbp
; AVX512VBMI-NEXT: movq %rsp, %rbp
; AVX512VBMI-NEXT: andq $-64, %rsp
; AVX512VBMI-NEXT: subq $128, %rsp
; AVX512VBMI-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512VBMI-NEXT: vpbroadcastd %esi, %zmm1
; AVX512VBMI-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
; AVX512VBMI-NEXT: vmovd %xmm2, %eax
; AVX512VBMI-NEXT: vmovdqa64 %zmm0, (%rsp)
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: movzbl (%rsp,%rax), %eax
; AVX512VBMI-NEXT: vmovd %eax, %xmm3
; AVX512VBMI-NEXT: vpextrd $1, %xmm2, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $1, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $2, %xmm2, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $2, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $3, %xmm2, %eax
; AVX512VBMI-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $3, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vmovd %xmm4, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $4, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $1, %xmm4, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $5, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $2, %xmm4, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $6, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $3, %xmm4, %eax
; AVX512VBMI-NEXT: vextracti32x4 $2, %zmm2, %xmm4
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $7, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vmovd %xmm4, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $8, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $1, %xmm4, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $9, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: andl $63, %esi
; AVX512VBMI-NEXT: vpinsrb $10, (%rsp,%rsi), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $3, %xmm4, %eax
; AVX512VBMI-NEXT: vextracti32x4 $3, %zmm2, %xmm2
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $11, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vmovd %xmm2, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $12, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $1, %xmm2, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $13, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $2, %xmm2, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $14, (%rsp,%rax), %xmm3, %xmm3
; AVX512VBMI-NEXT: vpextrd $3, %xmm2, %eax
; AVX512VBMI-NEXT: andl $63, %eax
; AVX512VBMI-NEXT: vpinsrb $15, (%rsp,%rax), %xmm3, %xmm2
; AVX512VBMI-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
; AVX512VBMI-NEXT: vpmovdb %zmm3, %xmm3
; AVX512VBMI-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
; AVX512VBMI-NEXT: vpmovdb %zmm4, %xmm4
; AVX512VBMI-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VBMI-NEXT: vpmovdb %zmm1, %xmm1
; AVX512VBMI-NEXT: vpmovsxbd %xmm2, %zmm2
; AVX512VBMI-NEXT: vcvtdq2ps %zmm2, %zmm2
; AVX512VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm1
; AVX512VBMI-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512VBMI-NEXT: vcvtdq2ps %zmm1, %zmm1
; AVX512VBMI-NEXT: vpermb %zmm0, %zmm4, %zmm4
; AVX512VBMI-NEXT: vpmovsxbd %xmm4, %zmm4
; AVX512VBMI-NEXT: vcvtdq2ps %zmm4, %zmm4
; AVX512VBMI-NEXT: vpermb %zmm0, %zmm3, %zmm0
; AVX512VBMI-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512VBMI-NEXT: vcvtdq2ps %zmm0, %zmm0
; AVX512VBMI-NEXT: vmovaps %zmm0, 128(%rdi)
; AVX512VBMI-NEXT: vmovaps %zmm4, 64(%rdi)
; AVX512VBMI-NEXT: vmovaps %zmm1, (%rdi)
; AVX512VBMI-NEXT: vmovaps %zmm2, 192(%rdi)
; AVX512VBMI-NEXT: movq %rbp, %rsp
; AVX512VBMI-NEXT: popq %rbp
; AVX512VBMI-NEXT: vzeroupper
; AVX512VBMI-NEXT: retq
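; The IR below is the fully scalarized form of a variable byte shuffle: the
; index vector is %b broadcast to all 64 lanes plus the descending constants
; <58, 57, ..., 1, undef, -1, ..., -5>, so lane i reads src[%b + 58 - i].
; Lane 58's mask element is undef and the IR indexes %src with %b directly.
; Without VBMI (AVX512F/AVX512BW above), the shuffle is expanded by spilling
; %src to a 64-byte-aligned stack slot and reloading each byte through an
; index masked with "andl $63", rebuilding each 16-byte chunk with vpinsrb.
; With AVX512VBMI, three of the four chunks are selected with vpermb after
; the i32 indices are narrowed with vpmovdb; the chunk holding the directly
; indexed lane 58 is still assembled scalarly. Each chunk is then
; sign-extended (vpmovsxbd) and converted (vcvtdq2ps) before being stored.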
%b_broadcast_init = insertelement <64 x i32> undef, i32 %b, i32 0
%b_broadcast = shufflevector <64 x i32> %b_broadcast_init, <64 x i32> undef, <64 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0>
%sub_add__b_broadcast_ = add <64 x i32> %b_broadcast, <i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 undef, i32 -1, i32 -2, i32 -3, i32 -4, i32 -5>
%index_0.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 0
%index_1.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 1
%index_2.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 2
%index_3.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 3
%index_4.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 4
%index_5.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 5
%index_6.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 6
%index_7.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 7
%index_8.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 8
%index_9.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 9
%index_10.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 10
%index_11.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 11
%index_12.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 12
%index_13.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 13
%index_14.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 14
%index_15.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 15
%index_16.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 16
%index_17.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 17
%index_18.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 18
%index_19.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 19
%index_20.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 20
%index_21.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 21
%index_22.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 22
%index_23.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 23
%index_24.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 24
%index_25.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 25
%index_26.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 26
%index_27.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 27
%index_28.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 28
%index_29.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 29
%index_30.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 30
%index_31.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 31
%index_32.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 32
%index_33.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 33
%index_34.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 34
%index_35.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 35
%index_36.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 36
%index_37.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 37
%index_38.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 38
%index_39.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 39
%index_40.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 40
%index_41.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 41
%index_42.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 42
%index_43.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 43
%index_44.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 44
%index_45.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 45
%index_46.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 46
%index_47.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 47
%index_48.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 48
%index_49.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 49
%index_50.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 50
%index_51.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 51
%index_52.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 52
%index_53.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 53
%index_54.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 54
%index_55.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 55
%index_56.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 56
%index_57.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 57
%index_59.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 59
%index_60.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 60
%index_61.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 61
%index_62.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 62
%index_63.i.i = extractelement <64 x i32> %sub_add__b_broadcast_, i32 63
%v_0.i.i = extractelement <64 x i8> %src, i32 %index_0.i.i
%v_1.i.i = extractelement <64 x i8> %src, i32 %index_1.i.i
%v_2.i.i = extractelement <64 x i8> %src, i32 %index_2.i.i
%v_3.i.i = extractelement <64 x i8> %src, i32 %index_3.i.i
%v_4.i.i = extractelement <64 x i8> %src, i32 %index_4.i.i
%v_5.i.i = extractelement <64 x i8> %src, i32 %index_5.i.i
%v_6.i.i = extractelement <64 x i8> %src, i32 %index_6.i.i
%v_7.i.i = extractelement <64 x i8> %src, i32 %index_7.i.i
%v_8.i.i = extractelement <64 x i8> %src, i32 %index_8.i.i
%v_9.i.i = extractelement <64 x i8> %src, i32 %index_9.i.i
%v_10.i.i = extractelement <64 x i8> %src, i32 %index_10.i.i
%v_11.i.i = extractelement <64 x i8> %src, i32 %index_11.i.i
%v_12.i.i = extractelement <64 x i8> %src, i32 %index_12.i.i
%v_13.i.i = extractelement <64 x i8> %src, i32 %index_13.i.i
%v_14.i.i = extractelement <64 x i8> %src, i32 %index_14.i.i
%v_15.i.i = extractelement <64 x i8> %src, i32 %index_15.i.i
%v_16.i.i = extractelement <64 x i8> %src, i32 %index_16.i.i
%v_17.i.i = extractelement <64 x i8> %src, i32 %index_17.i.i
%v_18.i.i = extractelement <64 x i8> %src, i32 %index_18.i.i
%v_19.i.i = extractelement <64 x i8> %src, i32 %index_19.i.i
%v_20.i.i = extractelement <64 x i8> %src, i32 %index_20.i.i
%v_21.i.i = extractelement <64 x i8> %src, i32 %index_21.i.i
%v_22.i.i = extractelement <64 x i8> %src, i32 %index_22.i.i
%v_23.i.i = extractelement <64 x i8> %src, i32 %index_23.i.i
%v_24.i.i = extractelement <64 x i8> %src, i32 %index_24.i.i
%v_25.i.i = extractelement <64 x i8> %src, i32 %index_25.i.i
%v_26.i.i = extractelement <64 x i8> %src, i32 %index_26.i.i
%v_27.i.i = extractelement <64 x i8> %src, i32 %index_27.i.i
%v_28.i.i = extractelement <64 x i8> %src, i32 %index_28.i.i
%v_29.i.i = extractelement <64 x i8> %src, i32 %index_29.i.i
%v_30.i.i = extractelement <64 x i8> %src, i32 %index_30.i.i
%v_31.i.i = extractelement <64 x i8> %src, i32 %index_31.i.i
%v_32.i.i = extractelement <64 x i8> %src, i32 %index_32.i.i
%v_33.i.i = extractelement <64 x i8> %src, i32 %index_33.i.i
%v_34.i.i = extractelement <64 x i8> %src, i32 %index_34.i.i
%v_35.i.i = extractelement <64 x i8> %src, i32 %index_35.i.i
%v_36.i.i = extractelement <64 x i8> %src, i32 %index_36.i.i
%v_37.i.i = extractelement <64 x i8> %src, i32 %index_37.i.i
%v_38.i.i = extractelement <64 x i8> %src, i32 %index_38.i.i
%v_39.i.i = extractelement <64 x i8> %src, i32 %index_39.i.i
%v_40.i.i = extractelement <64 x i8> %src, i32 %index_40.i.i
%v_41.i.i = extractelement <64 x i8> %src, i32 %index_41.i.i
%v_42.i.i = extractelement <64 x i8> %src, i32 %index_42.i.i
%v_43.i.i = extractelement <64 x i8> %src, i32 %index_43.i.i
%v_44.i.i = extractelement <64 x i8> %src, i32 %index_44.i.i
%v_45.i.i = extractelement <64 x i8> %src, i32 %index_45.i.i
%v_46.i.i = extractelement <64 x i8> %src, i32 %index_46.i.i
%v_47.i.i = extractelement <64 x i8> %src, i32 %index_47.i.i
%v_48.i.i = extractelement <64 x i8> %src, i32 %index_48.i.i
%v_49.i.i = extractelement <64 x i8> %src, i32 %index_49.i.i
%v_50.i.i = extractelement <64 x i8> %src, i32 %index_50.i.i
%v_51.i.i = extractelement <64 x i8> %src, i32 %index_51.i.i
%v_52.i.i = extractelement <64 x i8> %src, i32 %index_52.i.i
%v_53.i.i = extractelement <64 x i8> %src, i32 %index_53.i.i
%v_54.i.i = extractelement <64 x i8> %src, i32 %index_54.i.i
%v_55.i.i = extractelement <64 x i8> %src, i32 %index_55.i.i
%v_56.i.i = extractelement <64 x i8> %src, i32 %index_56.i.i
%v_57.i.i = extractelement <64 x i8> %src, i32 %index_57.i.i
%v_58.i.i = extractelement <64 x i8> %src, i32 %b
%v_59.i.i = extractelement <64 x i8> %src, i32 %index_59.i.i
%v_60.i.i = extractelement <64 x i8> %src, i32 %index_60.i.i
%v_61.i.i = extractelement <64 x i8> %src, i32 %index_61.i.i
%v_62.i.i = extractelement <64 x i8> %src, i32 %index_62.i.i
%v_63.i.i = extractelement <64 x i8> %src, i32 %index_63.i.i
%dst_0.i.i = insertelement <64 x i8> undef, i8 %v_0.i.i, i32 0
%dst_1.i.i = insertelement <64 x i8> %dst_0.i.i, i8 %v_1.i.i, i32 1
%dst_2.i.i = insertelement <64 x i8> %dst_1.i.i, i8 %v_2.i.i, i32 2
%dst_3.i.i = insertelement <64 x i8> %dst_2.i.i, i8 %v_3.i.i, i32 3
%dst_4.i.i = insertelement <64 x i8> %dst_3.i.i, i8 %v_4.i.i, i32 4
%dst_5.i.i = insertelement <64 x i8> %dst_4.i.i, i8 %v_5.i.i, i32 5
%dst_6.i.i = insertelement <64 x i8> %dst_5.i.i, i8 %v_6.i.i, i32 6
%dst_7.i.i = insertelement <64 x i8> %dst_6.i.i, i8 %v_7.i.i, i32 7
%dst_8.i.i = insertelement <64 x i8> %dst_7.i.i, i8 %v_8.i.i, i32 8
%dst_9.i.i = insertelement <64 x i8> %dst_8.i.i, i8 %v_9.i.i, i32 9
%dst_10.i.i = insertelement <64 x i8> %dst_9.i.i, i8 %v_10.i.i, i32 10
%dst_11.i.i = insertelement <64 x i8> %dst_10.i.i, i8 %v_11.i.i, i32 11
%dst_12.i.i = insertelement <64 x i8> %dst_11.i.i, i8 %v_12.i.i, i32 12
%dst_13.i.i = insertelement <64 x i8> %dst_12.i.i, i8 %v_13.i.i, i32 13
%dst_14.i.i = insertelement <64 x i8> %dst_13.i.i, i8 %v_14.i.i, i32 14
%dst_15.i.i = insertelement <64 x i8> %dst_14.i.i, i8 %v_15.i.i, i32 15
%dst_16.i.i = insertelement <64 x i8> %dst_15.i.i, i8 %v_16.i.i, i32 16
%dst_17.i.i = insertelement <64 x i8> %dst_16.i.i, i8 %v_17.i.i, i32 17
%dst_18.i.i = insertelement <64 x i8> %dst_17.i.i, i8 %v_18.i.i, i32 18
%dst_19.i.i = insertelement <64 x i8> %dst_18.i.i, i8 %v_19.i.i, i32 19
%dst_20.i.i = insertelement <64 x i8> %dst_19.i.i, i8 %v_20.i.i, i32 20
%dst_21.i.i = insertelement <64 x i8> %dst_20.i.i, i8 %v_21.i.i, i32 21
%dst_22.i.i = insertelement <64 x i8> %dst_21.i.i, i8 %v_22.i.i, i32 22
%dst_23.i.i = insertelement <64 x i8> %dst_22.i.i, i8 %v_23.i.i, i32 23
%dst_24.i.i = insertelement <64 x i8> %dst_23.i.i, i8 %v_24.i.i, i32 24
%dst_25.i.i = insertelement <64 x i8> %dst_24.i.i, i8 %v_25.i.i, i32 25
%dst_26.i.i = insertelement <64 x i8> %dst_25.i.i, i8 %v_26.i.i, i32 26
%dst_27.i.i = insertelement <64 x i8> %dst_26.i.i, i8 %v_27.i.i, i32 27
%dst_28.i.i = insertelement <64 x i8> %dst_27.i.i, i8 %v_28.i.i, i32 28
%dst_29.i.i = insertelement <64 x i8> %dst_28.i.i, i8 %v_29.i.i, i32 29
%dst_30.i.i = insertelement <64 x i8> %dst_29.i.i, i8 %v_30.i.i, i32 30
%dst_31.i.i = insertelement <64 x i8> %dst_30.i.i, i8 %v_31.i.i, i32 31
%dst_32.i.i = insertelement <64 x i8> %dst_31.i.i, i8 %v_32.i.i, i32 32
%dst_33.i.i = insertelement <64 x i8> %dst_32.i.i, i8 %v_33.i.i, i32 33
%dst_34.i.i = insertelement <64 x i8> %dst_33.i.i, i8 %v_34.i.i, i32 34
%dst_35.i.i = insertelement <64 x i8> %dst_34.i.i, i8 %v_35.i.i, i32 35
%dst_36.i.i = insertelement <64 x i8> %dst_35.i.i, i8 %v_36.i.i, i32 36
%dst_37.i.i = insertelement <64 x i8> %dst_36.i.i, i8 %v_37.i.i, i32 37
%dst_38.i.i = insertelement <64 x i8> %dst_37.i.i, i8 %v_38.i.i, i32 38
%dst_39.i.i = insertelement <64 x i8> %dst_38.i.i, i8 %v_39.i.i, i32 39
%dst_40.i.i = insertelement <64 x i8> %dst_39.i.i, i8 %v_40.i.i, i32 40
%dst_41.i.i = insertelement <64 x i8> %dst_40.i.i, i8 %v_41.i.i, i32 41
%dst_42.i.i = insertelement <64 x i8> %dst_41.i.i, i8 %v_42.i.i, i32 42
%dst_43.i.i = insertelement <64 x i8> %dst_42.i.i, i8 %v_43.i.i, i32 43
%dst_44.i.i = insertelement <64 x i8> %dst_43.i.i, i8 %v_44.i.i, i32 44
%dst_45.i.i = insertelement <64 x i8> %dst_44.i.i, i8 %v_45.i.i, i32 45
%dst_46.i.i = insertelement <64 x i8> %dst_45.i.i, i8 %v_46.i.i, i32 46
%dst_47.i.i = insertelement <64 x i8> %dst_46.i.i, i8 %v_47.i.i, i32 47
%dst_48.i.i = insertelement <64 x i8> %dst_47.i.i, i8 %v_48.i.i, i32 48
%dst_49.i.i = insertelement <64 x i8> %dst_48.i.i, i8 %v_49.i.i, i32 49
%dst_50.i.i = insertelement <64 x i8> %dst_49.i.i, i8 %v_50.i.i, i32 50
%dst_51.i.i = insertelement <64 x i8> %dst_50.i.i, i8 %v_51.i.i, i32 51
%dst_52.i.i = insertelement <64 x i8> %dst_51.i.i, i8 %v_52.i.i, i32 52
%dst_53.i.i = insertelement <64 x i8> %dst_52.i.i, i8 %v_53.i.i, i32 53
%dst_54.i.i = insertelement <64 x i8> %dst_53.i.i, i8 %v_54.i.i, i32 54
%dst_55.i.i = insertelement <64 x i8> %dst_54.i.i, i8 %v_55.i.i, i32 55
%dst_56.i.i = insertelement <64 x i8> %dst_55.i.i, i8 %v_56.i.i, i32 56
%dst_57.i.i = insertelement <64 x i8> %dst_56.i.i, i8 %v_57.i.i, i32 57
%dst_58.i.i = insertelement <64 x i8> %dst_57.i.i, i8 %v_58.i.i, i32 58
%dst_59.i.i = insertelement <64 x i8> %dst_58.i.i, i8 %v_59.i.i, i32 59
%dst_60.i.i = insertelement <64 x i8> %dst_59.i.i, i8 %v_60.i.i, i32 60
%dst_61.i.i = insertelement <64 x i8> %dst_60.i.i, i8 %v_61.i.i, i32 61
%dst_62.i.i = insertelement <64 x i8> %dst_61.i.i, i8 %v_62.i.i, i32 62
%dst_63.i.i = insertelement <64 x i8> %dst_62.i.i, i8 %v_63.i.i, i32 63
%shuf_load_to_float = sitofp <64 x i8> %dst_63.i.i to <64 x float>
store <64 x float> %shuf_load_to_float, ptr %dst
ret void
}