; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1OR2,AVX1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX1OR2,AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512BW
; RUN: llc < %s -mtriple=i686-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,X86AVX2
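
; An undef insert index or an undef inserted scalar leaves nothing to do:
; the insertelement folds away entirely, so each of the next two functions
; should compile to a bare return on every configuration.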
define <16 x i8> @undef_index(i8 %x) nounwind {
; ALL-LABEL: undef_index:
; ALL: # %bb.0:
; ALL-NEXT: ret{{[l|q]}}
%ins = insertelement <16 x i8> undef, i8 %x, i64 undef
ret <16 x i8> %ins
}

define <16 x i8> @undef_scalar(<16 x i8> %x, i32 %index) nounwind {
; ALL-LABEL: undef_scalar:
; ALL: # %bb.0:
; ALL-NEXT: ret{{[l|q]}}
%ins = insertelement <16 x i8> %x, i8 undef, i32 %index
ret <16 x i8> %ins
}

;
; Insertion into undef vectors
;
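; With an undef destination, every lane of the result is either the inserted
; scalar or undef, so the variable index is irrelevant and the insert can be
; lowered as a plain splat/broadcast of the scalar rather than going through
; memory at a variable offset.
;
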
define <16 x i8> @arg_i8_v16i8_undef(i8 %x, i32 %y) nounwind {
; SSE2-LABEL: arg_i8_v16i8_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: arg_i8_v16i8_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: arg_i8_v16i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i8_v16i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: arg_i8_v16i8_undef:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovd %edi, %xmm0
; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i8_v16i8_undef:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %edi, %xmm0
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i8_v16i8_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: retl
%ins = insertelement <16 x i8> undef, i8 %x, i32 %y
ret <16 x i8> %ins
}

define <8 x i16> @arg_i16_v8i16_undef(i16 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i16_v8i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_i16_v8i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i16_v8i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: arg_i16_v8i16_undef:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovd %edi, %xmm0
; AVX512F-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i16_v8i16_undef:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw %edi, %xmm0
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i16_v8i16_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vpbroadcastw {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: retl
%ins = insertelement <8 x i16> undef, i16 %x, i32 %y
ret <8 x i16> %ins
}

define <4 x i32> @arg_i32_v4i32_undef(i32 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i32_v4i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_i32_v4i32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i32_v4i32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_i32_v4i32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %edi, %xmm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i32_v4i32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: retl
%ins = insertelement <4 x i32> undef, i32 %x, i32 %y
ret <4 x i32> %ins
}

define <2 x i64> @arg_i64_v2i64_undef(i64 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i64_v2i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_i64_v2i64_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i64_v2i64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_i64_v2i64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastq %rdi, %xmm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i64_v2i64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86AVX2-NEXT: retl
%ins = insertelement <2 x i64> undef, i64 %x, i32 %y
ret <2 x i64> %ins
}

define <4 x float> @arg_f32_v4f32_undef(float %x, i32 %y) nounwind {
; SSE-LABEL: arg_f32_v4f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_f32_v4f32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_f32_v4f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_f32_v4f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss %xmm0, %xmm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_f32_v4f32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: retl
%ins = insertelement <4 x float> undef, float %x, i32 %y
ret <4 x float> %ins
}

define <2 x double> @arg_f64_v2f64_undef(double %x, i32 %y) nounwind {
; SSE2-LABEL: arg_f64_v2f64_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: arg_f64_v2f64_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: arg_f64_v2f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
;
; X86AVX2-LABEL: arg_f64_v2f64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86AVX2-NEXT: retl
%ins = insertelement <2 x double> undef, double %x, i32 %y
ret <2 x double> %ins
}
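
; The same splat lowering applies when the scalar comes from memory; most AVX
; configurations fold the load into the broadcast (vpbroadcastb/w, vbroadcastss,
; vmovddup with a memory operand), while AVX1 still goes through a GPR for the
; i8/i16 cases since it lacks byte/word broadcasts.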
define <16 x i8> @load_i8_v16i8_undef(ptr %p, i32 %y) nounwind {
; SSE2-LABEL: load_i8_v16i8_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movzbl (%rdi), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_i8_v16i8_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movzbl (%rdi), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pshufb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_i8_v16i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_i8_v16i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_i8_v16i8_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb (%rdi), %xmm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i8_v16i8_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vpbroadcastb (%eax), %xmm0
; X86AVX2-NEXT: retl
%x = load i8, ptr %p
%ins = insertelement <16 x i8> undef, i8 %x, i32 %y
ret <16 x i8> %ins
}

define <8 x i16> @load_i16_v8i16_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i16_v8i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i16_v8i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzwl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_i16_v8i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw (%rdi), %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_i16_v8i16_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastw (%rdi), %xmm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i16_v8i16_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vpbroadcastw (%eax), %xmm0
; X86AVX2-NEXT: retl
%x = load i16, ptr %p
%ins = insertelement <8 x i16> undef, i16 %x, i32 %y
ret <8 x i16> %ins
}

define <4 x i32> @load_i32_v4i32_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i32_v4i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: load_i32_v4i32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %xmm0
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_i32_v4i32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vbroadcastss (%eax), %xmm0
; X86AVX2-NEXT: retl
%x = load i32, ptr %p
%ins = insertelement <4 x i32> undef, i32 %x, i32 %y
ret <4 x i32> %ins
}

define <2 x i64> @load_i64_v2i64_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i64_v2i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
; AVX-LABEL: load_i64_v2i64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_i64_v2i64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86AVX2-NEXT: retl
%x = load i64, ptr %p
%ins = insertelement <2 x i64> undef, i64 %x, i32 %y
ret <2 x i64> %ins
}

define <4 x float> @load_f32_v4f32_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_f32_v4f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
; AVX-LABEL: load_f32_v4f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %xmm0
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_f32_v4f32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vbroadcastss (%eax), %xmm0
; X86AVX2-NEXT: retl
%x = load float, ptr %p
%ins = insertelement <4 x float> undef, float %x, i32 %y
ret <4 x float> %ins
}

define <2 x double> @load_f64_v2f64_undef(ptr %p, i32 %y) nounwind {
; SSE2-LABEL: load_f64_v2f64_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_f64_v2f64_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
; SSE41-NEXT: retq
;
; AVX-LABEL: load_f64_v2f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_f64_v2f64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X86AVX2-NEXT: retl
%x = load double, ptr %p
%ins = insertelement <2 x double> undef, double %x, i32 %y
ret <2 x double> %ins
}
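
; 256-bit undef destinations: AVX targets still splat (now into a ymm), but
; plain SSE falls back to the generic stack lowering; note it stores only the
; scalar and reloads both halves, never spilling a source, since every other
; byte of the slot is undef anyway.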
define <32 x i8> @arg_i8_v32i8_undef(i8 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i8_v32i8_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $31, %esi
; SSE-NEXT: movb %dil, -40(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_i8_v32i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i8_v32i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: arg_i8_v32i8_undef:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovd %edi, %xmm0
; AVX512F-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i8_v32i8_undef:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %edi, %ymm0
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i8_v32i8_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: retl
%ins = insertelement <32 x i8> undef, i8 %x, i32 %y
ret <32 x i8> %ins
}

define <16 x i16> @arg_i16_v16i16_undef(i16 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i16_v16i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movw %di, -40(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_i16_v16i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i16_v16i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: arg_i16_v16i16_undef:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovd %edi, %xmm0
; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i16_v16i16_undef:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw %edi, %ymm0
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i16_v16i16_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vpbroadcastw {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: retl
%ins = insertelement <16 x i16> undef, i16 %x, i32 %y
ret <16 x i16> %ins
}

define <8 x i32> @arg_i32_v8i32_undef(i32 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i32_v8i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movl %edi, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_i32_v8i32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i32_v8i32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_i32_v8i32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %edi, %ymm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i32_v8i32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: retl
%ins = insertelement <8 x i32> undef, i32 %x, i32 %y
ret <8 x i32> %ins
}

define <4 x i64> @arg_i64_v4i64_undef(i64 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i64_v4i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movq %rdi, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_i64_v4i64_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_i64_v4i64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_i64_v4i64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastq %rdi, %ymm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i64_v4i64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: retl
%ins = insertelement <4 x i64> undef, i64 %x, i32 %y
ret <4 x i64> %ins
}

define <8 x float> @arg_f32_v8f32_undef(float %x, i32 %y) nounwind {
; SSE-LABEL: arg_f32_v8f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $7, %edi
; SSE-NEXT: movss %xmm0, -40(%rsp,%rdi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_f32_v8f32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_f32_v8f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_f32_v8f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastss %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_f32_v8f32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: retl
%ins = insertelement <8 x float> undef, float %x, i32 %y
ret <8 x float> %ins
}

define <4 x double> @arg_f64_v4f64_undef(double %x, i32 %y) nounwind {
; SSE-LABEL: arg_f64_v4f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movsd %xmm0, -40(%rsp,%rdi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_f64_v4f64_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_f64_v4f64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_f64_v4f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_f64_v4f64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: retl
%ins = insertelement <4 x double> undef, double %x, i32 %y
ret <4 x double> %ins
}

define <32 x i8> @load_i8_v32i8_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i8_v32i8_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movzbl (%rdi), %eax
; SSE-NEXT: andl $31, %esi
; SSE-NEXT: movb %al, -40(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i8_v32i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_i8_v32i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_i8_v32i8_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i8_v32i8_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vpbroadcastb (%eax), %ymm0
; X86AVX2-NEXT: retl
%x = load i8, ptr %p
%ins = insertelement <32 x i8> undef, i8 %x, i32 %y
ret <32 x i8> %ins
}

define <16 x i16> @load_i16_v16i16_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i16_v16i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movw %ax, -40(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i16_v16i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzwl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_i16_v16i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_i16_v16i16_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i16_v16i16_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vpbroadcastw (%eax), %ymm0
; X86AVX2-NEXT: retl
%x = load i16, ptr %p
%ins = insertelement <16 x i16> undef, i16 %x, i32 %y
ret <16 x i16> %ins
}

define <8 x i32> @load_i32_v8i32_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i32_v8i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movl (%rdi), %eax
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movl %eax, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_i32_v8i32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %ymm0
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_i32_v8i32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vbroadcastss (%eax), %ymm0
; X86AVX2-NEXT: retl
%x = load i32, ptr %p
%ins = insertelement <8 x i32> undef, i32 %x, i32 %y
ret <8 x i32> %ins
}

define <4 x i64> @load_i64_v4i64_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i64_v4i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movq (%rdi), %rax
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movq %rax, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_i64_v4i64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_i64_v4i64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vbroadcastsd (%eax), %ymm0
; X86AVX2-NEXT: retl
%x = load i64, ptr %p
%ins = insertelement <4 x i64> undef, i64 %x, i32 %y
ret <4 x i64> %ins
}

define <8 x float> @load_f32_v8f32_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_f32_v8f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movss %xmm0, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_f32_v8f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %ymm0
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_f32_v8f32_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vbroadcastss (%eax), %ymm0
; X86AVX2-NEXT: retl
%x = load float, ptr %p
%ins = insertelement <8 x float> undef, float %x, i32 %y
ret <8 x float> %ins
}

define <4 x double> @load_f64_v4f64_undef(ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_f64_v4f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movsd %xmm0, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_f64_v4f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
;
; X86AVX2-LABEL: load_f64_v4f64_undef:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vbroadcastsd (%eax), %ymm0
; X86AVX2-NEXT: retl
%x = load double, ptr %p
%ins = insertelement <4 x double> undef, double %x, i32 %y
ret <4 x double> %ins
}

;
; Insertion into arg vectors
;
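; With a live destination the index matters. Without AVX512, the generic
; lowering spills the vector, stores the scalar into the slot at the clamped
; index, and reloads the vector (the SSE4.1/AVX float cases instead build a
; compare mask and blend). AVX512 compares a splat of the index against a
; constant-pool vector of lane indices to form a k-mask, then performs a
; masked broadcast of the scalar.
;
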
define <16 x i8> @arg_i8_v16i8(<16 x i8> %v, i8 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i8_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movb %dil, -24(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i8_v16i8:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $15, %esi
; AVX1OR2-NEXT: movb %dil, -24(%rsp,%rsi)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: arg_i8_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX512F-NEXT: andl $15, %esi
; AVX512F-NEXT: movb %dil, -24(%rsp,%rsi)
; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i8_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %esi, %xmm1
; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512BW-NEXT: vpbroadcastb %edi, %xmm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i8_v16i8:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $15, %eax
; X86AVX2-NEXT: movzbl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: movb %cl, (%esp,%eax)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <16 x i8> %v, i8 %x, i32 %y
ret <16 x i8> %ins
}

define <8 x i16> @arg_i16_v8i16(<8 x i16> %v, i16 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i16_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movw %di, -24(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i16_v8i16:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $7, %esi
; AVX1OR2-NEXT: movw %di, -24(%rsp,%rsi,2)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: arg_i16_v8i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX512F-NEXT: andl $7, %esi
; AVX512F-NEXT: movw %di, -24(%rsp,%rsi,2)
; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i16_v8i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw %esi, %xmm1
; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512BW-NEXT: vpbroadcastw %edi, %xmm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i16_v8i16:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movzwl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <8 x i16> %v, i16 %x, i32 %y
ret <8 x i16> %ins
}

define <4 x i32> @arg_i32_v4i32(<4 x i32> %v, i32 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i32_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movl %edi, -24(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i32_v4i32:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $3, %esi
; AVX1OR2-NEXT: movl %edi, -24(%rsp,%rsi,4)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: arg_i32_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %xmm1
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512-NEXT: vpbroadcastd %edi, %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i32_v4i32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <4 x i32> %v, i32 %x, i32 %y
ret <4 x i32> %ins
}
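
; i64 inserts on the 32-bit target (X86AVX2) have no 64-bit GPR to store, so
; the element is split into two i32 pieces inserted at indices 2*y and 2*y+1
; of the equivalent <4 x i32>, each through its own stack round trip.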
define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i64_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
; SSE-NEXT: movq %rdi, -24(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i64_v2i64:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $1, %esi
; AVX1OR2-NEXT: movq %rdi, -24(%rsp,%rsi,8)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: arg_i64_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %xmm1
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512-NEXT: vpbroadcastq %rdi, %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i64_v2i64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $48, %esp
; X86AVX2-NEXT: movl 8(%ebp), %edx
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: addl %ecx, %ecx
; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $3, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
; X86AVX2-NEXT: incl %ecx
; X86AVX2-NEXT: andl $3, %ecx
; X86AVX2-NEXT: movl %eax, 16(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <2 x i64> %v, i64 %x, i32 %y
ret <2 x i64> %ins
}

define <4 x float> @arg_f32_v4f32(<4 x float> %v, float %x, i32 %y) nounwind {
; SSE2-LABEL: arg_f32_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: movss %xmm1, -24(%rsp,%rdi,4)
; SSE2-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: arg_f32_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE41-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: arg_f32_v4f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vmovd %edi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_f32_v4f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm1, %xmm1
; AVX2-NEXT: vmovd %edi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_f32_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %edi, %xmm2
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
; AVX512-NEXT: vbroadcastss %xmm1, %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_f32_v4f32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %xmm1
; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm2
; X86AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; X86AVX2-NEXT: retl
%ins = insertelement <4 x float> %v, float %x, i32 %y
ret <4 x float> %ins
}

define <2 x double> @arg_f64_v2f64(<2 x double> %v, double %x, i32 %y) nounwind {
; SSE2-LABEL: arg_f64_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $edi killed $edi def $rdi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $1, %edi
; SSE2-NEXT: movsd %xmm1, -24(%rsp,%rdi,8)
; SSE2-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: arg_f64_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movapd %xmm0, %xmm2
; SSE41-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
; SSE41-NEXT: movd %edi, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE41-NEXT: pcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: arg_f64_v2f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX1-NEXT: vmovd %edi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_f64_v2f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpbroadcastq %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_f64_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %edi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %xmm2
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
; AVX512-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_f64_v2f64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 16(%ebp), %eax
; X86AVX2-NEXT: andl $1, %eax
; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <2 x double> %v, double %x, i32 %y
ret <2 x double> %ins
}
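
; Loaded-scalar variants of the insert-into-argument tests: the AVX512 masked
; broadcasts take the scalar straight from memory, while the stack-slot and
; blend lowerings are unchanged apart from the extra load.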
define <16 x i8> @load_i8_v16i8(<16 x i8> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i8_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movzbl (%rdi), %eax
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movb %al, -24(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i8_v16i8:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movzbl (%rdi), %eax
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $15, %esi
; AVX1OR2-NEXT: movb %al, -24(%rsp,%rsi)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: load_i8_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX512F-NEXT: andl $15, %esi
; AVX512F-NEXT: movb %al, -24(%rsp,%rsi)
; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_i8_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %esi, %xmm1
; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512BW-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: load_i8_v16i8:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $15, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movzbl (%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: movb %cl, (%esp,%eax)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i8, ptr %p
%ins = insertelement <16 x i8> %v, i8 %x, i32 %y
ret <16 x i8> %ins
}

define <8 x i16> @load_i16_v8i16(<8 x i16> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i16_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movw %ax, -24(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i16_v8i16:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movzwl (%rdi), %eax
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $7, %esi
; AVX1OR2-NEXT: movw %ax, -24(%rsp,%rsi,2)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: load_i16_v8i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: movzwl (%rdi), %eax
; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX512F-NEXT: andl $7, %esi
; AVX512F-NEXT: movw %ax, -24(%rsp,%rsi,2)
; AVX512F-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_i16_v8i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw %esi, %xmm1
; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512BW-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: load_i16_v8i16:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movzwl (%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i16, ptr %p
%ins = insertelement <8 x i16> %v, i16 %x, i32 %y
ret <8 x i16> %ins
}

define <4 x i32> @load_i32_v4i32(<4 x i32> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i32_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movl (%rdi), %eax
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movl %eax, -24(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i32_v4i32:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movl (%rdi), %eax
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $3, %esi
; AVX1OR2-NEXT: movl %eax, -24(%rsp,%rsi,4)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: load_i32_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %xmm1
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i32_v4i32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movl (%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i32, ptr %p
%ins = insertelement <4 x i32> %v, i32 %x, i32 %y
ret <4 x i32> %ins
}

define <2 x i64> @load_i64_v2i64(<2 x i64> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i64_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movq (%rdi), %rax
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
; SSE-NEXT: movq %rax, -24(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i64_v2i64:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movq (%rdi), %rax
; AVX1OR2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1OR2-NEXT: andl $1, %esi
; AVX1OR2-NEXT: movq %rax, -24(%rsp,%rsi,8)
; AVX1OR2-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: load_i64_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %xmm1
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i64_v2i64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $48, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: addl %eax, %eax
; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $3, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i64, ptr %p
%ins = insertelement <2 x i64> %v, i64 %x, i32 %y
ret <2 x i64> %ins
}

define <4 x float> @load_f32_v4f32(<4 x float> %v, ptr %p, i32 %y) nounwind {
; SSE2-LABEL: load_f32_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: movss %xmm1, -24(%rsp,%rsi,4)
; SSE2-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_f32_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE41-NEXT: movd %esi, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE41-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_f32_v4f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vbroadcastss (%rdi), %xmm1
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_f32_v4f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss (%rdi), %xmm1
; AVX2-NEXT: vmovd %esi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_f32_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %xmm1
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512-NEXT: vbroadcastss (%rdi), %xmm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_f32_v4f32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %xmm1
; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86AVX2-NEXT: vbroadcastss (%eax), %xmm2
; X86AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; X86AVX2-NEXT: retl
%x = load float, ptr %p
%ins = insertelement <4 x float> %v, float %x, i32 %y
ret <4 x float> %ins
}

define <2 x double> @load_f64_v2f64(<2 x double> %v, ptr %p, i32 %y) nounwind {
; SSE2-LABEL: load_f64_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: # kill: def $esi killed $esi def $rsi
; SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: andl $1, %esi
; SSE2-NEXT: movsd %xmm1, -24(%rsp,%rsi,8)
; SSE2-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: load_f64_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movapd %xmm0, %xmm1
; SSE41-NEXT: movddup {{.*#+}} xmm2 = mem[0,0]
; SSE41-NEXT: movd %esi, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE41-NEXT: pcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_f64_v2f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_f64_v2f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-NEXT: movl %esi, %eax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpbroadcastq %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_f64_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %xmm1
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
; AVX512-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_f64_v2f64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-16, %esp
; X86AVX2-NEXT: subl $32, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $1, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
; X86AVX2-NEXT: vmovaps (%esp), %xmm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load double, ptr %p
%ins = insertelement <2 x double> %v, double %x, i32 %y
ret <2 x double> %ins
}
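
; 256-bit destinations with a live source vector: the stack lowering now
; realigns the frame to 32 bytes (via rbp) so the ymm spill slot is aligned,
; and the AVX512 masked-broadcast lowering simply widens its compares and
; broadcasts to ymm.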
define <32 x i8> @arg_i8_v32i8(<32 x i8> %v, i8 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i8_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $31, %esi
; SSE-NEXT: movb %dil, -40(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i8_v32i8:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $31, %esi
; AVX1OR2-NEXT: movb %dil, (%rsp,%rsi)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: arg_i8_v32i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
; AVX512F-NEXT: subq $64, %rsp
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
; AVX512F-NEXT: andl $31, %esi
; AVX512F-NEXT: movb %dil, (%rsp,%rsi)
; AVX512F-NEXT: vmovaps (%rsp), %ymm0
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i8_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %esi, %ymm1
; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512BW-NEXT: vpbroadcastb %edi, %ymm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i8_v32i8:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $31, %eax
; X86AVX2-NEXT: movzbl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: movb %cl, (%esp,%eax)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <32 x i8> %v, i8 %x, i32 %y
ret <32 x i8> %ins
}

define <16 x i16> @arg_i16_v16i16(<16 x i16> %v, i16 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i16_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movw %di, -40(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i16_v16i16:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $15, %esi
; AVX1OR2-NEXT: movw %di, (%rsp,%rsi,2)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: arg_i16_v16i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
; AVX512F-NEXT: subq $64, %rsp
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
; AVX512F-NEXT: andl $15, %esi
; AVX512F-NEXT: movw %di, (%rsp,%rsi,2)
; AVX512F-NEXT: vmovaps (%rsp), %ymm0
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: arg_i16_v16i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw %esi, %ymm1
; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512BW-NEXT: vpbroadcastw %edi, %ymm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: arg_i16_v16i16:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $15, %eax
; X86AVX2-NEXT: movzwl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <16 x i16> %v, i16 %x, i32 %y
ret <16 x i16> %ins
}

define <8 x i32> @arg_i32_v8i32(<8 x i32> %v, i32 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i32_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movl %edi, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i32_v8i32:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $7, %esi
; AVX1OR2-NEXT: movl %edi, (%rsp,%rsi,4)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: arg_i32_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %ymm1
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512-NEXT: vpbroadcastd %edi, %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i32_v8i32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <8 x i32> %v, i32 %x, i32 %y
ret <8 x i32> %ins
}

define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
; SSE-LABEL: arg_i64_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movq %rdi, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: arg_i64_v4i64:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $3, %esi
; AVX1OR2-NEXT: movq %rdi, (%rsp,%rsi,8)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: arg_i64_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %ymm1
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512-NEXT: vpbroadcastq %rdi, %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_i64_v4i64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $96, %esp
; X86AVX2-NEXT: movl 8(%ebp), %edx
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: movl 16(%ebp), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: addl %ecx, %ecx
; X86AVX2-NEXT: movl %ecx, %esi
; X86AVX2-NEXT: andl $7, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
; X86AVX2-NEXT: incl %ecx
; X86AVX2-NEXT: andl $7, %ecx
; X86AVX2-NEXT: movl %eax, 32(%esp,%ecx,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <4 x i64> %v, i64 %x, i32 %y
ret <4 x i64> %ins
}

define <8 x float> @arg_f32_v8f32(<8 x float> %v, float %x, i32 %y) nounwind {
; SSE-LABEL: arg_f32_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $7, %edi
; SSE-NEXT: movss %xmm2, -40(%rsp,%rdi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_f32_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT: vmovd %edi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_f32_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm1, %ymm1
; AVX2-NEXT: vmovd %edi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_f32_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %edi, %ymm2
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %k1
; AVX512-NEXT: vbroadcastss %xmm1, %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_f32_v8f32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm1
; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm2
; X86AVX2-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; X86AVX2-NEXT: retl
%ins = insertelement <8 x float> %v, float %x, i32 %y
ret <8 x float> %ins
}

define <4 x double> @arg_f64_v4f64(<4 x double> %v, double %x, i32 %y) nounwind {
; SSE-LABEL: arg_f64_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movsd %xmm2, -40(%rsp,%rdi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: arg_f64_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: arg_f64_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm1, %ymm1
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpbroadcastq %xmm2, %ymm2
; AVX2-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: arg_f64_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %edi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %ymm2
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %k1
; AVX512-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: arg_f64_v4f64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 16(%ebp), %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%ins = insertelement <4 x double> %v, double %x, i32 %y
ret <4 x double> %ins
}
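;
; Insertion of loaded scalars into arg vectors
;
; Note: masked byte/word broadcasts require AVX512BW, so the AVX512F i8/i16
; cases below go through the stack like AVX1/AVX2, while AVX512BW uses a
; compare mask with vpbroadcastb/vpbroadcastw.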
define <32 x i8> @load_i8_v32i8(<32 x i8> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i8_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movzbl (%rdi), %eax
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $31, %esi
; SSE-NEXT: movb %al, -40(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i8_v32i8:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movzbl (%rdi), %eax
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $31, %esi
; AVX1OR2-NEXT: movb %al, (%rsp,%rsi)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: load_i8_v32i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
; AVX512F-NEXT: subq $64, %rsp
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
; AVX512F-NEXT: andl $31, %esi
; AVX512F-NEXT: movb %al, (%rsp,%rsi)
; AVX512F-NEXT: vmovaps (%rsp), %ymm0
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_i8_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %esi, %ymm1
; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512BW-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: load_i8_v32i8:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $31, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movzbl (%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: movb %cl, (%esp,%eax)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i8, ptr %p
%ins = insertelement <32 x i8> %v, i8 %x, i32 %y
ret <32 x i8> %ins
}
define <16 x i16> @load_i16_v16i16(<16 x i16> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i16_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movw %ax, -40(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i16_v16i16:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movzwl (%rdi), %eax
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $15, %esi
; AVX1OR2-NEXT: movw %ax, (%rsp,%rsi,2)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: load_i16_v16i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: pushq %rbp
; AVX512F-NEXT: movq %rsp, %rbp
; AVX512F-NEXT: andq $-32, %rsp
; AVX512F-NEXT: subq $64, %rsp
; AVX512F-NEXT: # kill: def $esi killed $esi def $rsi
; AVX512F-NEXT: movzwl (%rdi), %eax
; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
; AVX512F-NEXT: andl $15, %esi
; AVX512F-NEXT: movw %ax, (%rsp,%rsi,2)
; AVX512F-NEXT: vmovaps (%rsp), %ymm0
; AVX512F-NEXT: movq %rbp, %rsp
; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_i16_v16i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastw %esi, %ymm1
; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512BW-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1}
; AVX512BW-NEXT: retq
;
; X86AVX2-LABEL: load_i16_v16i16:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $15, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movzwl (%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i16, ptr %p
%ins = insertelement <16 x i16> %v, i16 %x, i32 %y
ret <16 x i16> %ins
}
define <8 x i32> @load_i32_v8i32(<8 x i32> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i32_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movl (%rdi), %eax
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movl %eax, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i32_v8i32:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movl (%rdi), %eax
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $7, %esi
; AVX1OR2-NEXT: movl %eax, (%rsp,%rsi,4)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: load_i32_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %ymm1
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i32_v8i32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movl (%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i32, ptr %p
%ins = insertelement <8 x i32> %v, i32 %x, i32 %y
ret <8 x i32> %ins
}
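; On i686 (X86AVX2) there is no 64-bit GPR, so the i64 element is inserted as
; two 32-bit stores, spilling and reloading the vector twice.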
define <4 x i64> @load_i64_v4i64(<4 x i64> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_i64_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movq (%rdi), %rax
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movq %rax, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: load_i64_v4i64:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: pushq %rbp
; AVX1OR2-NEXT: movq %rsp, %rbp
; AVX1OR2-NEXT: andq $-32, %rsp
; AVX1OR2-NEXT: subq $64, %rsp
; AVX1OR2-NEXT: # kill: def $esi killed $esi def $rsi
; AVX1OR2-NEXT: movq (%rdi), %rax
; AVX1OR2-NEXT: vmovaps %ymm0, (%rsp)
; AVX1OR2-NEXT: andl $3, %esi
; AVX1OR2-NEXT: movq %rax, (%rsp,%rsi,8)
; AVX1OR2-NEXT: vmovaps (%rsp), %ymm0
; AVX1OR2-NEXT: movq %rbp, %rsp
; AVX1OR2-NEXT: popq %rbp
; AVX1OR2-NEXT: retq
;
; AVX512-LABEL: load_i64_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %ymm1
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_i64_v4i64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: pushl %esi
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $96, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: movl (%ecx), %edx
; X86AVX2-NEXT: movl 4(%ecx), %ecx
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: addl %eax, %eax
; X86AVX2-NEXT: movl %eax, %esi
; X86AVX2-NEXT: andl $7, %esi
; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
; X86AVX2-NEXT: incl %eax
; X86AVX2-NEXT: andl $7, %eax
; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
; X86AVX2-NEXT: leal -4(%ebp), %esp
; X86AVX2-NEXT: popl %esi
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load i64, ptr %p
%ins = insertelement <4 x i64> %v, i64 %x, i32 %y
ret <4 x i64> %ins
}
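; Loaded FP scalars can be broadcast straight from memory even on AVX1, where
; vbroadcastss/vbroadcastsd only have a load form, so the shuffle sequence
; used for register args is not needed.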
define <8 x float> @load_f32_v8f32(<8 x float> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_f32_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movss %xmm2, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_f32_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vbroadcastss (%rdi), %ymm2
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_f32_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss (%rdi), %ymm1
; AVX2-NEXT: vmovd %esi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %ymm2
; AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_f32_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %ymm1
; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512-NEXT: vbroadcastss (%rdi), %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_f32_v8f32:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm1
; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; X86AVX2-NEXT: vbroadcastss (%eax), %ymm2
; X86AVX2-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; X86AVX2-NEXT: retl
%x = load float, ptr %p
%ins = insertelement <8 x float> %v, float %x, i32 %y
ret <8 x float> %ins
}
define <4 x double> @load_f64_v4f64(<4 x double> %v, ptr %p, i32 %y) nounwind {
; SSE-LABEL: load_f64_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movsd %xmm2, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_f64_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: movl %esi, %eax
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vbroadcastsd (%rdi), %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_f64_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm1
; AVX2-NEXT: movl %esi, %eax
; AVX2-NEXT: vmovq %rax, %xmm2
; AVX2-NEXT: vpbroadcastq %xmm2, %ymm2
; AVX2-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_f64_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %eax
; AVX512-NEXT: vpbroadcastq %rax, %ymm1
; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1}
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: load_f64_v4f64:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: pushl %ebp
; X86AVX2-NEXT: movl %esp, %ebp
; X86AVX2-NEXT: andl $-32, %esp
; X86AVX2-NEXT: subl $64, %esp
; X86AVX2-NEXT: movl 12(%ebp), %eax
; X86AVX2-NEXT: andl $3, %eax
; X86AVX2-NEXT: movl 8(%ebp), %ecx
; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
; X86AVX2-NEXT: vmovaps (%esp), %ymm0
; X86AVX2-NEXT: movl %ebp, %esp
; X86AVX2-NEXT: popl %ebp
; X86AVX2-NEXT: retl
%x = load double, ptr %p
%ins = insertelement <4 x double> %v, double %x, i32 %y
ret <4 x double> %ins
}
; Don't die trying to insert at an invalid index; the insertelement below
; takes an i1 index (PR44139).
define i32 @PR44139(ptr %p) {
; SSE-LABEL: PR44139:
; SSE: # %bb.0:
; SSE-NEXT: movl (%rdi), %eax
; SSE-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
; SSE-NEXT: movdqa %xmm0, 96(%rdi)
; SSE-NEXT: movdqa %xmm0, 112(%rdi)
; SSE-NEXT: movdqa %xmm0, 64(%rdi)
; SSE-NEXT: movdqa %xmm0, 80(%rdi)
; SSE-NEXT: movdqa %xmm0, 32(%rdi)
; SSE-NEXT: movdqa %xmm0, 48(%rdi)
; SSE-NEXT: movdqa %xmm0, (%rdi)
; SSE-NEXT: movdqa %xmm0, 16(%rdi)
; SSE-NEXT: leal 2147483647(%rax), %ecx
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: cmovnsl %eax, %ecx
; SSE-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
; SSE-NEXT: addl %eax, %ecx
; SSE-NEXT: # kill: def $eax killed $eax killed $rax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: retq
;
; AVX1-LABEL: PR44139:
; AVX1: # %bb.0:
; AVX1-NEXT: movq (%rdi), %rax
; AVX1-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX1-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: vmovaps %ymm0, 64(%rdi)
; AVX1-NEXT: vmovaps %ymm0, 96(%rdi)
; AVX1-NEXT: vmovaps %ymm0, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
; AVX1-NEXT: leal 2147483647(%rax), %ecx
; AVX1-NEXT: testl %eax, %eax
; AVX1-NEXT: cmovnsl %eax, %ecx
; AVX1-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
; AVX1-NEXT: addl %eax, %ecx
; AVX1-NEXT: # kill: def $eax killed $eax killed $rax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR44139:
; AVX2: # %bb.0:
; AVX2-NEXT: movq (%rdi), %rax
; AVX2-NEXT: vpbroadcastq (%rdi), %ymm0
; AVX2-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vmovdqa %ymm0, 64(%rdi)
; AVX2-NEXT: vmovdqa %ymm0, 96(%rdi)
; AVX2-NEXT: vmovdqa %ymm0, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm1, (%rdi)
; AVX2-NEXT: leal 2147483647(%rax), %ecx
; AVX2-NEXT: testl %eax, %eax
; AVX2-NEXT: cmovnsl %eax, %ecx
; AVX2-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
; AVX2-NEXT: addl %eax, %ecx
; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR44139:
; AVX512: # %bb.0:
; AVX512-NEXT: movq (%rdi), %rax
; AVX512-NEXT: vpbroadcastq (%rdi), %zmm0
; AVX512-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
; AVX512-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
; AVX512-NEXT: vmovdqa64 %zmm0, 64(%rdi)
; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi)
; AVX512-NEXT: leal 2147483647(%rax), %ecx
; AVX512-NEXT: testl %eax, %eax
; AVX512-NEXT: cmovnsl %eax, %ecx
; AVX512-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
; AVX512-NEXT: addl %eax, %ecx
; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %ecx
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; X86AVX2-LABEL: PR44139:
; X86AVX2: # %bb.0:
; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86AVX2-NEXT: vbroadcastsd (%ecx), %ymm0
; X86AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm0[0],mem[0]
; X86AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X86AVX2-NEXT: vmovaps %ymm0, 64(%ecx)
; X86AVX2-NEXT: vmovaps %ymm0, 96(%ecx)
; X86AVX2-NEXT: vmovaps %ymm0, 32(%ecx)
; X86AVX2-NEXT: movl (%ecx), %eax
; X86AVX2-NEXT: vmovaps %ymm1, (%ecx)
; X86AVX2-NEXT: leal 2147483647(%eax), %ecx
; X86AVX2-NEXT: testl %eax, %eax
; X86AVX2-NEXT: cmovnsl %eax, %ecx
; X86AVX2-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
; X86AVX2-NEXT: addl %eax, %ecx
; X86AVX2-NEXT: xorl %edx, %edx
; X86AVX2-NEXT: divl %ecx
; X86AVX2-NEXT: vzeroupper
; X86AVX2-NEXT: retl
%L = load <16 x i64>, ptr %p
%E1 = extractelement <16 x i64> %L, i64 0
%tempvector = insertelement <16 x i64> undef, i64 %E1, i32 0
%vector = shufflevector <16 x i64> %tempvector, <16 x i64> undef, <16 x i32> zeroinitializer
%C3 = icmp sgt i64 9223372036854775807, -9223372036854775808
%t0 = trunc <16 x i64> %vector to <16 x i32>
%I4 = insertelement <16 x i64> %vector, i64 %E1, i1 %C3
store <16 x i64> %I4, ptr %p
%elt = extractelement <16 x i32> %t0, i32 0
%B = srem i32 %elt, -2147483648
%B9 = udiv i32 %elt, %B
ret i32 %B9
}