llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown-gnux32 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X32,SSE,X32-SSE
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown-gnux32 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X32,AVX,X32-AVX,AVX1,X32-AVX1
; RUN: llc < %s -show-mc-encoding -fast-isel -mtriple=x86_64-unknown-unknown-gnux32 -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X32,AVX,X32-AVX,AVX512,X32-AVX512
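; The RUN matrix covers three targets (i386, x86_64, and the x32 ILP32
; ABI) under SSE2, AVX1, and AVX512VL+BW+DQ codegen; the combined
; FileCheck prefixes (CHECK, SSE, AVX, AVX1, AVX512, X86, X64, X32)
; let identical output be checked once across configurations.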

; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
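; As an illustrative sketch (an assumption about clang's lowering, not
; copied from its actual -emit-llvm output), a builtin call such as
;   __m128i _mm_add_epi8(__m128i a, __m128i b);
; is modelled below by bitcasting the <2 x i64> arguments to the
; element type, applying the generic operation, and bitcasting back:
;   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
;   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
;   %res  = add <16 x i8> %arg0, %arg1
;   %bc   = bitcast <16 x i8> %res to <2 x i64>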

define <2 x i64> @test_mm_add_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_add_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    paddb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfc,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_add_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfc,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_add_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = add <16 x i8> %arg0, %arg1
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_add_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_add_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    paddw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfd,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_add_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfd,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_add_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = add <8 x i16> %arg0, %arg1
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_add_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_add_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfe,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_add_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_add_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = add <4 x i32> %arg0, %arg1
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_add_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_add_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    paddq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd4,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_add_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd4,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_add_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = add <2 x i64> %a0, %a1
  ret <2 x i64> %res
}

define <2 x double> @test_mm_add_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_add_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    addpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x58,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_add_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_add_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fadd <2 x double> %a0, %a1
  ret <2 x double> %res
}

define <2 x double> @test_mm_add_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_add_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    addsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x58,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_add_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x58,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_add_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x58,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext0 = extractelement <2 x double> %a0, i32 0
  %ext1 = extractelement <2 x double> %a1, i32 0
  %fadd = fadd double %ext0, %ext1
  %res = insertelement <2 x double> %a0, double %fadd, i32 0
  ret <2 x double> %res
}

define <2 x i64> @test_mm_adds_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_adds_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    paddsb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xec,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_adds_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xec,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_adds_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_adds_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    paddsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xed,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_adds_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xed,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_adds_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_adds_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_adds_epu8:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdc,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_adds_epu8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_adds_epu8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

define <2 x i64> @test_mm_adds_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_adds_epu16:
; SSE:       # %bb.0:
; SSE-NEXT:    paddusw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_adds_epu16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_adds_epu16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x double> @test_mm_and_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_and_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_and_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vandps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x54,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_and_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vandps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x double> %a0 to <4 x i32>
  %arg1 = bitcast <2 x double> %a1 to <4 x i32>
  %res = and <4 x i32> %arg0, %arg1
  %bc = bitcast <4 x i32> %res to <2 x double>
  ret <2 x double> %bc
}

define <2 x i64> @test_mm_and_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_and_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    andps %xmm1, %xmm0 # encoding: [0x0f,0x54,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_and_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vandps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x54,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_and_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vandps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = and <2 x i64> %a0, %a1
  ret <2 x i64> %res
}

define <2 x double> @test_mm_andnot_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_andnot_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2 # encoding: [0x66,0x0f,0x76,0xd2]
; SSE-NEXT:    pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
; SSE-NEXT:    pand %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdb,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_andnot_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2 # encoding: [0xc5,0xe9,0x76,0xd2]
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xef,0xc2]
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdb,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_andnot_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf3,0xfd,0x08,0x25,0xc0,0x0f]
; AVX512-NEXT:    # xmm0 = ~xmm0
; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x double> %a0 to <4 x i32>
  %arg1 = bitcast <2 x double> %a1 to <4 x i32>
  %not = xor <4 x i32> %arg0, <i32 -1, i32 -1, i32 -1, i32 -1>
  %res = and <4 x i32> %not, %arg1
  %bc = bitcast <4 x i32> %res to <2 x double>
  ret <2 x double> %bc
}
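; Here and in test_mm_andnot_si128 below, the NOT half of ANDN is
; written as an xor with all-ones: SSE/AVX1 materialize the ones with
; pcmpeqd, while AVX512VL folds the NOT into vpternlogq (imm8 0x0f is
; the truth table for ~A, as the "# xmm0 = ~xmm0" annotation shows).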

define <2 x i64> @test_mm_andnot_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_andnot_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm2, %xmm2 # encoding: [0x66,0x0f,0x76,0xd2]
; SSE-NEXT:    pxor %xmm2, %xmm0 # encoding: [0x66,0x0f,0xef,0xc2]
; SSE-NEXT:    pand %xmm1, %xmm0 # encoding: [0x66,0x0f,0xdb,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_andnot_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2 # encoding: [0xc5,0xe9,0x76,0xd2]
; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xef,0xc2]
; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xdb,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_andnot_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpternlogq $15, %xmm0, %xmm0, %xmm0 # encoding: [0x62,0xf3,0xfd,0x08,0x25,0xc0,0x0f]
; AVX512-NEXT:    # xmm0 = ~xmm0
; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdb,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %not = xor <2 x i64> %a0, <i64 -1, i64 -1>
  %res = and <2 x i64> %not, %a1
  ret <2 x i64> %res
}

define <2 x i64> @test_mm_avg_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_avg_epu8:
; SSE:       # %bb.0:
; SSE-NEXT:    pavgb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe0,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_avg_epu8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpavgb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe0,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_avg_epu8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpavgb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe0,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = call <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8> %arg0, <16 x i8> %arg1)
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone

define <2 x i64> @test_mm_avg_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_avg_epu16:
; SSE:       # %bb.0:
; SSE-NEXT:    pavgw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe3,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_avg_epu16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpavgw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe3,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_avg_epu16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpavgw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe3,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_bslli_si128(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_bslli_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    pslldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xf8,0x05]
; SSE-NEXT:    # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_bslli_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf8,0x05]
; AVX1-NEXT:    # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_bslli_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpslldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x05]
; AVX512-NEXT:    # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %res = shufflevector <16 x i8> zeroinitializer, <16 x i8> %arg0, <16 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
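; The shuffle above implements a left shift by 5 bytes: indices 11..15
; pick zeros from the first (zero) operand and indices 16..26 pick
; bytes 0..10 of %arg0, matching pslldq. test_mm_bsrli_si128 below
; mirrors this with indices 5..20 over (%arg0, zero) for psrldq.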

define <2 x i64> @test_mm_bsrli_si128(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_bsrli_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    psrldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xd8,0x05]
; SSE-NEXT:    # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_bsrli_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xd8,0x05]
; AVX1-NEXT:    # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_bsrli_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x05]
; AVX512-NEXT:    # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %res = shufflevector <16 x i8> %arg0, <16 x i8> zeroinitializer, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <4 x float> @test_mm_castpd_ps(<2 x double> %a0) nounwind {
; CHECK-LABEL: test_mm_castpd_ps:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = bitcast <2 x double> %a0 to <4 x float>
  ret <4 x float> %res
}

define <2 x i64> @test_mm_castpd_si128(<2 x double> %a0) nounwind {
; CHECK-LABEL: test_mm_castpd_si128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = bitcast <2 x double> %a0 to <2 x i64>
  ret <2 x i64> %res
}

define <2 x double> @test_mm_castps_pd(<4 x float> %a0) nounwind {
; CHECK-LABEL: test_mm_castps_pd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = bitcast <4 x float> %a0 to <2 x double>
  ret <2 x double> %res
}

define <2 x i64> @test_mm_castps_si128(<4 x float> %a0) nounwind {
; CHECK-LABEL: test_mm_castps_si128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = bitcast <4 x float> %a0 to <2 x i64>
  ret <2 x i64> %res
}

define <2 x double> @test_mm_castsi128_pd(<2 x i64> %a0) nounwind {
; CHECK-LABEL: test_mm_castsi128_pd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = bitcast <2 x i64> %a0 to <2 x double>
  ret <2 x double> %res
}

define <4 x float> @test_mm_castsi128_ps(<2 x i64> %a0) nounwind {
; CHECK-LABEL: test_mm_castsi128_ps:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = bitcast <2 x i64> %a0 to <4 x float>
  ret <4 x float> %res
}

define void @test_mm_clflush(ptr %a0) nounwind {
; X86-LABEL: test_mm_clflush:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-NEXT:    clflush (%eax) # encoding: [0x0f,0xae,0x38]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_clflush:
; X64:       # %bb.0:
; X64-NEXT:    clflush (%rdi) # encoding: [0x0f,0xae,0x3f]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X32-LABEL: test_mm_clflush:
; X32:       # %bb.0:
; X32-NEXT:    clflush (%edi) # encoding: [0x67,0x0f,0xae,0x3f]
; X32-NEXT:    retq # encoding: [0xc3]
  call void @llvm.x86.sse2.clflush(ptr %a0)
  ret void
}
declare void @llvm.x86.sse2.clflush(ptr) nounwind readnone

define <2 x i64> @test_mm_cmpeq_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmpeq_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x74,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpeq_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x74,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpeq_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; AVX512-NEXT:    vpmovm2b %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %cmp = icmp eq <16 x i8> %arg0, %arg1
  %res = sext <16 x i1> %cmp to <16 x i8>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
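; The icmp+sext idiom yields the all-ones/all-zeros lane mask that the
; packed compares define. Note that under +avx512bw/+avx512vl the
; compare selects into a mask register (k0) and is widened back with
; vpmovm2b instead of using the legacy XMM form.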

define <2 x i64> @test_mm_cmpeq_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmpeq_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x75,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpeq_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x75,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpeq_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpeqw %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; AVX512-NEXT:    vpmovm2w %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %cmp = icmp eq <8 x i16> %arg0, %arg1
  %res = sext <8 x i1> %cmp to <8 x i16>
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_cmpeq_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmpeq_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x76,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpeq_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x76,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpeq_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpeqd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; AVX512-NEXT:    vpmovm2d %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %cmp = icmp eq <4 x i32> %arg0, %arg1
  %res = sext <4 x i1> %cmp to <4 x i32>
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x double> @test_mm_cmpeq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpeq_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpeqpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x00]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpeq_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpeqpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x00]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpeq_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpeqpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x00]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp oeq <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpeq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpeq_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpeqsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x00]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpeq_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpeqsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x00]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 0)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone
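; For reference, the cmp{pd,sd} predicate immediates used in the tests
; below: 0=eq, 1=lt, 2=le, 3=unord, 4=neq, 5=nlt, 6=nle, 7=ord.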

define <2 x double> @test_mm_cmpge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpge_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmplepd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x02]
; SSE-NEXT:    movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpge_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmplepd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x02]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpge_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmplepd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x02]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp ole <2 x double> %a1, %a0
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpge_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmplesd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x02]
; SSE-NEXT:    movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
; SSE-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpge_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x02]
; AVX-NEXT:    vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 2)
  %ext0 = extractelement <2 x double> %cmp, i32 0
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ext1 = extractelement <2 x double> %a0, i32 1
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  ret <2 x double> %ins1
}
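; _mm_cmpge_sd has no direct predicate, so the operands are swapped and
; "le" (imm 2) is used; the extract/insert sequence then keeps element 1
; of %a0 so only the low lane carries the compare result. The scalar
; cmpgt/cmpnge/cmpngt tests below rely on the same swap.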

define <2 x i64> @test_mm_cmpgt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmpgt_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpgtb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x64,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpgt_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x64,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpgt_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpgtb %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
; AVX512-NEXT:    vpmovm2b %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %cmp = icmp sgt <16 x i8> %arg0, %arg1
  %res = sext <16 x i1> %cmp to <16 x i8>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_cmpgt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmpgt_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpgtw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x65,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpgt_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x65,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpgt_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpgtw %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
; AVX512-NEXT:    vpmovm2w %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %cmp = icmp sgt <8 x i16> %arg0, %arg1
  %res = sext <8 x i1> %cmp to <8 x i16>
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_cmpgt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmpgt_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpgtd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x66,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpgt_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x66,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpgt_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
; AVX512-NEXT:    vpmovm2d %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %cmp = icmp sgt <4 x i32> %arg0, %arg1
  %res = sext <4 x i1> %cmp to <4 x i32>
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x double> @test_mm_cmpgt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpgt_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpltpd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x01]
; SSE-NEXT:    movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpgt_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpgt_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpltpd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x01]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp olt <2 x double> %a1, %a0
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpgt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpgt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpltsd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x01]
; SSE-NEXT:    movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
; SSE-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpgt_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x01]
; AVX-NEXT:    vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 1)
  %ext0 = extractelement <2 x double> %cmp, i32 0
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ext1 = extractelement <2 x double> %a0, i32 1
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  ret <2 x double> %ins1
}

define <2 x double> @test_mm_cmple_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmple_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmplepd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x02]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmple_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmplepd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x02]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmple_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmplepd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp ole <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmple_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmple_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmplesd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x02]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmple_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmplesd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x02]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 2)
  ret <2 x double> %res
}

define <2 x i64> @test_mm_cmplt_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmplt_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpgtb %xmm0, %xmm1 # encoding: [0x66,0x0f,0x64,0xc8]
; SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmplt_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x64,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmplt_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpgtb %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0x75,0x08,0x64,0xc0]
; AVX512-NEXT:    vpmovm2b %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x28,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %cmp = icmp sgt <16 x i8> %arg1, %arg0
  %res = sext <16 x i1> %cmp to <16 x i8>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_cmplt_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmplt_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpgtw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x65,0xc8]
; SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmplt_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x65,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmplt_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpgtw %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0x75,0x08,0x65,0xc0]
; AVX512-NEXT:    vpmovm2w %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x28,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %cmp = icmp sgt <8 x i16> %arg1, %arg0
  %res = sext <8 x i1> %cmp to <8 x i16>
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_cmplt_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_cmplt_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpgtd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x66,0xc8]
; SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmplt_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x66,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmplt_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpcmpgtd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0x75,0x08,0x66,0xc0]
; AVX512-NEXT:    vpmovm2d %k0, %xmm0 # encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %cmp = icmp sgt <4 x i32> %arg1, %arg0
  %res = sext <4 x i1> %cmp to <4 x i32>
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x double> @test_mm_cmplt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmplt_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpltpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmplt_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpltpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmplt_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpltpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x01]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp olt <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmplt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmplt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpltsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmplt_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpltsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x01]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 1)
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpneq_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpneq_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpneqpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x04]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpneq_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpneqpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x04]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpneq_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpneqpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x04]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp une <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpneq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpneq_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpneqsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x04]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpneq_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpneqsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x04]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 4)
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpnge_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpnge_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnlepd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x06]
; SSE-NEXT:    movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpnge_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpnlepd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x06]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpnge_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpnlepd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x06]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp ugt <2 x double> %a1, %a0
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpnge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpnge_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnlesd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x06]
; SSE-NEXT:    movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
; SSE-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpnge_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpnlesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x06]
; AVX-NEXT:    vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 6)
  %ext0 = extractelement <2 x double> %cmp, i32 0
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ext1 = extractelement <2 x double> %a0, i32 1
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  ret <2 x double> %ins1
}

define <2 x double> @test_mm_cmpngt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpngt_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnltpd %xmm0, %xmm1 # encoding: [0x66,0x0f,0xc2,0xc8,0x05]
; SSE-NEXT:    movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpngt_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpnltpd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0xc2,0xc0,0x05]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpngt_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpnltpd %xmm0, %xmm1, %k0 # encoding: [0x62,0xf1,0xf5,0x08,0xc2,0xc0,0x05]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp uge <2 x double> %a1, %a0
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpngt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpngt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnltsd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0xc2,0xc8,0x05]
; SSE-NEXT:    movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
; SSE-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpngt_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpnltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x05]
; AVX-NEXT:    vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 5)
  %ext0 = extractelement <2 x double> %cmp, i32 0
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ext1 = extractelement <2 x double> %a0, i32 1
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  ret <2 x double> %ins1
}

define <2 x double> @test_mm_cmpnle_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpnle_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnlepd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x06]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpnle_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpnlepd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x06]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpnle_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpnlepd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x06]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp ugt <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpnle_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpnle_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnlesd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x06]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpnle_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpnlesd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x06]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 6)
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpnlt_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpnlt_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnltpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x05]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpnlt_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpnltpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x05]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpnlt_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpnltpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x05]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp uge <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpnlt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpnlt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpnltsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x05]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpnlt_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpnltsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x05]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 5)
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpord_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpordpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x07]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpord_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpordpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x07]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpord_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpordpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x07]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp ord <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpord_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpordsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x07]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpord_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpordsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x07]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 7)
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpunord_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpunord_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpunordpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xc2,0xc1,0x03]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpunord_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcmpunordpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc2,0xc1,0x03]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpunord_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcmpunordpd %xmm1, %xmm0, %k0 # encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x03]
; AVX512-NEXT:    vpmovm2q %k0, %xmm0 # encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %fcmp = fcmp uno <2 x double> %a0, %a1
  %sext = sext <2 x i1> %fcmp to <2 x i64>
  %res = bitcast <2 x i64> %sext to <2 x double>
  ret <2 x double> %res
}

define <2 x double> @test_mm_cmpunord_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_cmpunord_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cmpunordsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0xc2,0xc1,0x03]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_cmpunord_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vcmpunordsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xc2,0xc1,0x03]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a0, <2 x double> %a1, i8 3)
  ret <2 x double> %res
}

define i32 @test_mm_comieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_comieq_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT:    setnp %al # encoding: [0x0f,0x9b,0xc0]
; SSE-NEXT:    sete %cl # encoding: [0x0f,0x94,0xc1]
; SSE-NEXT:    andb %al, %cl # encoding: [0x20,0xc1]
; SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_comieq_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX1-NEXT:    setnp %al # encoding: [0x0f,0x9b,0xc0]
; AVX1-NEXT:    sete %cl # encoding: [0x0f,0x94,0xc1]
; AVX1-NEXT:    andb %al, %cl # encoding: [0x20,0xc1]
; AVX1-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_comieq_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX512-NEXT:    setnp %al # encoding: [0x0f,0x9b,0xc0]
; AVX512-NEXT:    sete %cl # encoding: [0x0f,0x94,0xc1]
; AVX512-NEXT:    andb %al, %cl # encoding: [0x20,0xc1]
; AVX512-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.comieq.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.comieq.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_comige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_comige_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_comige_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX1-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_comige_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX512-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.comige.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.comige.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_comigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_comigt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_comigt_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX1-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_comigt_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX512-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.comigt.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.comigt.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_comile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_comile_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    comisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2f,0xc8]
; SSE-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_comile_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vcomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2f,0xc8]
; AVX1-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_comile_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vcomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
; AVX512-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.comile.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.comile.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_comilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_comilt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    comisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2f,0xc8]
; SSE-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_comilt_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vcomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2f,0xc8]
; AVX1-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_comilt_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vcomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc8]
; AVX512-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.comilt.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.comilt.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_comineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_comineq_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    comisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2f,0xc1]
; SSE-NEXT:    setp %al # encoding: [0x0f,0x9a,0xc0]
; SSE-NEXT:    setne %cl # encoding: [0x0f,0x95,0xc1]
; SSE-NEXT:    orb %al, %cl # encoding: [0x08,0xc1]
; SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_comineq_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX1-NEXT:    setp %al # encoding: [0x0f,0x9a,0xc0]
; AVX1-NEXT:    setne %cl # encoding: [0x0f,0x95,0xc1]
; AVX1-NEXT:    orb %al, %cl # encoding: [0x08,0xc1]
; AVX1-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_comineq_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2f,0xc1]
; AVX512-NEXT:    setp %al # encoding: [0x0f,0x9a,0xc0]
; AVX512-NEXT:    setne %cl # encoding: [0x0f,0x95,0xc1]
; AVX512-NEXT:    orb %al, %cl # encoding: [0x08,0xc1]
; AVX512-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.comineq.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.comineq.sd(<2 x double>, <2 x double>) nounwind readnone

define <2 x double> @test_mm_cvtepi32_pd(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_cvtepi32_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0 # encoding: [0xf3,0x0f,0xe6,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtepi32_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtdq2pd %xmm0, %xmm0 # encoding: [0xc5,0xfa,0xe6,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtepi32_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtdq2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0xe6,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %ext = shufflevector <4 x i32> %arg0, <4 x i32> %arg0, <2 x i32> <i32 0, i32 1>
  %res = sitofp <2 x i32> %ext to <2 x double>
  ret <2 x double> %res
}

define <4 x float> @test_mm_cvtepi32_ps(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_cvtepi32_ps:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0 # encoding: [0x0f,0x5b,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtepi32_ps:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtdq2ps %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5b,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtepi32_ps:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtdq2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %res = sitofp <4 x i32> %arg0 to <4 x float>
  ret <4 x float> %res
}

define <2 x i64> @test_mm_cvtpd_epi32(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_cvtpd_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtpd2dq %xmm0, %xmm0 # encoding: [0xf2,0x0f,0xe6,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtpd_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtpd2dq %xmm0, %xmm0 # encoding: [0xc5,0xfb,0xe6,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtpd_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtpd2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0xe6,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double> %a0)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.cvtpd2dq(<2 x double>) nounwind readnone

define <4 x float> @test_mm_cvtpd_ps(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_cvtpd_ps:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtpd2ps %xmm0, %xmm0 # encoding: [0x66,0x0f,0x5a,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtpd_ps:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtpd2ps %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5a,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtpd_ps:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtpd2ps %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5a,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double> %a0)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse2.cvtpd2ps(<2 x double>) nounwind readnone

define <2 x i64> @test_mm_cvtps_epi32(<4 x float> %a0) nounwind {
; SSE-LABEL: test_mm_cvtps_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtps2dq %xmm0, %xmm0 # encoding: [0x66,0x0f,0x5b,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtps_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtps2dq %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5b,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtps_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtps2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5b,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float> %a0)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.cvtps2dq(<4 x float>) nounwind readnone

define <2 x double> @test_mm_cvtps_pd(<4 x float> %a0) nounwind {
; SSE-LABEL: test_mm_cvtps_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0 # encoding: [0x0f,0x5a,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtps_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtps2pd %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5a,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtps_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtps2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext = shufflevector <4 x float> %a0, <4 x float> %a0, <2 x i32> <i32 0, i32 1>
  %res = fpext <2 x float> %ext to <2 x double>
  ret <2 x double> %res
}
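
; On i386 a double is returned in st(0), so the low half of xmm0 is spilled to
; an 8-byte-aligned stack slot and reloaded with fldl. x86-64 and x32 return
; doubles in xmm0, so those bodies are empty.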

define double @test_mm_cvtsd_f64(<2 x double> %a0) nounwind {
; X86-SSE-LABEL: test_mm_cvtsd_f64:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    pushl %ebp # encoding: [0x55]
; X86-SSE-NEXT:    movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-SSE-NEXT:    andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-SSE-NEXT:    subl $8, %esp # encoding: [0x83,0xec,0x08]
; X86-SSE-NEXT:    movlps %xmm0, (%esp) # encoding: [0x0f,0x13,0x04,0x24]
; X86-SSE-NEXT:    fldl (%esp) # encoding: [0xdd,0x04,0x24]
; X86-SSE-NEXT:    movl %ebp, %esp # encoding: [0x89,0xec]
; X86-SSE-NEXT:    popl %ebp # encoding: [0x5d]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_cvtsd_f64:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    pushl %ebp # encoding: [0x55]
; X86-AVX1-NEXT:    movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-AVX1-NEXT:    andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-AVX1-NEXT:    subl $8, %esp # encoding: [0x83,0xec,0x08]
; X86-AVX1-NEXT:    vmovlps %xmm0, (%esp) # encoding: [0xc5,0xf8,0x13,0x04,0x24]
; X86-AVX1-NEXT:    fldl (%esp) # encoding: [0xdd,0x04,0x24]
; X86-AVX1-NEXT:    movl %ebp, %esp # encoding: [0x89,0xec]
; X86-AVX1-NEXT:    popl %ebp # encoding: [0x5d]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_cvtsd_f64:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    pushl %ebp # encoding: [0x55]
; X86-AVX512-NEXT:    movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-AVX512-NEXT:    andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-AVX512-NEXT:    subl $8, %esp # encoding: [0x83,0xec,0x08]
; X86-AVX512-NEXT:    vmovlps %xmm0, (%esp) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x04,0x24]
; X86-AVX512-NEXT:    fldl (%esp) # encoding: [0xdd,0x04,0x24]
; X86-AVX512-NEXT:    movl %ebp, %esp # encoding: [0x89,0xec]
; X86-AVX512-NEXT:    popl %ebp # encoding: [0x5d]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_cvtsd_f64:
; X64:       # %bb.0:
; X64-NEXT:    retq # encoding: [0xc3]
;
; X32-LABEL: test_mm_cvtsd_f64:
; X32:       # %bb.0:
; X32-NEXT:    retq # encoding: [0xc3]
  %res = extractelement <2 x double> %a0, i32 0
  ret double %res
}

define i32 @test_mm_cvtsd_si32(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_cvtsd_si32:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtsd2si %xmm0, %eax # encoding: [0xf2,0x0f,0x2d,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtsd_si32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtsd2si %xmm0, %eax # encoding: [0xc5,0xfb,0x2d,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtsd_si32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtsd2si %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2d,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a0)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone

define <4 x float> @test_mm_cvtsd_ss(<4 x float> %a0, <2 x double> %a1) {
; SSE-LABEL: test_mm_cvtsd_ss:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtsd2ss %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5a,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtsd_ss:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5a,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtsd_ss:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtsd2ss %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5a,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
  ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float>, <2 x double>) nounwind readnone
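
; The <2 x double> load is narrowed and folded into the 64-bit memory operand
; of cvtsd2ss.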

define <4 x float> @test_mm_cvtsd_ss_load(<4 x float> %a0, ptr %p1) {
; X86-SSE-LABEL: test_mm_cvtsd_ss_load:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    cvtsd2ss (%eax), %xmm0 # encoding: [0xf2,0x0f,0x5a,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_cvtsd_ss_load:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5a,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_cvtsd_ss_load:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vcvtsd2ss (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5a,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_cvtsd_ss_load:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtsd2ss (%rdi), %xmm0 # encoding: [0xf2,0x0f,0x5a,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_cvtsd_ss_load:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vcvtsd2ss (%rdi), %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5a,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_cvtsd_ss_load:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vcvtsd2ss (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5a,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_cvtsd_ss_load:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    cvtsd2ss (%edi), %xmm0 # encoding: [0x67,0xf2,0x0f,0x5a,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_cvtsd_ss_load:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vcvtsd2ss (%edi), %xmm0, %xmm0 # encoding: [0x67,0xc5,0xfb,0x5a,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_cvtsd_ss_load:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vcvtsd2ss (%edi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x5a,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %a1 = load <2 x double>, ptr %p1
  %res = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a0, <2 x double> %a1)
  ret <4 x float> %res
}

define i32 @test_mm_cvtsi128_si32(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_cvtsi128_si32:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtsi128_si32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtsi128_si32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %res = extractelement <4 x i32> %arg0, i32 0
  ret i32 %res
}

define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
; X86-SSE-LABEL: test_mm_cvtsi32_sd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0 # encoding: [0xf2,0x0f,0x2a,0x44,0x24,0x04]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_cvtsi32_sd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_cvtsi32_sd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_cvtsi32_sd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    cvtsi2sd %edi, %xmm0 # encoding: [0xf2,0x0f,0x2a,0xc7]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_cvtsi32_sd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0xc7]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_cvtsi32_sd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_cvtsi32_sd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    cvtsi2sd %edi, %xmm0 # encoding: [0xf2,0x0f,0x2a,0xc7]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_cvtsi32_sd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x2a,0xc7]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_cvtsi32_sd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %cvt = sitofp i32 %a1 to double
  %res = insertelement <2 x double> %a0, double %cvt, i32 0
  ret <2 x double> %res
}
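
; On i386 the i32 argument lives on the stack, so the zero-extended vector is
; built with a single movss load; on 64-bit targets movd zero-extends from
; %edi instead.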

define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
; X86-SSE-LABEL: test_mm_cvtsi32_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_cvtsi32_si128:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_cvtsi32_si128:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_cvtsi32_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_cvtsi32_si128:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_cvtsi32_si128:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_cvtsi32_si128:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_cvtsi32_si128:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_cvtsi32_si128:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0 = insertelement <4 x i32> undef, i32 %a0, i32 0
  %res1 = insertelement <4 x i32> %res0, i32 0, i32 1
  %res2 = insertelement <4 x i32> %res1, i32 0, i32 2
  %res3 = insertelement <4 x i32> %res2, i32 0, i32 3
  %res = bitcast <4 x i32> %res3 to <2 x i64>
  ret <2 x i64> %res
}

define <2 x double> @test_mm_cvtss_sd(<2 x double> %a0, <4 x float> %a1) nounwind {
; SSE-LABEL: test_mm_cvtss_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtss2sd %xmm1, %xmm0 # encoding: [0xf3,0x0f,0x5a,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvtss_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x5a,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvtss_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5a,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext = extractelement <4 x float> %a1, i32 0
  %cvt = fpext float %ext to double
  %res = insertelement <2 x double> %a0, double %cvt, i32 0
  ret <2 x double> %res
}

define <2 x i64> @test_mm_cvttpd_epi32(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_cvttpd_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    cvttpd2dq %xmm0, %xmm0 # encoding: [0x66,0x0f,0xe6,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvttpd_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvttpd2dq %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe6,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvttpd_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvttpd2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe6,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double> %a0)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>) nounwind readnone

define <2 x i64> @test_mm_cvttps_epi32(<4 x float> %a0) nounwind {
; SSE-LABEL: test_mm_cvttps_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    cvttps2dq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x5b,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvttps_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvttps2dq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x5b,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvttps_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvttps2dq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5b,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %a0)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone

define i32 @test_mm_cvttsd_si32(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_cvttsd_si32:
; SSE:       # %bb.0:
; SSE-NEXT:    cvttsd2si %xmm0, %eax # encoding: [0xf2,0x0f,0x2c,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cvttsd_si32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vcvttsd2si %xmm0, %eax # encoding: [0xc5,0xfb,0x2c,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cvttsd_si32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vcvttsd2si %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2c,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %a0)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>) nounwind readnone

define <2 x double> @test_mm_div_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_div_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    divpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5e,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_div_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vdivpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5e,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_div_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vdivpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5e,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fdiv <2 x double> %a0, %a1
  ret <2 x double> %res
}

define <2 x double> @test_mm_div_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_div_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    divsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5e,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_div_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5e,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_div_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5e,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext0 = extractelement <2 x double> %a0, i32 0
  %ext1 = extractelement <2 x double> %a1, i32 0
  %fdiv = fdiv double %ext0, %ext1
  %res = insertelement <2 x double> %a0, double %fdiv, i32 0
  ret <2 x double> %res
}
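
; pextrw already zero-extends the selected word into %eax, so the following
; movzwl is redundant; fast-isel selects the extract and the zext separately.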

define i32 @test_mm_extract_epi16(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_extract_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pextrw $1, %xmm0, %eax # encoding: [0x66,0x0f,0xc5,0xc0,0x01]
; SSE-NEXT:    movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_extract_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpextrw $1, %xmm0, %eax # encoding: [0xc5,0xf9,0xc5,0xc0,0x01]
; AVX1-NEXT:    movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_extract_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpextrw $1, %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc5,0xc0,0x01]
; AVX512-NEXT:    movzwl %ax, %eax # encoding: [0x0f,0xb7,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %ext = extractelement <8 x i16> %arg0, i32 1
  %res = zext i16 %ext to i32
  ret i32 %res
}
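
; On i386 the i16 argument is zero-extended from the stack before pinsrw;
; pinsrw also has an m16 form that could fold the load, but fast-isel goes
; through %eax.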

define <2 x i64> @test_mm_insert_epi16(<2 x i64> %a0, i16 %a1) nounwind {
; X86-SSE-LABEL: test_mm_insert_epi16:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-SSE-NEXT:    pinsrw $1, %eax, %xmm0 # encoding: [0x66,0x0f,0xc4,0xc0,0x01]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_insert_epi16:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_insert_epi16:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_insert_epi16:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    pinsrw $1, %edi, %xmm0 # encoding: [0x66,0x0f,0xc4,0xc7,0x01]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_insert_epi16:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_insert_epi16:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_insert_epi16:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    pinsrw $1, %edi, %xmm0 # encoding: [0x66,0x0f,0xc4,0xc7,0x01]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_insert_epi16:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_insert_epi16:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x01]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %res = insertelement <8 x i16> %arg0, i16 %a1, i32 1
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define void @test_mm_lfence() nounwind {
; CHECK-LABEL: test_mm_lfence:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lfence # encoding: [0x0f,0xae,0xe8]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  call void @llvm.x86.sse2.lfence()
  ret void
}
declare void @llvm.x86.sse2.lfence() nounwind readnone
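
; Aligned 16-byte loads are canonicalized to movaps regardless of element type
; (see also test_mm_load_si128): a plain load has no domain-crossing penalty
; and the ps form has the shortest encoding.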

define <2 x double> @test_mm_load_pd(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_load_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movaps (%eax), %xmm0 # encoding: [0x0f,0x28,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_load_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovaps (%eax), %xmm0 # encoding: [0xc5,0xf8,0x28,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_load_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovaps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_load_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps (%rdi), %xmm0 # encoding: [0x0f,0x28,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_load_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovaps (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x28,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_load_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_load_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movaps (%edi), %xmm0 # encoding: [0x67,0x0f,0x28,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_load_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovaps (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x28,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_load_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovaps (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x28,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res = load <2 x double>, ptr %a0, align 16
  ret <2 x double> %res
}

define <2 x double> @test_mm_load_sd(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_load_sd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_load_sd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_load_sd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_load_sd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_load_sd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_load_sd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_load_sd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE-NEXT:    # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_load_sd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX1-NEXT:    # encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_load_sd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ld = load double, ptr %a0, align 1
  %res0 = insertelement <2 x double> undef, double %ld, i32 0
  %res1 = insertelement <2 x double> %res0, double 0.0, i32 1
  ret <2 x double> %res1
}

define <2 x i64> @test_mm_load_si128(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_load_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movaps (%eax), %xmm0 # encoding: [0x0f,0x28,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_load_si128:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovaps (%eax), %xmm0 # encoding: [0xc5,0xf8,0x28,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_load_si128:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovaps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_load_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps (%rdi), %xmm0 # encoding: [0x0f,0x28,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_load_si128:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovaps (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x28,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_load_si128:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovaps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_load_si128:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movaps (%edi), %xmm0 # encoding: [0x67,0x0f,0x28,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_load_si128:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovaps (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x28,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_load_si128:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovaps (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x28,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res = load <2 x i64>, ptr %a0, align 16
  ret <2 x i64> %res
}
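
; Splatting a double from memory takes two instructions without AVX (movsd
; load, then movlhps to duplicate the low half); AVX broadcasts directly with
; vmovddup.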

define <2 x double> @test_mm_load1_pd(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_load1_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_load1_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovddup (%eax), %xmm0 # encoding: [0xc5,0xfb,0x12,0x00]
; X86-AVX1-NEXT:    # xmm0 = mem[0,0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_load1_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovddup (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0x00]
; X86-AVX512-NEXT:    # xmm0 = mem[0,0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_load1_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_load1_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovddup (%rdi), %xmm0 # encoding: [0xc5,0xfb,0x12,0x07]
; X64-AVX1-NEXT:    # xmm0 = mem[0,0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_load1_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovddup (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0x07]
; X64-AVX512-NEXT:    # xmm0 = mem[0,0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_load1_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE-NEXT:    # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_load1_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovddup (%edi), %xmm0 # encoding: [0x67,0xc5,0xfb,0x12,0x07]
; X32-AVX1-NEXT:    # xmm0 = mem[0,0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_load1_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovddup (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x12,0x07]
; X32-AVX512-NEXT:    # xmm0 = mem[0,0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ld = load double, ptr %a0, align 8
  %res0 = insertelement <2 x double> undef, double %ld, i32 0
  %res1 = insertelement <2 x double> %res0, double %ld, i32 1
  ret <2 x double> %res1
}
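
; movhps/vmovhps is used rather than movhpd: for a pure 64-bit load the two
; are equivalent and the ps encoding is a byte shorter. The same holds for
; movlps in test_mm_loadl_pd below.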

define <2 x double> @test_mm_loadh_pd(<2 x double> %a0, ptr %a1) nounwind {
; X86-SSE-LABEL: test_mm_loadh_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movhps (%eax), %xmm0 # encoding: [0x0f,0x16,0x00]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadh_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovhps (%eax), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0x00]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadh_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovhps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0x00]
; X86-AVX512-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadh_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movhps (%rdi), %xmm0 # encoding: [0x0f,0x16,0x07]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadh_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovhps (%rdi), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0x07]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadh_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovhps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0x07]
; X64-AVX512-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadh_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movhps (%edi), %xmm0 # encoding: [0x67,0x0f,0x16,0x07]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadh_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovhps (%edi), %xmm0, %xmm0 # encoding: [0x67,0xc5,0xf8,0x16,0x07]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadh_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovhps (%edi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x16,0x07]
; X32-AVX512-NEXT:    # xmm0 = xmm0[0,1],mem[0,1]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ld = load double, ptr %a1, align 8
  %res = insertelement <2 x double> %a0, double %ld, i32 1
  ret <2 x double> %res
}
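
; The <2 x i64> %a0 argument is dead, so this reduces to a zero-extending
; 64-bit movsd load, matching the insert-then-zero IR.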

define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, ptr %a1) nounwind {
; X86-SSE-LABEL: test_mm_loadl_epi64:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadl_epi64:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadl_epi64:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadl_epi64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadl_epi64:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadl_epi64:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadl_epi64:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE-NEXT:    # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadl_epi64:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX1-NEXT:    # encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadl_epi64:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ld = load i64, ptr %a1, align 1
  %res0 = insertelement <2 x i64> undef, i64 %ld, i32 0
  %res1 = insertelement <2 x i64> %res0, i64 0, i32 1
  ret <2 x i64> %res1
}

define <2 x double> @test_mm_loadl_pd(<2 x double> %a0, ptr %a1) nounwind {
; X86-SSE-LABEL: test_mm_loadl_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlps (%eax), %xmm0 # encoding: [0x0f,0x12,0x00]
; X86-SSE-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadl_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovlps (%eax), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x12,0x00]
; X86-AVX1-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadl_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovlps (%eax), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x12,0x00]
; X86-AVX512-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadl_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movlps (%rdi), %xmm0 # encoding: [0x0f,0x12,0x07]
; X64-SSE-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadl_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovlps (%rdi), %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x12,0x07]
; X64-AVX1-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadl_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovlps (%rdi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x12,0x07]
; X64-AVX512-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadl_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movlps (%edi), %xmm0 # encoding: [0x67,0x0f,0x12,0x07]
; X32-SSE-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadl_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovlps (%edi), %xmm0, %xmm0 # encoding: [0x67,0xc5,0xf8,0x12,0x07]
; X32-AVX1-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadl_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovlps (%edi), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x12,0x07]
; X32-AVX512-NEXT:    # xmm0 = mem[0,1],xmm0[2,3]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ld = load double, ptr %a1, align 8
  %res = insertelement <2 x double> %a0, double %ld, i32 0
  ret <2 x double> %res
}
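
; Without AVX the reversal needs an aligned load plus shufps $78 (swap the
; 64-bit halves); with AVX it folds into one vpermilpd $1 with a memory
; operand.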

define <2 x double> @test_mm_loadr_pd(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_loadr_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movaps (%eax), %xmm0 # encoding: [0x0f,0x28,0x00]
; X86-SSE-NEXT:    shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
; X86-SSE-NEXT:    # xmm0 = xmm0[2,3,0,1]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadr_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpermilpd $1, (%eax), %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0x00,0x01]
; X86-AVX1-NEXT:    # xmm0 = mem[1,0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadr_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpermilpd $1, (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0x00,0x01]
; X86-AVX512-NEXT:    # xmm0 = mem[1,0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadr_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps (%rdi), %xmm0 # encoding: [0x0f,0x28,0x07]
; X64-SSE-NEXT:    shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
; X64-SSE-NEXT:    # xmm0 = xmm0[2,3,0,1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadr_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vpermilpd $1, (%rdi), %xmm0 # encoding: [0xc4,0xe3,0x79,0x05,0x07,0x01]
; X64-AVX1-NEXT:    # xmm0 = mem[1,0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadr_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vpermilpd $1, (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x05,0x07,0x01]
; X64-AVX512-NEXT:    # xmm0 = mem[1,0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadr_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movaps (%edi), %xmm0 # encoding: [0x67,0x0f,0x28,0x07]
; X32-SSE-NEXT:    shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
; X32-SSE-NEXT:    # xmm0 = xmm0[2,3,0,1]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadr_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vpermilpd $1, (%edi), %xmm0 # encoding: [0x67,0xc4,0xe3,0x79,0x05,0x07,0x01]
; X32-AVX1-NEXT:    # xmm0 = mem[1,0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadr_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vpermilpd $1, (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc4,0xe3,0x79,0x05,0x07,0x01]
; X32-AVX512-NEXT:    # xmm0 = mem[1,0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ld = load <2 x double>, ptr %a0, align 16
  %res = shufflevector <2 x double> %ld, <2 x double> undef, <2 x i32> <i32 1, i32 0>
  ret <2 x double> %res
}

define <2 x double> @test_mm_loadu_pd(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_loadu_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movups (%eax), %xmm0 # encoding: [0x0f,0x10,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadu_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovups (%eax), %xmm0 # encoding: [0xc5,0xf8,0x10,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadu_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovups (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadu_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups (%rdi), %xmm0 # encoding: [0x0f,0x10,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadu_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovups (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x10,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadu_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovups (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadu_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movups (%edi), %xmm0 # encoding: [0x67,0x0f,0x10,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadu_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovups (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x10,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadu_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovups (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x10,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res = load <2 x double>, ptr %a0, align 1
  ret <2 x double> %res
}

define <2 x i64> @test_mm_loadu_si128(ptr %a0) nounwind {
; X86-SSE-LABEL: test_mm_loadu_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movups (%eax), %xmm0 # encoding: [0x0f,0x10,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadu_si128:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovups (%eax), %xmm0 # encoding: [0xc5,0xf8,0x10,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadu_si128:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovups (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadu_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups (%rdi), %xmm0 # encoding: [0x0f,0x10,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadu_si128:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovups (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x10,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadu_si128:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovups (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadu_si128:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movups (%edi), %xmm0 # encoding: [0x67,0x0f,0x10,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadu_si128:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovups (%edi), %xmm0 # encoding: [0x67,0xc5,0xf8,0x10,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadu_si128:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovups (%edi), %xmm0 # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x10,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res = load <2 x i64>, ptr %a0, align 1
  ret <2 x i64> %res
}
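
; movsd/vmovsd has no alignment requirement, so the align-1 i64 load plus
; zeroing of the upper lane becomes a single zero-extending load.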

define <2 x i64> @test_mm_loadu_si64(ptr nocapture readonly %A) {
; X86-SSE-LABEL: test_mm_loadu_si64:
; X86-SSE:       # %bb.0: # %entry
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadu_si64:
; X86-AVX1:       # %bb.0: # %entry
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadu_si64:
; X86-AVX512:       # %bb.0: # %entry
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadu_si64:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadu_si64:
; X64-AVX1:       # %bb.0: # %entry
; X64-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadu_si64:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadu_si64:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-SSE-NEXT:    # encoding: [0x67,0xf2,0x0f,0x10,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadu_si64:
; X32-AVX1:       # %bb.0: # %entry
; X32-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX1-NEXT:    # encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadu_si64:
; X32-AVX512:       # %bb.0: # %entry
; X32-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x10,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = load i64, ptr %A, align 1
  %vecinit1.i = insertelement <2 x i64> <i64 undef, i64 0>, i64 %0, i32 0
  ret <2 x i64> %vecinit1.i
}

define <2 x i64> @test_mm_loadu_si32(ptr nocapture readonly %A) {
; X86-SSE-LABEL: test_mm_loadu_si32:
; X86-SSE:       # %bb.0: # %entry
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadu_si32:
; X86-AVX1:       # %bb.0: # %entry
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfa,0x10,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadu_si32:
; X86-AVX512:       # %bb.0: # %entry
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadu_si32:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadu_si32:
; X64-AVX1:       # %bb.0: # %entry
; X64-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX1-NEXT:    # encoding: [0xc5,0xfa,0x10,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadu_si32:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadu_si32:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT:    # encoding: [0x67,0xf3,0x0f,0x10,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadu_si32:
; X32-AVX1:       # %bb.0: # %entry
; X32-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX1-NEXT:    # encoding: [0x67,0xc5,0xfa,0x10,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadu_si32:
; X32-AVX512:       # %bb.0: # %entry
; X32-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfa,0x10,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = load i32, ptr %A, align 1
  %vecinit3.i = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %0, i32 0
  %1 = bitcast <4 x i32> %vecinit3.i to <2 x i64>
  ret <2 x i64> %1
}
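
; There is no 16-bit vector load, so the unaligned i16 goes through a scalar
; movzwl and is then inserted with movd.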

define <2 x i64> @test_mm_loadu_si16(ptr nocapture readonly %A) {
; X86-SSE-LABEL: test_mm_loadu_si16:
; X86-SSE:       # %bb.0: # %entry
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movzwl (%eax), %eax # encoding: [0x0f,0xb7,0x00]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_loadu_si16:
; X86-AVX1:       # %bb.0: # %entry
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    movzwl (%eax), %eax # encoding: [0x0f,0xb7,0x00]
; X86-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_loadu_si16:
; X86-AVX512:       # %bb.0: # %entry
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    movzwl (%eax), %eax # encoding: [0x0f,0xb7,0x00]
; X86-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadu_si16:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    movzwl (%rdi), %eax # encoding: [0x0f,0xb7,0x07]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_loadu_si16:
; X64-AVX1:       # %bb.0: # %entry
; X64-AVX1-NEXT:    movzwl (%rdi), %eax # encoding: [0x0f,0xb7,0x07]
; X64-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_loadu_si16:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    movzwl (%rdi), %eax # encoding: [0x0f,0xb7,0x07]
; X64-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_loadu_si16:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movzwl (%edi), %eax # encoding: [0x67,0x0f,0xb7,0x07]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_loadu_si16:
; X32-AVX1:       # %bb.0: # %entry
; X32-AVX1-NEXT:    movzwl (%edi), %eax # encoding: [0x67,0x0f,0xb7,0x07]
; X32-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_loadu_si16:
; X32-AVX512:       # %bb.0: # %entry
; X32-AVX512-NEXT:    movzwl (%edi), %eax # encoding: [0x67,0x0f,0xb7,0x07]
; X32-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = load i16, ptr %A, align 1
  %vecinit7.i = insertelement <8 x i16> <i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, i16 %0, i32 0
  %1 = bitcast <8 x i16> %vecinit7.i to <2 x i64>
  ret <2 x i64> %1
}

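; _mm_madd_epi16 (PMADDWD) multiplies corresponding signed 16-bit elements and
; horizontally adds each adjacent pair of 32-bit products.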
define <2 x i64> @test_mm_madd_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_madd_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pmaddwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf5,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_madd_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf5,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_madd_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf5,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone

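; _mm_maskmoveu_si128 (MASKMOVDQU) stores the bytes of %a0 whose mask byte in
; %a1 has its sign bit set. The destination address is implicitly (E/R)DI,
; which is why the X86 versions move the pointer into %edi first and the x32
; versions need an addr32 prefix.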
define void @test_mm_maskmoveu_si128(<2 x i64> %a0, <2 x i64> %a1, ptr %a2) nounwind {
; X86-SSE-LABEL: test_mm_maskmoveu_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    pushl %edi # encoding: [0x57]
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi # encoding: [0x8b,0x7c,0x24,0x08]
; X86-SSE-NEXT:    maskmovdqu %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf7,0xc1]
; X86-SSE-NEXT:    popl %edi # encoding: [0x5f]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX-LABEL: test_mm_maskmoveu_si128:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    pushl %edi # encoding: [0x57]
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %edi # encoding: [0x8b,0x7c,0x24,0x08]
; X86-AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0 # encoding: [0xc5,0xf9,0xf7,0xc1]
; X86-AVX-NEXT:    popl %edi # encoding: [0x5f]
; X86-AVX-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_maskmoveu_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    maskmovdqu %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf7,0xc1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_mm_maskmoveu_si128:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0 # encoding: [0xc5,0xf9,0xf7,0xc1]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_maskmoveu_si128:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    # kill: def $edi killed $edi killed $rdi
; X32-SSE-NEXT:    addr32 maskmovdqu %xmm1, %xmm0 # encoding: [0x67,0x66,0x0f,0xf7,0xc1]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX-LABEL: test_mm_maskmoveu_si128:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    # kill: def $edi killed $edi killed $rdi
; X32-AVX-NEXT:    addr32 vmaskmovdqu %xmm1, %xmm0 # encoding: [0x67,0xc5,0xf9,0xf7,0xc1]
; X32-AVX-NEXT:    retq # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %arg0, <16 x i8> %arg1, ptr %a2)
  ret void
}
declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, ptr) nounwind

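; The integer max tests use the generic llvm.smax/llvm.umax intrinsics; SSE2
; only provides signed-word and unsigned-byte forms (PMAXSW/PMAXUB), so the
; selection matches directly.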
define <2 x i64> @test_mm_max_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_max_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pmaxsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xee,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_max_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xee,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_max_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %sel = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %sel to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)

define <2 x i64> @test_mm_max_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_max_epu8:
; SSE:       # %bb.0:
; SSE-NEXT:    pmaxub %xmm1, %xmm0 # encoding: [0x66,0x0f,0xde,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_max_epu8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xde,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_max_epu8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %sel = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
  %bc = bitcast <16 x i8> %sel to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)

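; _mm_max_pd/_mm_max_sd stay as target-specific intrinsics rather than a
; generic fmax: the x86 MAX instructions return the second operand when either
; input is NaN or when comparing +0.0 and -0.0, which does not match IEEE
; maxNum semantics.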
define <2 x double> @test_mm_max_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_max_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    maxpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5f,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_max_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5f,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_max_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5f,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>) nounwind readnone

define <2 x double> @test_mm_max_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_max_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    maxsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5f,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_max_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5f,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_max_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5f,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.max.sd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.max.sd(<2 x double>, <2 x double>) nounwind readnone

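; _mm_mfence emits MFENCE, a full barrier ordering all earlier loads and
; stores before all later ones.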
define void @test_mm_mfence() nounwind {
; CHECK-LABEL: test_mm_mfence:
; CHECK:       # %bb.0:
; CHECK-NEXT:    mfence # encoding: [0x0f,0xae,0xf0]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  call void @llvm.x86.sse2.mfence()
  ret void
}
declare void @llvm.x86.sse2.mfence() nounwind readnone

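; The min tests mirror the max tests above: generic llvm.smin/llvm.umin select
; PMINSW/PMINUB, and the pd/sd forms stay as x86 intrinsics for the same
; NaN/signed-zero reasons.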
define <2 x i64> @test_mm_min_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_min_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pminsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xea,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_min_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xea,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_min_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %sel = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %sel to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)

define <2 x i64> @test_mm_min_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_min_epu8:
; SSE:       # %bb.0:
; SSE-NEXT:    pminub %xmm1, %xmm0 # encoding: [0x66,0x0f,0xda,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_min_epu8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xda,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_min_epu8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %sel = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
  %bc = bitcast <16 x i8> %sel to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)

define <2 x double> @test_mm_min_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_min_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    minpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5d,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_min_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vminpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5d,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_min_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vminpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5d,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone

define <2 x double> @test_mm_min_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_min_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    minsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5d,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_min_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vminsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5d,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_min_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vminsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5d,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse2.min.sd(<2 x double> %a0, <2 x double> %a1)
  ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.min.sd(<2 x double>, <2 x double>) nounwind readnone

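; _mm_move_epi64 copies the low 64-bit element and zeroes the upper half;
; MOVQ between XMM registers has exactly this zeroing behavior.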
define <2 x i64> @test_mm_move_epi64(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_move_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
; SSE-NEXT:    # xmm0 = xmm0[0],zero
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_move_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
; AVX1-NEXT:    # xmm0 = xmm0[0],zero
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_move_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
; AVX512-NEXT:    # xmm0 = xmm0[0],zero
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
  ret <2 x i64> %res
}

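; _mm_move_sd merges the low element of %a1 with the high element of %a0. The
; AVX targets recognize the shuffle as a blend of the low two 32-bit lanes,
; hence VBLENDPS $3 instead of VMOVSD.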
define <2 x double> @test_mm_move_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_move_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    movsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x10,0xc1]
; SSE-NEXT:    # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_move_sd:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps $3, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; AVX-NEXT:    # xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext0 = extractelement <2 x double> %a1, i32 0
  %res0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ext1 = extractelement <2 x double> %a0, i32 1
  %res1 = insertelement <2 x double> %res0, double %ext1, i32 1
  ret <2 x double> %res1
}

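; The movemask tests gather per-element sign bits into a general-purpose
; register: PMOVMSKB produces a 16-bit byte mask, MOVMSKPD a 2-bit double mask.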
define i32 @test_mm_movemask_epi8(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_movemask_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    pmovmskb %xmm0, %eax # encoding: [0x66,0x0f,0xd7,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_movemask_epi8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovmskb %xmm0, %eax # encoding: [0xc5,0xf9,0xd7,0xc0]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %res = call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %arg0)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone

define i32 @test_mm_movemask_pd(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_movemask_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    movmskpd %xmm0, %eax # encoding: [0x66,0x0f,0x50,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: test_mm_movemask_pd:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovmskpd %xmm0, %eax # encoding: [0xc5,0xf9,0x50,0xc0]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %a0)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone

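; _mm_mul_epu32 is expressed as the canonical PMULUDQ pattern: mask both
; operands to their low 32 bits, then do a nuw 64-bit multiply. With AVX512DQ
; available, the backend instead zeroes the odd 32-bit lanes with a blend and
; uses VPMULLQ.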
define <2 x i64> @test_mm_mul_epu32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_mul_epu32:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf4,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_mul_epu32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf4,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_mul_epu32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; AVX512-NEXT:    vpblendd $10, %xmm2, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x02,0xc2,0x0a]
; AVX512-NEXT:    # xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512-NEXT:    vpblendd $10, %xmm2, %xmm1, %xmm1 # encoding: [0xc4,0xe3,0x71,0x02,0xca,0x0a]
; AVX512-NEXT:    # xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512-NEXT:    vpmullq %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x40,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %A = and <2 x i64> %a0, <i64 4294967295, i64 4294967295>
  %B = and <2 x i64> %a1, <i64 4294967295, i64 4294967295>
  %res = mul nuw <2 x i64> %A, %B
  ret <2 x i64> %res
}

define <2 x double> @test_mm_mul_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_mul_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    mulpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x59,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_mul_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmulpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x59,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_mul_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x59,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fmul <2 x double> %a0, %a1
  ret <2 x double> %res
}

define <2 x double> @test_mm_mul_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_mul_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    mulsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x59,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_mul_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x59,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_mul_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x59,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext0 = extractelement <2 x double> %a0, i32 0
  %ext1 = extractelement <2 x double> %a1, i32 0
  %fmul = fmul double %ext0, %ext1
  %res = insertelement <2 x double> %a0, double %fmul, i32 0
  ret <2 x double> %res
}

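; The next three tests cover the 16-bit multiply family: PMULHW and PMULHUW
; return the high halves of the signed and unsigned products, while a plain IR
; mul selects PMULLW for the low halves.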
define <2 x i64> @test_mm_mulhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_mulhi_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulhw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe5,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_mulhi_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe5,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_mulhi_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmulhw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe5,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.pmulh.w(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_mulhi_epu16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_mulhi_epu16:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulhuw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe4,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_mulhi_epu16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe4,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_mulhi_epu16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmulhuw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe4,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.pmulhu.w(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_mullo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_mullo_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pmullw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd5,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_mullo_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd5,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_mullo_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmullw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd5,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = mul <8 x i16> %arg0, %arg1
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

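; Bitwise OR is domain-agnostic, so both the pd and si128 forms compile to
; ORPS, presumably because its float-domain encoding is the shortest.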
define <2 x double> @test_mm_or_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_or_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    orps %xmm1, %xmm0 # encoding: [0x0f,0x56,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_or_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x56,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_or_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x double> %a0 to <4 x i32>
  %arg1 = bitcast <2 x double> %a1 to <4 x i32>
  %res = or <4 x i32> %arg0, %arg1
  %bc = bitcast <4 x i32> %res to <2 x double>
  ret <2 x double> %bc
}

define <2 x i64> @test_mm_or_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_or_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    orps %xmm1, %xmm0 # encoding: [0x0f,0x56,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_or_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x56,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_or_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = or <2 x i64> %a0, %a1
  ret <2 x i64> %res
}

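; The pack tests narrow with saturation: PACKSSWB (i16->i8 signed), PACKSSDW
; (i32->i16 signed) and PACKUSWB (i16->i8 unsigned).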
define <2 x i64> @test_mm_packs_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_packs_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    packsswb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x63,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_packs_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x63,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_packs_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x63,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_packs_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_packs_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    packssdw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6b,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_packs_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x6b,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_packs_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6b,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %arg0, <4 x i32> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>) nounwind readnone

define <2 x i64> @test_mm_packus_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_packus_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    packuswb %xmm1, %xmm0 # encoding: [0x66,0x0f,0x67,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_packus_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x67,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_packus_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x67,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone

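; _mm_pause emits PAUSE, the spin-wait loop hint; its F3 90 encoding is
; REP NOP, so it executes as a plain NOP on processors that predate it.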
define void @test_mm_pause() nounwind {
; CHECK-LABEL: test_mm_pause:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pause # encoding: [0xf3,0x90]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  call void @llvm.x86.sse2.pause()
  ret void
}
declare void @llvm.x86.sse2.pause() nounwind readnone

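; _mm_sad_epu8 (PSADBW) sums the absolute differences of the eight byte pairs
; in each 64-bit half, leaving a 16-bit total zero-extended in each i64 lane.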
define <2 x i64> @test_mm_sad_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_sad_epu8:
; SSE:       # %bb.0:
; SSE-NEXT:    psadbw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf6,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sad_epu8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf6,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sad_epu8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf6,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %arg0, <16 x i8> %arg1)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>) nounwind readnone

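; _mm_set_epi8 takes its arguments most-significant-first, so %a15 lands in
; element 0. SSE builds the vector with a ladder of MOVD and PUNPCK
; interleaves; the AVX targets use a VPINSRB chain instead.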
define <2 x i64> @test_mm_set_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
; X86-SSE-LABEL: test_mm_set_epi8:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    punpcklbw %xmm2, %xmm0 # encoding: [0x66,0x0f,0x60,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; X86-SSE-NEXT:    punpcklwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x61,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    punpcklbw %xmm1, %xmm2 # encoding: [0x66,0x0f,0x60,0xd1]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    punpcklbw %xmm3, %xmm1 # encoding: [0x66,0x0f,0x60,0xcb]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; X86-SSE-NEXT:    punpcklwd %xmm2, %xmm1 # encoding: [0x66,0x0f,0x61,0xca]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; X86-SSE-NEXT:    punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X86-SSE-NEXT:    punpcklwd %xmm3, %xmm2 # encoding: [0x66,0x0f,0x61,0xd3]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
; X86-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; X86-SSE-NEXT:    punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; X86-SSE-NEXT:    punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_epi8:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X86-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
; X86-AVX1-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X86-AVX1-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
; X86-AVX1-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X86-AVX1-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
; X86-AVX1-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X86-AVX1-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
; X86-AVX1-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X86-AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
; X86-AVX1-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X86-AVX1-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
; X86-AVX1-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X86-AVX1-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-AVX1-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-AVX1-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_epi8:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X86-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
; X86-AVX512-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X86-AVX512-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
; X86-AVX512-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X86-AVX512-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
; X86-AVX512-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X86-AVX512-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
; X86-AVX512-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X86-AVX512-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
; X86-AVX512-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X86-AVX512-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
; X86-AVX512-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X86-AVX512-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-AVX512-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-AVX512-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set_epi8:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
; X64-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X64-SSE-NEXT:    movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; X64-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-SSE-NEXT:    punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; X64-SSE-NEXT:    movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
; X64-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X64-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X64-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X64-SSE-NEXT:    punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X64-SSE-NEXT:    punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X64-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X64-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X64-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X64-SSE-NEXT:    punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
; X64-SSE-NEXT:    # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X64-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
; X64-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; X64-SSE-NEXT:    punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; X64-SSE-NEXT:    punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X64-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_epi8:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
; X64-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
; X64-AVX1-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X64-AVX1-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X64-AVX1-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X64-AVX1-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X64-AVX1-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X64-AVX1-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X64-AVX1-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X64-AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X64-AVX1-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X64-AVX1-NEXT:    vpinsrb $10, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x0a]
; X64-AVX1-NEXT:    vpinsrb $11, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x0b]
; X64-AVX1-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x0c]
; X64-AVX1-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x0d]
; X64-AVX1-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x0e]
; X64-AVX1-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x0f]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_epi8:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
; X64-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
; X64-AVX512-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X64-AVX512-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X64-AVX512-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X64-AVX512-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X64-AVX512-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X64-AVX512-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X64-AVX512-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X64-AVX512-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X64-AVX512-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X64-AVX512-NEXT:    vpinsrb $10, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x0a]
; X64-AVX512-NEXT:    vpinsrb $11, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x0b]
; X64-AVX512-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x0c]
; X64-AVX512-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x0d]
; X64-AVX512-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x0e]
; X64-AVX512-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x0f]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set_epi8:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
; X32-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-SSE-NEXT:    movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; X32-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-SSE-NEXT:    punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; X32-SSE-NEXT:    movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
; X32-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X32-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
; X32-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-SSE-NEXT:    punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X32-SSE-NEXT:    punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
; X32-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
; X32-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X32-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X32-SSE-NEXT:    punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
; X32-SSE-NEXT:    # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
; X32-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
; X32-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; X32-SSE-NEXT:    punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; X32-SSE-NEXT:    punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X32-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set_epi8:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
; X32-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
; X32-AVX1-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
; X32-AVX1-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
; X32-AVX1-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
; X32-AVX1-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
; X32-AVX1-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
; X32-AVX1-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
; X32-AVX1-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
; X32-AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
; X32-AVX1-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X32-AVX1-NEXT:    vpinsrb $10, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x0a]
; X32-AVX1-NEXT:    vpinsrb $11, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x0b]
; X32-AVX1-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x0c]
; X32-AVX1-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x0d]
; X32-AVX1-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x0e]
; X32-AVX1-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x0f]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set_epi8:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
; X32-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
; X32-AVX512-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
; X32-AVX512-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
; X32-AVX512-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
; X32-AVX512-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
; X32-AVX512-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
; X32-AVX512-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
; X32-AVX512-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
; X32-AVX512-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
; X32-AVX512-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X32-AVX512-NEXT:    vpinsrb $10, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x0a]
; X32-AVX512-NEXT:    vpinsrb $11, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x0b]
; X32-AVX512-NEXT:    vpinsrb $12, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x0c]
; X32-AVX512-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x0d]
; X32-AVX512-NEXT:    vpinsrb $14, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x0e]
; X32-AVX512-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc7,0x0f]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <16 x i8> undef,  i8 %a15, i32 0
  %res1  = insertelement <16 x i8> %res0,  i8 %a14, i32 1
  %res2  = insertelement <16 x i8> %res1,  i8 %a13, i32 2
  %res3  = insertelement <16 x i8> %res2,  i8 %a12, i32 3
  %res4  = insertelement <16 x i8> %res3,  i8 %a11, i32 4
  %res5  = insertelement <16 x i8> %res4,  i8 %a10, i32 5
  %res6  = insertelement <16 x i8> %res5,  i8 %a9 , i32 6
  %res7  = insertelement <16 x i8> %res6,  i8 %a8 , i32 7
  %res8  = insertelement <16 x i8> %res7,  i8 %a7 , i32 8
  %res9  = insertelement <16 x i8> %res8,  i8 %a6 , i32 9
  %res10 = insertelement <16 x i8> %res9,  i8 %a5 , i32 10
  %res11 = insertelement <16 x i8> %res10, i8 %a4 , i32 11
  %res12 = insertelement <16 x i8> %res11, i8 %a3 , i32 12
  %res13 = insertelement <16 x i8> %res12, i8 %a2 , i32 13
  %res14 = insertelement <16 x i8> %res13, i8 %a1 , i32 14
  %res15 = insertelement <16 x i8> %res14, i8 %a0 , i32 15
  %res = bitcast <16 x i8> %res15 to <2 x i64>
  ret <2 x i64> %res
}
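; NOTE: _mm_set_epi8 orders its arguments high-to-low, so %a0 (the first
; parameter) lands in element 15 and %a15 in element 0. SSE assembles the
; vector through a movd + punpcklbw/punpcklwd/punpckldq/punpcklqdq merge
; tree, while the AVX targets use a single chain of vpinsrb inserts.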

define <2 x i64> @test_mm_set_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
; X86-SSE-LABEL: test_mm_set_epi16:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
; X86-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
; X86-SSE-NEXT:    movd %eax, %xmm5 # encoding: [0x66,0x0f,0x6e,0xe8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
; X86-SSE-NEXT:    movd %eax, %xmm6 # encoding: [0x66,0x0f,0x6e,0xf0]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
; X86-SSE-NEXT:    movd %eax, %xmm7 # encoding: [0x66,0x0f,0x6e,0xf8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; X86-SSE-NEXT:    punpcklwd %xmm4, %xmm3 # encoding: [0x66,0x0f,0x61,0xdc]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
; X86-SSE-NEXT:    punpckldq %xmm2, %xmm3 # encoding: [0x66,0x0f,0x62,0xda]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE-NEXT:    punpcklwd %xmm5, %xmm6 # encoding: [0x66,0x0f,0x61,0xf5]
; X86-SSE-NEXT:    # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; X86-SSE-NEXT:    punpcklwd %xmm7, %xmm0 # encoding: [0x66,0x0f,0x61,0xc7]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; X86-SSE-NEXT:    punpckldq %xmm6, %xmm0 # encoding: [0x66,0x0f,0x62,0xc6]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; X86-SSE-NEXT:    punpcklqdq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc3]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_epi16:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
; X86-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
; X86-AVX1-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
; X86-AVX1-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
; X86-AVX1-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X86-AVX1-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
; X86-AVX1-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
; X86-AVX1-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_epi16:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
; X86-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
; X86-AVX512-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
; X86-AVX512-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
; X86-AVX512-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X86-AVX512-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
; X86-AVX512-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
; X86-AVX512-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set_epi16:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
; X64-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
; X64-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-SSE-NEXT:    movd %edx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc2]
; X64-SSE-NEXT:    movd %ecx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd1]
; X64-SSE-NEXT:    punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; X64-SSE-NEXT:    punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE-NEXT:    movd %r8d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movd %r9d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xc9]
; X64-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-SSE-NEXT:    movd %r10d, %xmm3 # encoding: [0x66,0x41,0x0f,0x6e,0xda]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; X64-SSE-NEXT:    punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE-NEXT:    punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_epi16:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X64-AVX1-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
; X64-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X64-AVX1-NEXT:    vpinsrw $1, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
; X64-AVX1-NEXT:    vpinsrw $2, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
; X64-AVX1-NEXT:    vpinsrw $3, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
; X64-AVX1-NEXT:    vpinsrw $4, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
; X64-AVX1-NEXT:    vpinsrw $5, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
; X64-AVX1-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
; X64-AVX1-NEXT:    vpinsrw $7, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_epi16:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X64-AVX512-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
; X64-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X64-AVX512-NEXT:    vpinsrw $1, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
; X64-AVX512-NEXT:    vpinsrw $2, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
; X64-AVX512-NEXT:    vpinsrw $3, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
; X64-AVX512-NEXT:    vpinsrw $4, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
; X64-AVX512-NEXT:    vpinsrw $5, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
; X64-AVX512-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
; X64-AVX512-NEXT:    vpinsrw $7, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set_epi16:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
; X32-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
; X32-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
; X32-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X32-SSE-NEXT:    movd %edx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc2]
; X32-SSE-NEXT:    movd %ecx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd1]
; X32-SSE-NEXT:    punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; X32-SSE-NEXT:    punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X32-SSE-NEXT:    movd %r8d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movd %r9d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xc9]
; X32-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X32-SSE-NEXT:    movd %r10d, %xmm3 # encoding: [0x66,0x41,0x0f,0x6e,0xda]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; X32-SSE-NEXT:    punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE-NEXT:    punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set_epi16:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
; X32-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
; X32-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X32-AVX1-NEXT:    vpinsrw $1, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
; X32-AVX1-NEXT:    vpinsrw $2, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
; X32-AVX1-NEXT:    vpinsrw $3, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
; X32-AVX1-NEXT:    vpinsrw $4, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
; X32-AVX1-NEXT:    vpinsrw $5, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
; X32-AVX1-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
; X32-AVX1-NEXT:    vpinsrw $7, %edi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set_epi16:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
; X32-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
; X32-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X32-AVX512-NEXT:    vpinsrw $1, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x01]
; X32-AVX512-NEXT:    vpinsrw $2, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x02]
; X32-AVX512-NEXT:    vpinsrw $3, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x03]
; X32-AVX512-NEXT:    vpinsrw $4, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x04]
; X32-AVX512-NEXT:    vpinsrw $5, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x05]
; X32-AVX512-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x06]
; X32-AVX512-NEXT:    vpinsrw $7, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc7,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <8 x i16> undef, i16 %a7, i32 0
  %res1  = insertelement <8 x i16> %res0, i16 %a6, i32 1
  %res2  = insertelement <8 x i16> %res1, i16 %a5, i32 2
  %res3  = insertelement <8 x i16> %res2, i16 %a4, i32 3
  %res4  = insertelement <8 x i16> %res3, i16 %a3, i32 4
  %res5  = insertelement <8 x i16> %res4, i16 %a2, i32 5
  %res6  = insertelement <8 x i16> %res5, i16 %a1, i32 6
  %res7  = insertelement <8 x i16> %res6, i16 %a0, i32 7
  %res = bitcast <8 x i16> %res7 to <2 x i64>
  ret <2 x i64> %res
}
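; NOTE: as with _mm_set_epi8, the arguments arrive high-to-low (%a0 ends up
; in element 7). On the 64-bit targets the first six i16 values are passed
; in edi/esi/edx/ecx/r8d/r9d and only the last two come from the stack,
; which is why the SSE path mixes movd-from-GPR with two movzwl loads ahead
; of its punpcklwd/punpckldq/punpcklqdq merge tree.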

define <2 x i64> @test_mm_set_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_set_epi32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x08]
; X86-SSE-NEXT:    unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x0c]
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
; X86-SSE-NEXT:    unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_epi32:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
; X86-AVX1-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x01]
; X86-AVX1-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x02]
; X86-AVX1-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x03]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_epi32:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x10]
; X86-AVX512-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x01]
; X86-AVX512-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x02]
; X86-AVX512-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x03]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set_epi32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
; X64-SSE-NEXT:    punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-SSE-NEXT:    movd %edx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd2]
; X64-SSE-NEXT:    movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
; X64-SSE-NEXT:    punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X64-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_epi32:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovd %ecx, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc1]
; X64-AVX1-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
; X64-AVX1-NEXT:    vpinsrd $2, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
; X64-AVX1-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_epi32:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovd %ecx, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; X64-AVX512-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
; X64-AVX512-NEXT:    vpinsrd $2, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
; X64-AVX512-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set_epi32:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    movd %esi, %xmm1 # encoding: [0x66,0x0f,0x6e,0xce]
; X32-SSE-NEXT:    punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X32-SSE-NEXT:    movd %edx, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd2]
; X32-SSE-NEXT:    movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
; X32-SSE-NEXT:    punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set_epi32:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovd %ecx, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc1]
; X32-AVX1-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
; X32-AVX1-NEXT:    vpinsrd $2, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
; X32-AVX1-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set_epi32:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovd %ecx, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; X32-AVX512-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x01]
; X32-AVX512-NEXT:    vpinsrd $2, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x02]
; X32-AVX512-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc7,0x03]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <4 x i32> undef, i32 %a3, i32 0
  %res1  = insertelement <4 x i32> %res0, i32 %a2, i32 1
  %res2  = insertelement <4 x i32> %res1, i32 %a1, i32 2
  %res3  = insertelement <4 x i32> %res2, i32 %a0, i32 3
  %res = bitcast <4 x i32> %res3 to <2 x i64>
  ret <2 x i64> %res
}
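; NOTE: element 0 receives %a3 (the last argument) and element 3 receives
; %a0. A C-level sketch of the mapping under test (hypothetical, in the
; spirit of clang/test/CodeGen/sse2-builtins.c):
;   __m128i v = _mm_set_epi32(3, 2, 1, 0); // element 0 == 0, element 3 == 3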

; TODO test_mm_set_epi64

define <2 x i64> @test_mm_set_epi64x(i64 %a0, i64 %a1) nounwind {
; X86-SSE-LABEL: test_mm_set_epi64x:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x08]
; X86-SSE-NEXT:    unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x0c]
; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x10]
; X86-SSE-NEXT:    unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_epi64x:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
; X86-AVX1-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x01]
; X86-AVX1-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x02]
; X86-AVX1-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x03]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_epi64x:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x0c]
; X86-AVX512-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x01]
; X86-AVX512-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x04,0x02]
; X86-AVX512-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x03]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set_epi64x:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rdi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xcf]
; X64-SSE-NEXT:    movq %rsi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc6]
; X64-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_epi64x:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vmovq %rsi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
; X64-AVX1-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_epi64x:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovq %rdi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; X64-AVX512-NEXT:    vmovq %rsi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
; X64-AVX512-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set_epi64x:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movq %rdi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xcf]
; X32-SSE-NEXT:    movq %rsi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc6]
; X32-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set_epi64x:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vmovq %rsi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
; X32-AVX1-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set_epi64x:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovq %rdi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; X32-AVX512-NEXT:    vmovq %rsi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xce]
; X32-AVX512-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x i64> undef, i64 %a1, i32 0
  %res1  = insertelement <2 x i64> %res0, i64 %a0, i32 1
  ret <2 x i64> %res1
}
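; NOTE: %a1 becomes the low qword of the result. The i386 lowerings have no
; 64-bit GPRs, so each i64 is reassembled from two 32-bit stack slots
; (movss/unpcklps/movlhps for SSE, vmovd/vpinsrd for AVX); the 64-bit and
; x32 targets only need two movq transfers and one punpcklqdq.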

define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
; X86-SSE-LABEL: test_mm_set_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x0c]
; X86-SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x04]
; X86-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
; X86-AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movlhps %xmm0, %xmm1 # encoding: [0x0f,0x16,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT:    movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x16,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movlhps %xmm0, %xmm1 # encoding: [0x0f,0x16,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0]
; X32-SSE-NEXT:    movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x16,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x double> undef, double %a1, i32 0
  %res1  = insertelement <2 x double> %res0, double %a0, i32 1
  ret <2 x double> %res1
}
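; NOTE: _mm_set_pd(%a0, %a1) places %a1 in the low element. On the 64-bit
; targets both doubles already arrive in xmm0/xmm1, so the whole intrinsic
; reduces to a single (v)movlhps merge.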

define <2 x double> @test_mm_set_pd1(double %a0) nounwind {
; X86-SSE-LABEL: test_mm_set_pd1:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_pd1:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_pd1:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set_pd1:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_pd1:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_pd1:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set_pd1:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set_pd1:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set_pd1:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x double> undef, double %a0, i32 0
  %res1  = insertelement <2 x double> %res0, double %a0, i32 1
  ret <2 x double> %res1
}
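; NOTE: _mm_set_pd1 splats one double into both elements. SSE duplicates the
; low lane with a self-merging movlhps; the AVX targets use vmovddup, a
; one-source duplicate of the low double.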

define <2 x double> @test_mm_set_sd(double %a0) nounwind {
; X86-SSE-LABEL: test_mm_set_sd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x7e,0x44,0x24,0x04]
; X86-SSE-NEXT:    movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],zero
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set_sd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0],zero
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set_sd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
; X86-AVX512-NEXT:    # xmm0 = xmm0[0],zero
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set_sd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],zero
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_sd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0],zero
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_sd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm0[0],zero
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set_sd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movq %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x7e,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],zero
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set_sd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovq %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x7e,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0],zero
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set_sd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm0[0],zero
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x double> undef, double %a0, i32 0
  %res1  = insertelement <2 x double> %res0, double 0.0, i32 1
  ret <2 x double> %res1
}
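; NOTE: _mm_set_sd must zero the upper element, so every target uses a
; reg-to-reg movq/vmovq, which copies the low 64 bits and clears the rest
; (hence the "xmm0 = xmm0[0],zero" annotation) rather than a shuffle.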

define <2 x i64> @test_mm_set1_epi8(i8 %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_epi8:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm0 # encoding: [0x66,0x0f,0x60,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE-NEXT:    pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X86-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_epi8:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
; X86-AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x00,0xc1]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set1_epi8:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpbroadcastb %eax, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xc0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set1_epi8:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm0 # encoding: [0x66,0x0f,0x60,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X64-SSE-NEXT:    pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X64-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set1_epi8:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
; X64-AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x00,0xc1]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set1_epi8:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vpbroadcastb %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xc7]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set1_epi8:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm0 # encoding: [0x66,0x0f,0x60,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-SSE-NEXT:    pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X32-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set1_epi8:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xef,0xc9]
; X32-AVX1-NEXT:    vpshufb %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x00,0xc1]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set1_epi8:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vpbroadcastb %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xc7]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <16 x i8> undef,  i8 %a0, i32 0
  %res1  = insertelement <16 x i8> %res0,  i8 %a0, i32 1
  %res2  = insertelement <16 x i8> %res1,  i8 %a0, i32 2
  %res3  = insertelement <16 x i8> %res2,  i8 %a0, i32 3
  %res4  = insertelement <16 x i8> %res3,  i8 %a0, i32 4
  %res5  = insertelement <16 x i8> %res4,  i8 %a0, i32 5
  %res6  = insertelement <16 x i8> %res5,  i8 %a0, i32 6
  %res7  = insertelement <16 x i8> %res6,  i8 %a0, i32 7
  %res8  = insertelement <16 x i8> %res7,  i8 %a0, i32 8
  %res9  = insertelement <16 x i8> %res8,  i8 %a0, i32 9
  %res10 = insertelement <16 x i8> %res9,  i8 %a0, i32 10
  %res11 = insertelement <16 x i8> %res10, i8 %a0, i32 11
  %res12 = insertelement <16 x i8> %res11, i8 %a0, i32 12
  %res13 = insertelement <16 x i8> %res12, i8 %a0, i32 13
  %res14 = insertelement <16 x i8> %res13, i8 %a0, i32 14
  %res15 = insertelement <16 x i8> %res14, i8 %a0, i32 15
  %res = bitcast <16 x i8> %res15 to <2 x i64>
  ret <2 x i64> %res
}
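; NOTE: three byte-splat idioms, by ISA level: plain SSE2 widens with
; punpcklbw and then splats via pshuflw $0 + pshufd $0, AVX1 uses vpshufb
; with an all-zero shuffle mask (the vpxor), and AVX512BW broadcasts
; straight from a GPR with vpbroadcastb.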

define <2 x i64> @test_mm_set1_epi16(i16 %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_epi16:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X86-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_epi16:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX1-NEXT:    vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X86-AVX1-NEXT:    vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set1_epi16:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpbroadcastw %eax, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xc0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set1_epi16:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X64-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set1_epi16:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X64-AVX1-NEXT:    vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set1_epi16:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vpbroadcastw %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xc7]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set1_epi16:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X32-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set1_epi16:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X32-AVX1-NEXT:    vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set1_epi16:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vpbroadcastw %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xc7]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <8 x i16> undef, i16 %a0, i32 0
  %res1  = insertelement <8 x i16> %res0, i16 %a0, i32 1
  %res2  = insertelement <8 x i16> %res1, i16 %a0, i32 2
  %res3  = insertelement <8 x i16> %res2, i16 %a0, i32 3
  %res4  = insertelement <8 x i16> %res3, i16 %a0, i32 4
  %res5  = insertelement <8 x i16> %res4, i16 %a0, i32 5
  %res6  = insertelement <8 x i16> %res5, i16 %a0, i32 6
  %res7  = insertelement <8 x i16> %res6, i16 %a0, i32 7
  %res = bitcast <8 x i16> %res7 to <2 x i64>
  ret <2 x i64> %res
}
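; NOTE: the i16 splat skips the punpcklbw step needed for bytes: pshuflw $0
; replicates the word through the low half and pshufd $0 copies that dword
; across the register; AVX512BW again collapses this to a single
; vpbroadcastw from a GPR.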

define <2 x i64> @test_mm_set1_epi32(i32 %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_epi32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
; X86-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_epi32:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vshufps $0, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0xc6,0xc0,0x00]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set1_epi32:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpbroadcastd %eax, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xc0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set1_epi32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set1_epi32:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set1_epi32:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vpbroadcastd %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xc7]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set1_epi32:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set1_epi32:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vpshufd $0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x00]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set1_epi32:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vpbroadcastd %edi, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x7c,0xc7]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <4 x i32> undef, i32 %a0, i32 0
  %res1  = insertelement <4 x i32> %res0, i32 %a0, i32 1
  %res2  = insertelement <4 x i32> %res1, i32 %a0, i32 2
  %res3  = insertelement <4 x i32> %res2, i32 %a0, i32 3
  %res = bitcast <4 x i32> %res3 to <2 x i64>
  ret <2 x i64> %res
}
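; NOTE: a dword splat is a single pshufd $0 (immediate 0 selects element 0
; for all four lanes); the X86-AVX1 path reaches the same shuffle through
; the FP domain (vmovss + vshufps), and AVX512 uses vpbroadcastd.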

; TODO test_mm_set1_epi64

define <2 x i64> @test_mm_set1_epi64x(i64 %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_epi64x:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0x66,0x0f,0x6e,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0x66,0x0f,0x6e,0x4c,0x24,0x08]
; X86-SSE-NEXT:    punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE-NEXT:    pshufd $68, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x44]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,1,0,1]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_epi64x:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX1-NEXT:    vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,1,0,1]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set1_epi64x:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX512-NEXT:    vpbroadcastq %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set1_epi64x:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    pshufd $68, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x44]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,1,0,1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set1_epi64x:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,1,0,1]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set1_epi64x:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vpbroadcastq %rdi, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x7c,0xc7]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set1_epi64x:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    pshufd $68, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x44]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,1,0,1]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set1_epi64x:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovq %rdi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vpshufd $68, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x70,0xc0,0x44]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,1,0,1]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set1_epi64x:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vpbroadcastq %rdi, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x7c,0xc7]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x i64> undef, i64 %a0, i32 0
  %res1  = insertelement <2 x i64> %res0, i64 %a0, i32 1
  ret <2 x i64> %res1
}
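; NOTE: pshufd $68 decodes as 0x44 == 0b01'00'01'00, i.e. dword selectors
; (0,1,0,1), which duplicates the low qword into both halves. AVX512 can
; instead broadcast the 64-bit GPR directly with vpbroadcastq %rdi.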

define <2 x double> @test_mm_set1_pd(double %a0) nounwind {
; X86-SSE-LABEL: test_mm_set1_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_set1_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_set1_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_set1_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set1_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set1_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_set1_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_set1_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_set1_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x double> undef, double %a0, i32 0
  %res1  = insertelement <2 x double> %res0, double %a0, i32 1
  ret <2 x double> %res1
}
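; NOTE: _mm_set1_pd lowers identically to _mm_set_pd1 above (movlhps
; self-merge on SSE, vmovddup on AVX); the two intrinsics are aliases for
; the same double splat.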

define <2 x i64> @test_mm_setr_epi8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) nounwind {
; X86-SSE-LABEL: test_mm_setr_epi8:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    punpcklbw %xmm2, %xmm0 # encoding: [0x66,0x0f,0x60,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; X86-SSE-NEXT:    punpcklwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x61,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    punpcklbw %xmm1, %xmm2 # encoding: [0x66,0x0f,0x60,0xd1]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    punpcklbw %xmm3, %xmm1 # encoding: [0x66,0x0f,0x60,0xcb]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; X86-SSE-NEXT:    punpcklwd %xmm2, %xmm1 # encoding: [0x66,0x0f,0x61,0xca]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; X86-SSE-NEXT:    punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X86-SSE-NEXT:    punpcklwd %xmm3, %xmm2 # encoding: [0x66,0x0f,0x61,0xd3]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X86-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; X86-SSE-NEXT:    punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; X86-SSE-NEXT:    punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_setr_epi8:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-AVX1-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-AVX1-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X86-AVX1-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
; X86-AVX1-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X86-AVX1-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
; X86-AVX1-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X86-AVX1-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
; X86-AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X86-AVX1-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
; X86-AVX1-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X86-AVX1-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
; X86-AVX1-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X86-AVX1-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
; X86-AVX1-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X86-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X86-AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_setr_epi8:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-AVX512-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x01]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
; X86-AVX512-NEXT:    vpinsrb $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X86-AVX512-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x03]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x14]
; X86-AVX512-NEXT:    vpinsrb $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X86-AVX512-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x05]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x1c]
; X86-AVX512-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X86-AVX512-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x24]
; X86-AVX512-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X86-AVX512-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x2c]
; X86-AVX512-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X86-AVX512-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x34]
; X86-AVX512-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X86-AVX512-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x3c]
; X86-AVX512-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X86-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X86-AVX512-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_setr_epi8:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
; X64-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X64-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-SSE-NEXT:    punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X64-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X64-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X64-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X64-SSE-NEXT:    punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X64-SSE-NEXT:    punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X64-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-SSE-NEXT:    movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
; X64-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X64-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X64-SSE-NEXT:    punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
; X64-SSE-NEXT:    # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; X64-SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
; X64-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X64-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-SSE-NEXT:    movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
; X64-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X64-SSE-NEXT:    movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; X64-SSE-NEXT:    punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; X64-SSE-NEXT:    punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X64-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_setr_epi8:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x01]
; X64-AVX1-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x02]
; X64-AVX1-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x03]
; X64-AVX1-NEXT:    vpinsrb $4, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x04]
; X64-AVX1-NEXT:    vpinsrb $5, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x05]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X64-AVX1-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X64-AVX1-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X64-AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X64-AVX1-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X64-AVX1-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X64-AVX1-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X64-AVX1-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X64-AVX1-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
; X64-AVX1-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X64-AVX1-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
; X64-AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_setr_epi8:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX512-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x01]
; X64-AVX512-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x02]
; X64-AVX512-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x03]
; X64-AVX512-NEXT:    vpinsrb $4, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x04]
; X64-AVX512-NEXT:    vpinsrb $5, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x05]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X64-AVX512-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x10]
; X64-AVX512-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x18]
; X64-AVX512-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x20]
; X64-AVX512-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x28]
; X64-AVX512-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x30]
; X64-AVX512-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x38]
; X64-AVX512-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x40]
; X64-AVX512-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x48]
; X64-AVX512-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X64-AVX512-NEXT:    movzbl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x50]
; X64-AVX512-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_setr_epi8:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
; X32-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
; X32-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-SSE-NEXT:    punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
; X32-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X32-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
; X32-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm1 # encoding: [0x66,0x0f,0x60,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; X32-SSE-NEXT:    punpcklwd %xmm3, %xmm1 # encoding: [0x66,0x0f,0x61,0xcb]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X32-SSE-NEXT:    punpckldq %xmm2, %xmm1 # encoding: [0x66,0x0f,0x62,0xca]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
; X32-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-SSE-NEXT:    movzbl %r9b, %eax # encoding: [0x41,0x0f,0xb6,0xc1]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl %r8b, %eax # encoding: [0x41,0x0f,0xb6,0xc0]
; X32-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm3 # encoding: [0x66,0x0f,0x60,0xd8]
; X32-SSE-NEXT:    # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; X32-SSE-NEXT:    punpcklwd %xmm2, %xmm3 # encoding: [0x66,0x0f,0x61,0xda]
; X32-SSE-NEXT:    # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; X32-SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movzbl %dl, %eax # encoding: [0x0f,0xb6,0xc2]
; X32-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X32-SSE-NEXT:    punpcklbw %xmm0, %xmm2 # encoding: [0x66,0x0f,0x60,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X32-SSE-NEXT:    movzbl %sil, %eax # encoding: [0x40,0x0f,0xb6,0xc6]
; X32-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X32-SSE-NEXT:    movzbl %dil, %eax # encoding: [0x40,0x0f,0xb6,0xc7]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    punpcklbw %xmm4, %xmm0 # encoding: [0x66,0x0f,0x60,0xc4]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
; X32-SSE-NEXT:    punpcklwd %xmm2, %xmm0 # encoding: [0x66,0x0f,0x61,0xc2]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; X32-SSE-NEXT:    punpckldq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x62,0xc3]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X32-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_setr_epi8:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x01]
; X32-AVX1-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x02]
; X32-AVX1-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x03]
; X32-AVX1-NEXT:    vpinsrb $4, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x04]
; X32-AVX1-NEXT:    vpinsrb $5, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x05]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
; X32-AVX1-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
; X32-AVX1-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
; X32-AVX1-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
; X32-AVX1-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
; X32-AVX1-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
; X32-AVX1-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
; X32-AVX1-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
; X32-AVX1-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
; X32-AVX1-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X32-AVX1-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
; X32-AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_setr_epi8:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX512-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc6,0x01]
; X32-AVX512-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc2,0x02]
; X32-AVX512-NEXT:    vpinsrb $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc1,0x03]
; X32-AVX512-NEXT:    vpinsrb $4, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc0,0x04]
; X32-AVX512-NEXT:    vpinsrb $5, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc3,0x79,0x20,0xc1,0x05]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x08]
; X32-AVX512-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x10]
; X32-AVX512-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x07]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x18]
; X32-AVX512-NEXT:    vpinsrb $8, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x20]
; X32-AVX512-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x09]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x28]
; X32-AVX512-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x30]
; X32-AVX512-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0b]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x38]
; X32-AVX512-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x40]
; X32-AVX512-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0d]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x48]
; X32-AVX512-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; X32-AVX512-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb6,0x44,0x24,0x50]
; X32-AVX512-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0f]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <16 x i8> undef,  i8 %a0 , i32 0
  %res1  = insertelement <16 x i8> %res0,  i8 %a1 , i32 1
  %res2  = insertelement <16 x i8> %res1,  i8 %a2 , i32 2
  %res3  = insertelement <16 x i8> %res2,  i8 %a3 , i32 3
  %res4  = insertelement <16 x i8> %res3,  i8 %a4 , i32 4
  %res5  = insertelement <16 x i8> %res4,  i8 %a5 , i32 5
  %res6  = insertelement <16 x i8> %res5,  i8 %a6 , i32 6
  %res7  = insertelement <16 x i8> %res6,  i8 %a7 , i32 7
  %res8  = insertelement <16 x i8> %res7,  i8 %a8 , i32 8
  %res9  = insertelement <16 x i8> %res8,  i8 %a9 , i32 9
  %res10 = insertelement <16 x i8> %res9,  i8 %a10, i32 10
  %res11 = insertelement <16 x i8> %res10, i8 %a11, i32 11
  %res12 = insertelement <16 x i8> %res11, i8 %a12, i32 12
  %res13 = insertelement <16 x i8> %res12, i8 %a13, i32 13
  %res14 = insertelement <16 x i8> %res13, i8 %a14, i32 14
  %res15 = insertelement <16 x i8> %res14, i8 %a15, i32 15
  %res = bitcast <16 x i8> %res15 to <2 x i64>
  ret <2 x i64> %res
}

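; _mm_setr_epi16 likewise puts its first argument in element 0. Although SSE2
; provides pinsrw, the fast-isel SSE lowering below assembles the vector from
; movd/punpcklwd/punpckldq/punpcklqdq steps; AVX uses vpinsrw. Illustrative
; C usage (a sketch):
;   __m128i v = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);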
define <2 x i64> @test_mm_setr_epi16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
; X86-SSE-LABEL: test_mm_setr_epi16:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
; X86-SSE-NEXT:    movd %eax, %xmm1 # encoding: [0x66,0x0f,0x6e,0xc8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
; X86-SSE-NEXT:    movd %eax, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd0]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
; X86-SSE-NEXT:    movd %eax, %xmm4 # encoding: [0x66,0x0f,0x6e,0xe0]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
; X86-SSE-NEXT:    movd %eax, %xmm3 # encoding: [0x66,0x0f,0x6e,0xd8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X86-SSE-NEXT:    movd %eax, %xmm5 # encoding: [0x66,0x0f,0x6e,0xe8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
; X86-SSE-NEXT:    movd %eax, %xmm6 # encoding: [0x66,0x0f,0x6e,0xf0]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
; X86-SSE-NEXT:    movd %eax, %xmm7 # encoding: [0x66,0x0f,0x6e,0xf8]
; X86-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X86-SSE-NEXT:    punpcklwd %xmm1, %xmm2 # encoding: [0x66,0x0f,0x61,0xd1]
; X86-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; X86-SSE-NEXT:    punpcklwd %xmm4, %xmm3 # encoding: [0x66,0x0f,0x61,0xdc]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
; X86-SSE-NEXT:    punpckldq %xmm2, %xmm3 # encoding: [0x66,0x0f,0x62,0xda]
; X86-SSE-NEXT:    # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE-NEXT:    punpcklwd %xmm5, %xmm6 # encoding: [0x66,0x0f,0x61,0xf5]
; X86-SSE-NEXT:    # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; X86-SSE-NEXT:    punpcklwd %xmm7, %xmm0 # encoding: [0x66,0x0f,0x61,0xc7]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; X86-SSE-NEXT:    punpckldq %xmm6, %xmm0 # encoding: [0x66,0x0f,0x62,0xc6]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; X86-SSE-NEXT:    punpcklqdq %xmm3, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc3]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_setr_epi16:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovd %eax, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
; X86-AVX1-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
; X86-AVX1-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X86-AVX1-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
; X86-AVX1-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
; X86-AVX1-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
; X86-AVX1-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
; X86-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
; X86-AVX1-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_setr_epi16:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovd %eax, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x08]
; X86-AVX512-NEXT:    vpinsrw $1, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x01]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x0c]
; X86-AVX512-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x02]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X86-AVX512-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x03]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x14]
; X86-AVX512-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x04]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x18]
; X86-AVX512-NEXT:    vpinsrw $5, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x05]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x1c]
; X86-AVX512-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x06]
; X86-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x20]
; X86-AVX512-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_setr_epi16:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X64-SSE-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
; X64-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X64-SSE-NEXT:    movd %r10d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xca]
; X64-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-SSE-NEXT:    movd %r9d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc1]
; X64-SSE-NEXT:    movd %r8d, %xmm2 # encoding: [0x66,0x41,0x0f,0x6e,0xd0]
; X64-SSE-NEXT:    punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; X64-SSE-NEXT:    punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
; X64-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE-NEXT:    movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
; X64-SSE-NEXT:    movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
; X64-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-SSE-NEXT:    movd %esi, %xmm3 # encoding: [0x66,0x0f,0x6e,0xde]
; X64-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; X64-SSE-NEXT:    punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-SSE-NEXT:    punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_setr_epi16:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X64-AVX1-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
; X64-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
; X64-AVX1-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
; X64-AVX1-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
; X64-AVX1-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
; X64-AVX1-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
; X64-AVX1-NEXT:    vpinsrw $6, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x06]
; X64-AVX1-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_setr_epi16:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    movzwl {{[0-9]+}}(%rsp), %eax # encoding: [0x0f,0xb7,0x44,0x24,0x10]
; X64-AVX512-NEXT:    movzwl {{[0-9]+}}(%rsp), %r10d # encoding: [0x44,0x0f,0xb7,0x54,0x24,0x08]
; X64-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX512-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
; X64-AVX512-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
; X64-AVX512-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
; X64-AVX512-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
; X64-AVX512-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
; X64-AVX512-NEXT:    vpinsrw $6, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x06]
; X64-AVX512-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_setr_epi16:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
; X32-SSE-NEXT:    movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
; X32-SSE-NEXT:    movd %eax, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc0]
; X32-SSE-NEXT:    movd %r10d, %xmm1 # encoding: [0x66,0x41,0x0f,0x6e,0xca]
; X32-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X32-SSE-NEXT:    movd %r9d, %xmm0 # encoding: [0x66,0x41,0x0f,0x6e,0xc1]
; X32-SSE-NEXT:    movd %r8d, %xmm2 # encoding: [0x66,0x41,0x0f,0x6e,0xd0]
; X32-SSE-NEXT:    punpcklwd %xmm0, %xmm2 # encoding: [0x66,0x0f,0x61,0xd0]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; X32-SSE-NEXT:    punpckldq %xmm1, %xmm2 # encoding: [0x66,0x0f,0x62,0xd1]
; X32-SSE-NEXT:    # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X32-SSE-NEXT:    movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
; X32-SSE-NEXT:    movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
; X32-SSE-NEXT:    punpcklwd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x61,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X32-SSE-NEXT:    movd %esi, %xmm3 # encoding: [0x66,0x0f,0x6e,0xde]
; X32-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    punpcklwd %xmm3, %xmm0 # encoding: [0x66,0x0f,0x61,0xc3]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; X32-SSE-NEXT:    punpckldq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x62,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE-NEXT:    punpcklqdq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc2]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_setr_epi16:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
; X32-AVX1-NEXT:    movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
; X32-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
; X32-AVX1-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
; X32-AVX1-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
; X32-AVX1-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
; X32-AVX1-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
; X32-AVX1-NEXT:    vpinsrw $6, %r10d, %xmm0, %xmm0 # encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x06]
; X32-AVX1-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_setr_epi16:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %eax # encoding: [0x67,0x0f,0xb7,0x44,0x24,0x10]
; X32-AVX512-NEXT:    movzwl {{[0-9]+}}(%esp), %r10d # encoding: [0x67,0x44,0x0f,0xb7,0x54,0x24,0x08]
; X32-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX512-NEXT:    vpinsrw $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc6,0x01]
; X32-AVX512-NEXT:    vpinsrw $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x02]
; X32-AVX512-NEXT:    vpinsrw $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc1,0x03]
; X32-AVX512-NEXT:    vpinsrw $4, %r8d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc0,0x04]
; X32-AVX512-NEXT:    vpinsrw $5, %r9d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc1,0x05]
; X32-AVX512-NEXT:    vpinsrw $6, %r10d, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xc1,0x79,0xc4,0xc2,0x06]
; X32-AVX512-NEXT:    vpinsrw $7, %eax, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc0,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <8 x i16> undef, i16 %a0, i32 0
  %res1  = insertelement <8 x i16> %res0, i16 %a1, i32 1
  %res2  = insertelement <8 x i16> %res1, i16 %a2, i32 2
  %res3  = insertelement <8 x i16> %res2, i16 %a3, i32 3
  %res4  = insertelement <8 x i16> %res3, i16 %a4, i32 4
  %res5  = insertelement <8 x i16> %res4, i16 %a5, i32 5
  %res6  = insertelement <8 x i16> %res5, i16 %a6, i32 6
  %res7  = insertelement <8 x i16> %res6, i16 %a7, i32 7
  %res = bitcast <8 x i16> %res7 to <2 x i64>
  ret <2 x i64> %res
}

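; _mm_setr_epi32 places %a0 in element 0. With stack arguments (X86) the SSE
; lowering loads each i32 via movss and merges with unpcklps/movlhps; with
; register arguments (X64/X32) it uses movd plus punpckldq/punpcklqdq, and AVX
; uses vpinsrd. Illustrative C usage (a sketch):
;   __m128i v = _mm_setr_epi32(0, 1, 2, 3);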
define <2 x i64> @test_mm_setr_epi32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) nounwind {
; X86-SSE-LABEL: test_mm_setr_epi32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
; X86-SSE-NEXT:    unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT:    unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_setr_epi32:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX1-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX1-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_setr_epi32:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX512-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX512-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_setr_epi32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
; X64-SSE-NEXT:    movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
; X64-SSE-NEXT:    punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
; X64-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-SSE-NEXT:    movd %esi, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd6]
; X64-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X64-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_setr_epi32:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX1-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
; X64-AVX1-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
; X64-AVX1-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_setr_epi32:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X64-AVX512-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
; X64-AVX512-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
; X64-AVX512-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_setr_epi32:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movd %ecx, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc1]
; X32-SSE-NEXT:    movd %edx, %xmm1 # encoding: [0x66,0x0f,0x6e,0xca]
; X32-SSE-NEXT:    punpckldq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x62,0xc8]
; X32-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X32-SSE-NEXT:    movd %esi, %xmm2 # encoding: [0x66,0x0f,0x6e,0xd6]
; X32-SSE-NEXT:    movd %edi, %xmm0 # encoding: [0x66,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    punpckldq %xmm2, %xmm0 # encoding: [0x66,0x0f,0x62,0xc2]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_setr_epi32:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovd %edi, %xmm0 # encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX1-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
; X32-AVX1-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
; X32-AVX1-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_setr_epi32:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovd %edi, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc7]
; X32-AVX512-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc6,0x01]
; X32-AVX512-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc2,0x02]
; X32-AVX512-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0xc1,0x03]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <4 x i32> undef, i32 %a0, i32 0
  %res1  = insertelement <4 x i32> %res0, i32 %a1, i32 1
  %res2  = insertelement <4 x i32> %res1, i32 %a2, i32 2
  %res3  = insertelement <4 x i32> %res2, i32 %a3, i32 3
  %res = bitcast <4 x i32> %res3 to <2 x i64>
  ret <2 x i64> %res
}

; TODO test_mm_setr_epi64

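; The epi64x variant below builds <2 x i64> with %a0 in the low qword: movq
; plus punpcklqdq on SSE, vmovq plus vpunpcklqdq on AVX. On X86 the two i64
; arguments are reassembled from four 32-bit stack slots.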
define <2 x i64> @test_mm_setr_epi64x(i64 %a0, i64 %a1) nounwind {
; X86-SSE-LABEL: test_mm_setr_epi64x:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x4c,0x24,0x0c]
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x10]
; X86-SSE-NEXT:    unpcklps %xmm0, %xmm1 # encoding: [0x0f,0x14,0xc8]
; X86-SSE-NEXT:    # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    # encoding: [0xf3,0x0f,0x10,0x54,0x24,0x08]
; X86-SSE-NEXT:    unpcklps %xmm2, %xmm0 # encoding: [0x0f,0x14,0xc2]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_setr_epi64x:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX1-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX1-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_setr_epi64x:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x08,0x01]
; X86-AVX512-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x0c,0x02]
; X86-AVX512-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x22,0x44,0x24,0x10,0x03]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_setr_epi64x:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %rsi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xce]
; X64-SSE-NEXT:    movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
; X64-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_setr_epi64x:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovq %rsi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
; X64-AVX1-NEXT:    vmovq %rdi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
; X64-AVX1-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_setr_epi64x:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovq %rsi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
; X64-AVX512-NEXT:    vmovq %rdi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
; X64-AVX512-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_setr_epi64x:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movq %rsi, %xmm1 # encoding: [0x66,0x48,0x0f,0x6e,0xce]
; X32-SSE-NEXT:    movq %rdi, %xmm0 # encoding: [0x66,0x48,0x0f,0x6e,0xc7]
; X32-SSE-NEXT:    punpcklqdq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6c,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_setr_epi64x:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovq %rsi, %xmm0 # encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
; X32-AVX1-NEXT:    vmovq %rdi, %xmm1 # encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
; X32-AVX1-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf1,0x6c,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_setr_epi64x:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovq %rsi, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xc6]
; X32-AVX512-NEXT:    vmovq %rdi, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x6e,0xcf]
; X32-AVX512-NEXT:    vpunpcklqdq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x6c,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x i64> undef, i64 %a0, i32 0
  %res1  = insertelement <2 x i64> %res0, i64 %a1, i32 1
  ret <2 x i64> %res1
}

define <2 x double> @test_mm_setr_pd(double %a0, double %a1) nounwind {
; X86-SSE-LABEL: test_mm_setr_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x4c,0x24,0x0c]
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X86-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_setr_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX1-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf0,0x16,0xc0]
; X86-AVX1-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_setr_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x44,0x24,0x0c]
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT:    vmovlhps %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x16,0xc0]
; X86-AVX512-NEXT:    # xmm0 = xmm1[0],xmm0[0]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_setr_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X64-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_setr_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_setr_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
; X64-AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_setr_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; X32-SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_setr_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_setr_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
; X32-AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %res0  = insertelement <2 x double> undef, double %a0, i32 0
  %res1  = insertelement <2 x double> %res0, double %a1, i32 1
  ret <2 x double> %res1
}

define <2 x double> @test_mm_setzero_pd() {
; SSE-LABEL: test_mm_setzero_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0 # encoding: [0x0f,0x57,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_setzero_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_setzero_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  ret <2 x double> zeroinitializer
}

define <2 x i64> @test_mm_setzero_si128() {
; SSE-LABEL: test_mm_setzero_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0 # encoding: [0x0f,0x57,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_setzero_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_setzero_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  ret <2 x i64> zeroinitializer
}

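; A zero shuffle mask splats element 0 across all four lanes; note that the
; AVX512 run lowers the splat to vbroadcastss rather than an immediate shuffle.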
define <2 x i64> @test_mm_shuffle_epi32(<2 x i64> %a0) {
; SSE-LABEL: test_mm_shuffle_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd $0, %xmm0, %xmm0 # encoding: [0x66,0x0f,0x70,0xc0,0x00]
; SSE-NEXT:    # xmm0 = xmm0[0,0,0,0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_shuffle_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vshufps $0, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0xc6,0xc0,0x00]
; AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_shuffle_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %res = shufflevector <4 x i32> %arg0, <4 x i32> undef, <4 x i32> zeroinitializer
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}

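; The <1, 2> mask selects element 1 of %a0 and element 0 of %a1, which
; corresponds to a shufpd immediate of 1.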
define <2 x double> @test_mm_shuffle_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_mm_shuffle_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps $78, %xmm1, %xmm0 # encoding: [0x0f,0xc6,0xc1,0x4e]
; SSE-NEXT:    # xmm0 = xmm0[2,3],xmm1[0,1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_shuffle_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vshufpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc1,0x01]
; AVX1-NEXT:    # xmm0 = xmm0[1],xmm1[0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_shuffle_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vshufpd $1, %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc1,0x01]
; AVX512-NEXT:    # xmm0 = xmm0[1],xmm1[0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 2>
  ret <2 x double> %res
}

define <2 x i64> @test_mm_shufflehi_epi16(<2 x i64> %a0) {
; SSE-LABEL: test_mm_shufflehi_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufhw $0, %xmm0, %xmm0 # encoding: [0xf3,0x0f,0x70,0xc0,0x00]
; SSE-NEXT:    # xmm0 = xmm0[0,1,2,3,4,4,4,4]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_shufflehi_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufhw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfa,0x70,0xc0,0x00]
; AVX1-NEXT:    # xmm0 = xmm0[0,1,2,3,4,4,4,4]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_shufflehi_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpshufhw $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x70,0xc0,0x00]
; AVX512-NEXT:    # xmm0 = xmm0[0,1,2,3,4,4,4,4]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %res = shufflevector <8 x i16> %arg0, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_shufflelo_epi16(<2 x i64> %a0) {
; SSE-LABEL: test_mm_shufflelo_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    pshuflw $0, %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x70,0xc0,0x00]
; SSE-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_shufflelo_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshuflw $0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x70,0xc0,0x00]
; AVX1-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_shufflelo_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpshuflw $0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xc0,0x00]
; AVX512-NEXT:    # xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %res = shufflevector <8 x i16> %arg0, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 5, i32 6, i32 7>
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

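; For llvm.x86.sse2.psll.* (and the psra/psrl variants below), the per-element
; shift count is taken from the low 64 bits of the second vector operand.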
define <2 x i64> @test_mm_sll_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_sll_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psllw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf1,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sll_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf1,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sll_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf1,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_sll_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_sll_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf2,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sll_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslld %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf2,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sll_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpslld %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf2,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %arg0, <4 x i32> %arg1)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone

define <2 x i64> @test_mm_sll_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_sll_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    psllq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf3,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sll_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf3,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sll_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf3,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64> %a0, <2 x i64> %a1)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone

define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone

define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone

define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    psllq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone

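; _mm_slli_si128 is a whole-register byte shift: the shufflevector selects five
; trailing elements of the zero vector followed by bytes 0-10 of %a0, i.e. a
; left shift by 5 bytes.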
define <2 x i64> @test_mm_slli_si128(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_slli_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    pslldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xf8,0x05]
; SSE-NEXT:    # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpslldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf8,0x05]
; AVX1-NEXT:    # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpslldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x05]
; AVX512-NEXT:    # xmm0 = zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %res = shufflevector <16 x i8> zeroinitializer, <16 x i8> %arg0, <16 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x double> @test_mm_sqrt_pd(<2 x double> %a0) nounwind {
; SSE-LABEL: test_mm_sqrt_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    sqrtpd %xmm0, %xmm0 # encoding: [0x66,0x0f,0x51,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sqrt_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vsqrtpd %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x51,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sqrt_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vsqrtpd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x51,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a0)
  ret <2 x double> %res
}
declare <2 x double> @llvm.sqrt.v2f64(<2 x double>) nounwind readnone

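; _mm_sqrt_sd merges its operands: the square root of the low element of %a0
; replaces the low element of %a1, and %a1's upper element is passed through.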
define <2 x double> @test_mm_sqrt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_sqrt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    sqrtsd %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x51,0xc8]
; SSE-NEXT:    movapd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x28,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sqrt_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vsqrtsd %xmm0, %xmm1, %xmm0 # encoding: [0xc5,0xf3,0x51,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sqrt_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vsqrtsd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf3,0x51,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext = extractelement <2 x double> %a0, i32 0
  %sqrt = call double @llvm.sqrt.f64(double %ext)
  %ins = insertelement <2 x double> %a1, double %sqrt, i32 0
  ret <2 x double> %ins
}
declare double @llvm.sqrt.f64(double) nounwind readnone

; This doesn't match a clang test, but helps with fast-isel coverage.
define double @test_mm_sqrt_sd_scalar(double %a0) nounwind {
; X86-SSE-LABEL: test_mm_sqrt_sd_scalar:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    pushl %ebp # encoding: [0x55]
; X86-SSE-NEXT:    movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-SSE-NEXT:    andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-SSE-NEXT:    subl $8, %esp # encoding: [0x83,0xec,0x08]
; X86-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT:    # encoding: [0xf2,0x0f,0x10,0x45,0x08]
; X86-SSE-NEXT:    sqrtsd %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x51,0xc0]
; X86-SSE-NEXT:    movsd %xmm0, (%esp) # encoding: [0xf2,0x0f,0x11,0x04,0x24]
; X86-SSE-NEXT:    fldl (%esp) # encoding: [0xdd,0x04,0x24]
; X86-SSE-NEXT:    movl %ebp, %esp # encoding: [0x89,0xec]
; X86-SSE-NEXT:    popl %ebp # encoding: [0x5d]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_sqrt_sd_scalar:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    pushl %ebp # encoding: [0x55]
; X86-AVX1-NEXT:    movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-AVX1-NEXT:    andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-AVX1-NEXT:    subl $8, %esp # encoding: [0x83,0xec,0x08]
; X86-AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT:    # encoding: [0xc5,0xfb,0x10,0x45,0x08]
; X86-AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x51,0xc0]
; X86-AVX1-NEXT:    vmovsd %xmm0, (%esp) # encoding: [0xc5,0xfb,0x11,0x04,0x24]
; X86-AVX1-NEXT:    fldl (%esp) # encoding: [0xdd,0x04,0x24]
; X86-AVX1-NEXT:    movl %ebp, %esp # encoding: [0x89,0xec]
; X86-AVX1-NEXT:    popl %ebp # encoding: [0x5d]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_sqrt_sd_scalar:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    pushl %ebp # encoding: [0x55]
; X86-AVX512-NEXT:    movl %esp, %ebp # encoding: [0x89,0xe5]
; X86-AVX512-NEXT:    andl $-8, %esp # encoding: [0x83,0xe4,0xf8]
; X86-AVX512-NEXT:    subl $8, %esp # encoding: [0x83,0xec,0x08]
; X86-AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512-NEXT:    # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x45,0x08]
; X86-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; X86-AVX512-NEXT:    vmovsd %xmm0, (%esp) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x04,0x24]
; X86-AVX512-NEXT:    fldl (%esp) # encoding: [0xdd,0x04,0x24]
; X86-AVX512-NEXT:    movl %ebp, %esp # encoding: [0x89,0xec]
; X86-AVX512-NEXT:    popl %ebp # encoding: [0x5d]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_sqrt_sd_scalar:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    sqrtsd %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x51,0xc0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_sqrt_sd_scalar:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x51,0xc0]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_sqrt_sd_scalar:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_sqrt_sd_scalar:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    sqrtsd %xmm0, %xmm0 # encoding: [0xf2,0x0f,0x51,0xc0]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_sqrt_sd_scalar:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x51,0xc0]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_sqrt_sd_scalar:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %sqrt = call double @llvm.sqrt.f64(double %a0)
  ret double %sqrt
}

define <2 x i64> @test_mm_sra_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_sra_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psraw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe1,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sra_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe1,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sra_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsraw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe1,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_sra_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_sra_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe2,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sra_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe2,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sra_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrad %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe2,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %arg0, <4 x i32> %arg1)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>) nounwind readnone

define <2 x i64> @test_mm_srai_epi16(<2 x i64> %a0) {
; SSE-LABEL: test_mm_srai_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psraw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xe0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srai_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsraw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xe0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srai_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsraw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xe0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %arg0, i32 1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone

define <2 x i64> @test_mm_srai_epi32(<2 x i64> %a0) {
; SSE-LABEL: test_mm_srai_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xe0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srai_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrad $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xe0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srai_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrad $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xe0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %res = call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %arg0, i32 1)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32) nounwind readnone

define <2 x i64> @test_mm_srl_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_srl_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd1,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srl_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd1,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srl_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_srl_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_srl_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd2,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srl_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd2,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srl_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrld %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %arg0, <4 x i32> %arg1)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone

define <2 x i64> @test_mm_srl_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_srl_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd3,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srl_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd3,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srl_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone

define <2 x i64> @test_mm_srli_epi16(<2 x i64> %a0) {
; SSE-LABEL: test_mm_srli_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xd0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srli_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xd0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srli_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xd0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %res = call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %arg0, i32 1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) nounwind readnone

define <2 x i64> @test_mm_srli_epi32(<2 x i64> %a0) {
; SSE-LABEL: test_mm_srli_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xd0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srli_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xd0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srli_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xd0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %res = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %arg0, i32 1)
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) nounwind readnone

define <2 x i64> @test_mm_srli_epi64(<2 x i64> %a0) {
; SSE-LABEL: test_mm_srli_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xd0,0x01]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srli_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xd0,0x01]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srli_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd0,0x01]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %a0, i32 1)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) nounwind readnone

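; _mm_srli_si128 is the byte shift in the other direction: bytes 5-15 of %a0
; followed by five zero elements.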
define <2 x i64> @test_mm_srli_si128(<2 x i64> %a0) nounwind {
; SSE-LABEL: test_mm_srli_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    psrldq $5, %xmm0 # encoding: [0x66,0x0f,0x73,0xd8,0x05]
; SSE-NEXT:    # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_srli_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrldq $5, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xd8,0x05]
; AVX1-NEXT:    # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_srli_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrldq $5, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x05]
; AVX512-NEXT:    # xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %res = shufflevector <16 x i8> %arg0, <16 x i8> zeroinitializer, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define void @test_mm_store_pd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_store_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_store_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_store_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_store_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_store_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_store_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  store <2 x double> %a1, ptr %a0, align 16
  ret void
}

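; _mm_store_pd1 splats the low double before the 16-byte-aligned store;
; _mm_store1_pd (further below) expands to identical IR.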
define void @test_mm_store_pd1(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_store_pd1:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X86-SSE-NEXT:    movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_store_pd1:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX1-NEXT:    vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store_pd1:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX512-NEXT:    vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store_pd1:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X64-SSE-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_store_pd1:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX1-NEXT:    vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store_pd1:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_store_pd1:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X32-SSE-NEXT:    movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_store_pd1:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX1-NEXT:    vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_store_pd1:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX512-NEXT:    vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
  store <2 x double> %shuf, ptr %a0, align 16
  ret void
}

define void @test_mm_store_sd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_store_sd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x11,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_store_sd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovsd %xmm0, (%eax) # encoding: [0xc5,0xfb,0x11,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store_sd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovsd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store_sd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x11,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_store_sd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store_sd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_store_sd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movsd %xmm0, (%edi) # encoding: [0x67,0xf2,0x0f,0x11,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_store_sd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovsd %xmm0, (%edi) # encoding: [0x67,0xc5,0xfb,0x11,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_store_sd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovsd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x11,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ext = extractelement <2 x double> %a1, i32 0
  store double %ext, ptr %a0, align 1
  ret void
}

define void @test_mm_store_si128(ptr %a0, <2 x i64> %a1) {
; X86-SSE-LABEL: test_mm_store_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_store_si128:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store_si128:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_store_si128:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store_si128:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_store_si128:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_store_si128:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_store_si128:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  store <2 x i64> %a1, ptr %a0, align 16
  ret void
}

define void @test_mm_store1_pd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_store1_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X86-SSE-NEXT:    movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_store1_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX1-NEXT:    vmovaps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_store1_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X86-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X86-AVX512-NEXT:    vmovaps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_store1_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X64-SSE-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_store1_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX1-NEXT:    vmovaps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_store1_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X64-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X64-AVX512-NEXT:    vmovaps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_store1_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movlhps %xmm0, %xmm0 # encoding: [0x0f,0x16,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[0,0]
; X32-SSE-NEXT:    movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_store1_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovddup %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX1-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX1-NEXT:    vmovaps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_store1_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovddup %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x12,0xc0]
; X32-AVX512-NEXT:    # xmm0 = xmm0[0,0]
; X32-AVX512-NEXT:    vmovaps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x29,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> zeroinitializer
  store <2 x double> %shuf, ptr %a0, align 16
  ret void
}

define void @test_mm_storeh_sd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_storeh_sd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movhlps %xmm0, %xmm0 # encoding: [0x0f,0x12,0xc0]
; X86-SSE-NEXT:    # xmm0 = xmm0[1,1]
; X86-SSE-NEXT:    movsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x11,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storeh_sd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X86-AVX1-NEXT:    # xmm0 = xmm0[1,0]
; X86-AVX1-NEXT:    vmovsd %xmm0, (%eax) # encoding: [0xc5,0xfb,0x11,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storeh_sd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X86-AVX512-NEXT:    # xmm0 = xmm0[1,0]
; X86-AVX512-NEXT:    vmovsd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storeh_sd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movhlps %xmm0, %xmm0 # encoding: [0x0f,0x12,0xc0]
; X64-SSE-NEXT:    # xmm0 = xmm0[1,1]
; X64-SSE-NEXT:    movsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x11,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storeh_sd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X64-AVX1-NEXT:    # xmm0 = xmm0[1,0]
; X64-AVX1-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storeh_sd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X64-AVX512-NEXT:    # xmm0 = xmm0[1,0]
; X64-AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storeh_sd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movhlps %xmm0, %xmm0 # encoding: [0x0f,0x12,0xc0]
; X32-SSE-NEXT:    # xmm0 = xmm0[1,1]
; X32-SSE-NEXT:    movsd %xmm0, (%edi) # encoding: [0x67,0xf2,0x0f,0x11,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storeh_sd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X32-AVX1-NEXT:    # xmm0 = xmm0[1,0]
; X32-AVX1-NEXT:    vmovsd %xmm0, (%edi) # encoding: [0x67,0xc5,0xfb,0x11,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storeh_sd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X32-AVX512-NEXT:    # xmm0 = xmm0[1,0]
; X32-AVX512-NEXT:    vmovsd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x11,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ext = extractelement <2 x double> %a1, i32 1
  store double %ext, ptr %a0, align 8
  ret void
}

define void @test_mm_storel_epi64(ptr %a0, <2 x i64> %a1) {
; X86-SSE-LABEL: test_mm_storel_epi64:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlps %xmm0, (%eax) # encoding: [0x0f,0x13,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storel_epi64:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovlps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x13,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storel_epi64:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovlps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storel_epi64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
; X64-SSE-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storel_epi64:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X64-AVX1-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storel_epi64:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X64-AVX512-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storel_epi64:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
; X32-SSE-NEXT:    movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storel_epi64:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X32-AVX1-NEXT:    movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storel_epi64:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X32-AVX512-NEXT:    movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ext = extractelement <2 x i64> %a1, i32 0
  store i64 %ext, ptr %a0, align 8
  ret void
}

define void @test_mm_storel_sd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_storel_sd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movsd %xmm0, (%eax) # encoding: [0xf2,0x0f,0x11,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storel_sd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovsd %xmm0, (%eax) # encoding: [0xc5,0xfb,0x11,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storel_sd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovsd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storel_sd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movsd %xmm0, (%rdi) # encoding: [0xf2,0x0f,0x11,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storel_sd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovsd %xmm0, (%rdi) # encoding: [0xc5,0xfb,0x11,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storel_sd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovsd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storel_sd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movsd %xmm0, (%edi) # encoding: [0x67,0xf2,0x0f,0x11,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storel_sd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovsd %xmm0, (%edi) # encoding: [0x67,0xc5,0xfb,0x11,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storel_sd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovsd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xfb,0x11,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %ext = extractelement <2 x double> %a1, i32 0
  store double %ext, ptr %a0, align 8
  ret void
}

define void @test_mm_storer_pd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_storer_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
; X86-SSE-NEXT:    # xmm0 = xmm0[2,3,0,1]
; X86-SSE-NEXT:    movaps %xmm0, (%eax) # encoding: [0x0f,0x29,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storer_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X86-AVX1-NEXT:    # xmm0 = xmm0[1,0]
; X86-AVX1-NEXT:    vmovapd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x29,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storer_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X86-AVX512-NEXT:    # xmm0 = xmm0[1,0]
; X86-AVX512-NEXT:    vmovapd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storer_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
; X64-SSE-NEXT:    # xmm0 = xmm0[2,3,0,1]
; X64-SSE-NEXT:    movaps %xmm0, (%rdi) # encoding: [0x0f,0x29,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storer_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X64-AVX1-NEXT:    # xmm0 = xmm0[1,0]
; X64-AVX1-NEXT:    vmovapd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x29,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storer_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X64-AVX512-NEXT:    # xmm0 = xmm0[1,0]
; X64-AVX512-NEXT:    vmovapd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x29,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storer_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    shufps $78, %xmm0, %xmm0 # encoding: [0x0f,0xc6,0xc0,0x4e]
; X32-SSE-NEXT:    # xmm0 = xmm0[2,3,0,1]
; X32-SSE-NEXT:    movaps %xmm0, (%edi) # encoding: [0x67,0x0f,0x29,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storer_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X32-AVX1-NEXT:    # xmm0 = xmm0[1,0]
; X32-AVX1-NEXT:    vmovapd %xmm0, (%edi) # encoding: [0x67,0xc5,0xf9,0x29,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storer_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vshufpd $1, %xmm0, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc6,0xc0,0x01]
; X32-AVX512-NEXT:    # xmm0 = xmm0[1,0]
; X32-AVX512-NEXT:    vmovapd %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf9,0x29,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  %shuf = shufflevector <2 x double> %a1, <2 x double> undef, <2 x i32> <i32 1, i32 0>
  store <2 x double> %shuf, ptr %a0, align 16
  ret void
}
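; NOTE: _mm_storer_pd stores the two doubles in reverse order. SSE shuffles in
; the float domain first: shufps imm 78 (0x4e = 0b01001110) selects elements
; [2,3,0,1], swapping the 64-bit halves before the aligned movaps store, while
; AVX uses the direct vshufpd $1 ([1,0]). Hedged C usage sketch:
;   _mm_storer_pd(p, v); // p[0] = v[1], p[1] = v[0]; p must be 16-byte aligned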

define void @test_mm_storeu_pd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_storeu_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movups %xmm0, (%eax) # encoding: [0x0f,0x11,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storeu_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovups %xmm0, (%eax) # encoding: [0xc5,0xf8,0x11,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storeu_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovups %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storeu_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups %xmm0, (%rdi) # encoding: [0x0f,0x11,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storeu_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovups %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x11,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storeu_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovups %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storeu_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movups %xmm0, (%edi) # encoding: [0x67,0x0f,0x11,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storeu_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovups %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x11,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storeu_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovups %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x11,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  store <2 x double> %a1, ptr %a0, align 1
  ret void
}
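; NOTE: An align-1 store of <2 x double> lowers to unaligned movups/vmovups
; rather than movupd; the two are interchangeable for a plain store and movups
; encodes one byte shorter (0f 11 vs 66 0f 11). test_mm_storeu_si128 below
; gets the same lowering. Hedged C equivalent:
;   _mm_storeu_pd(p, v); // 16-byte store with no alignment requirement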

define void @test_mm_storeu_si128(ptr %a0, <2 x i64> %a1) {
; X86-SSE-LABEL: test_mm_storeu_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movups %xmm0, (%eax) # encoding: [0x0f,0x11,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storeu_si128:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovups %xmm0, (%eax) # encoding: [0xc5,0xf8,0x11,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storeu_si128:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovups %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storeu_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups %xmm0, (%rdi) # encoding: [0x0f,0x11,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storeu_si128:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovups %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x11,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storeu_si128:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovups %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storeu_si128:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movups %xmm0, (%edi) # encoding: [0x67,0x0f,0x11,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storeu_si128:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovups %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x11,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storeu_si128:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovups %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x11,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  store <2 x i64> %a1, ptr %a0, align 1
  ret void
}

define void @test_mm_storeu_si64(ptr nocapture %A, <2 x i64> %B) {
; X86-SSE-LABEL: test_mm_storeu_si64:
; X86-SSE:       # %bb.0: # %entry
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlps %xmm0, (%eax) # encoding: [0x0f,0x13,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storeu_si64:
; X86-AVX1:       # %bb.0: # %entry
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovlps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x13,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storeu_si64:
; X86-AVX512:       # %bb.0: # %entry
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovlps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storeu_si64:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
; X64-SSE-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storeu_si64:
; X64-AVX1:       # %bb.0: # %entry
; X64-AVX1-NEXT:    vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X64-AVX1-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storeu_si64:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X64-AVX512-NEXT:    movq %rax, (%rdi) # encoding: [0x48,0x89,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storeu_si64:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movq %xmm0, %rax # encoding: [0x66,0x48,0x0f,0x7e,0xc0]
; X32-SSE-NEXT:    movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storeu_si64:
; X32-AVX1:       # %bb.0: # %entry
; X32-AVX1-NEXT:    vmovq %xmm0, %rax # encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X32-AVX1-NEXT:    movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storeu_si64:
; X32-AVX512:       # %bb.0: # %entry
; X32-AVX512-NEXT:    vmovq %xmm0, %rax # EVEX TO VEX Compression encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
; X32-AVX512-NEXT:    movq %rax, (%edi) # encoding: [0x67,0x48,0x89,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
entry:
  %vecext.i = extractelement <2 x i64> %B, i32 0
  store i64 %vecext.i, ptr %A, align 1
  ret void
}
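; NOTE: Under fast-isel the 8-byte store round-trips through a GPR on 64-bit
; targets (movq %xmm0, %rax then movq %rax, (%rdi)) instead of storing straight
; from the XMM register; 32-bit x86 has no 64-bit GPR store and uses movlps.
; Hedged C equivalent:
;   _mm_storeu_si64(p, v); // store the low 64 bits of v, unaligned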

define void @test_mm_storeu_si32(ptr nocapture %A, <2 x i64> %B) {
; X86-SSE-LABEL: test_mm_storeu_si32:
; X86-SSE:       # %bb.0: # %entry
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %xmm0, %ecx # encoding: [0x66,0x0f,0x7e,0xc1]
; X86-SSE-NEXT:    movl %ecx, (%eax) # encoding: [0x89,0x08]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storeu_si32:
; X86-AVX1:       # %bb.0: # %entry
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovd %xmm0, %ecx # encoding: [0xc5,0xf9,0x7e,0xc1]
; X86-AVX1-NEXT:    movl %ecx, (%eax) # encoding: [0x89,0x08]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storeu_si32:
; X86-AVX512:       # %bb.0: # %entry
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovd %xmm0, %ecx # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc1]
; X86-AVX512-NEXT:    movl %ecx, (%eax) # encoding: [0x89,0x08]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storeu_si32:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
; X64-SSE-NEXT:    movl %eax, (%rdi) # encoding: [0x89,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storeu_si32:
; X64-AVX1:       # %bb.0: # %entry
; X64-AVX1-NEXT:    vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
; X64-AVX1-NEXT:    movl %eax, (%rdi) # encoding: [0x89,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storeu_si32:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; X64-AVX512-NEXT:    movl %eax, (%rdi) # encoding: [0x89,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storeu_si32:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
; X32-SSE-NEXT:    movl %eax, (%edi) # encoding: [0x67,0x89,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storeu_si32:
; X32-AVX1:       # %bb.0: # %entry
; X32-AVX1-NEXT:    vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
; X32-AVX1-NEXT:    movl %eax, (%edi) # encoding: [0x67,0x89,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storeu_si32:
; X32-AVX512:       # %bb.0: # %entry
; X32-AVX512-NEXT:    vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; X32-AVX512-NEXT:    movl %eax, (%edi) # encoding: [0x67,0x89,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = bitcast <2 x i64> %B to <4 x i32>
  %vecext.i = extractelement <4 x i32> %0, i32 0
  store i32 %vecext.i, ptr %A, align 1
  ret void
}
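; NOTE: The low 32 bits travel through a GPR: (v)movd to %eax/%ecx, then a
; plain movl store. Hedged C equivalent:
;   _mm_storeu_si32(p, v); // store the low 32 bits of v, unaligned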

define void @test_mm_storeu_si16(ptr nocapture %A, <2 x i64> %B) {
; X86-SSE-LABEL: test_mm_storeu_si16:
; X86-SSE:       # %bb.0: # %entry
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movd %xmm0, %ecx # encoding: [0x66,0x0f,0x7e,0xc1]
; X86-SSE-NEXT:    movw %cx, (%eax) # encoding: [0x66,0x89,0x08]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_storeu_si16:
; X86-AVX1:       # %bb.0: # %entry
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovd %xmm0, %ecx # encoding: [0xc5,0xf9,0x7e,0xc1]
; X86-AVX1-NEXT:    movw %cx, (%eax) # encoding: [0x66,0x89,0x08]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_storeu_si16:
; X86-AVX512:       # %bb.0: # %entry
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovd %xmm0, %ecx # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc1]
; X86-AVX512-NEXT:    movw %cx, (%eax) # encoding: [0x66,0x89,0x08]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_storeu_si16:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
; X64-SSE-NEXT:    movw %ax, (%rdi) # encoding: [0x66,0x89,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_storeu_si16:
; X64-AVX1:       # %bb.0: # %entry
; X64-AVX1-NEXT:    vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
; X64-AVX1-NEXT:    movw %ax, (%rdi) # encoding: [0x66,0x89,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_storeu_si16:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; X64-AVX512-NEXT:    movw %ax, (%rdi) # encoding: [0x66,0x89,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_storeu_si16:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movd %xmm0, %eax # encoding: [0x66,0x0f,0x7e,0xc0]
; X32-SSE-NEXT:    movw %ax, (%edi) # encoding: [0x67,0x66,0x89,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_storeu_si16:
; X32-AVX1:       # %bb.0: # %entry
; X32-AVX1-NEXT:    vmovd %xmm0, %eax # encoding: [0xc5,0xf9,0x7e,0xc0]
; X32-AVX1-NEXT:    movw %ax, (%edi) # encoding: [0x67,0x66,0x89,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_storeu_si16:
; X32-AVX512:       # %bb.0: # %entry
; X32-AVX512-NEXT:    vmovd %xmm0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x7e,0xc0]
; X32-AVX512-NEXT:    movw %ax, (%edi) # encoding: [0x67,0x66,0x89,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = bitcast <2 x i64> %B to <8 x i16>
  %vecext.i = extractelement <8 x i16> %0, i32 0
  store i16 %vecext.i, ptr %A, align 1
  ret void
}
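; NOTE: The 16-bit variant follows the same pattern, with a movw store whose
; encoding carries the 0x66 operand-size prefix. Hedged C equivalent:
;   _mm_storeu_si16(p, v); // store the low 16 bits of v, unaligned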

define void @test_mm_stream_pd(ptr %a0, <2 x double> %a1) {
; X86-SSE-LABEL: test_mm_stream_pd:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movntps %xmm0, (%eax) # encoding: [0x0f,0x2b,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_stream_pd:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovntps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x2b,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_stream_pd:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovntps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_stream_pd:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movntps %xmm0, (%rdi) # encoding: [0x0f,0x2b,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_stream_pd:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovntps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x2b,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_stream_pd:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovntps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_stream_pd:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movntps %xmm0, (%edi) # encoding: [0x67,0x0f,0x2b,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_stream_pd:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovntps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x2b,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_stream_pd:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovntps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x2b,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  store <2 x double> %a1, ptr %a0, align 16, !nontemporal !0
  ret void
}
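; NOTE: The !nontemporal !0 metadata (defined at the bottom of this file)
; turns the aligned store into a streaming movntps/vmovntps; movntps is used
; even for double data, encoding one byte shorter than movntpd (no 0x66
; prefix). test_mm_stream_si128 below lowers identically. Hedged C equivalent:
;   _mm_stream_pd(p, v); // non-temporal store, p must be 16-byte aligned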

define void @test_mm_stream_si32(ptr %a0, i32 %a1) {
; X86-LABEL: test_mm_stream_si32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X86-NEXT:    movntil %eax, (%ecx) # encoding: [0x0f,0xc3,0x01]
; X86-NEXT:    retl # encoding: [0xc3]
;
; X64-LABEL: test_mm_stream_si32:
; X64:       # %bb.0:
; X64-NEXT:    movntil %esi, (%rdi) # encoding: [0x0f,0xc3,0x37]
; X64-NEXT:    retq # encoding: [0xc3]
;
; X32-LABEL: test_mm_stream_si32:
; X32:       # %bb.0:
; X32-NEXT:    movntil %esi, (%edi) # encoding: [0x67,0x0f,0xc3,0x37]
; X32-NEXT:    retq # encoding: [0xc3]
  store i32 %a1, ptr %a0, align 1, !nontemporal !0
  ret void
}
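; NOTE: A non-temporal i32 store selects movnti. Hedged C equivalent:
;   _mm_stream_si32(p, x); // cache-bypassing 32-bit store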

define void @test_mm_stream_si128(ptr %a0, <2 x i64> %a1) {
; X86-SSE-LABEL: test_mm_stream_si128:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movntps %xmm0, (%eax) # encoding: [0x0f,0x2b,0x00]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX1-LABEL: test_mm_stream_si128:
; X86-AVX1:       # %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovntps %xmm0, (%eax) # encoding: [0xc5,0xf8,0x2b,0x00]
; X86-AVX1-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX512-LABEL: test_mm_stream_si128:
; X86-AVX512:       # %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovntps %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x00]
; X86-AVX512-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_stream_si128:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movntps %xmm0, (%rdi) # encoding: [0x0f,0x2b,0x07]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_stream_si128:
; X64-AVX1:       # %bb.0:
; X64-AVX1-NEXT:    vmovntps %xmm0, (%rdi) # encoding: [0xc5,0xf8,0x2b,0x07]
; X64-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_stream_si128:
; X64-AVX512:       # %bb.0:
; X64-AVX512-NEXT:    vmovntps %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2b,0x07]
; X64-AVX512-NEXT:    retq # encoding: [0xc3]
;
; X32-SSE-LABEL: test_mm_stream_si128:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movntps %xmm0, (%edi) # encoding: [0x67,0x0f,0x2b,0x07]
; X32-SSE-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX1-LABEL: test_mm_stream_si128:
; X32-AVX1:       # %bb.0:
; X32-AVX1-NEXT:    vmovntps %xmm0, (%edi) # encoding: [0x67,0xc5,0xf8,0x2b,0x07]
; X32-AVX1-NEXT:    retq # encoding: [0xc3]
;
; X32-AVX512-LABEL: test_mm_stream_si128:
; X32-AVX512:       # %bb.0:
; X32-AVX512-NEXT:    vmovntps %xmm0, (%edi) # EVEX TO VEX Compression encoding: [0x67,0xc5,0xf8,0x2b,0x07]
; X32-AVX512-NEXT:    retq # encoding: [0xc3]
  store <2 x i64> %a1, ptr %a0, align 16, !nontemporal !0
  ret void
}

define <2 x i64> @test_mm_sub_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_sub_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    psubb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf8,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sub_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf8,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sub_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = sub <16 x i8> %arg0, %arg1
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_sub_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_sub_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psubw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xf9,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sub_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xf9,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sub_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf9,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = sub <8 x i16> %arg0, %arg1
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_sub_epi32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_sub_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    psubd %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfa,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sub_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfa,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sub_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = sub <4 x i32> %arg0, %arg1
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_sub_epi64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_sub_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    psubq %xmm1, %xmm0 # encoding: [0x66,0x0f,0xfb,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sub_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfb,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sub_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = sub <2 x i64> %a0, %a1
  ret <2 x i64> %res
}

define <2 x double> @test_mm_sub_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_sub_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    subpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x5c,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sub_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vsubpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x5c,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sub_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vsubpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x5c,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = fsub <2 x double> %a0, %a1
  ret <2 x double> %res
}

define <2 x double> @test_mm_sub_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_sub_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    subsd %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x5c,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_sub_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xfb,0x5c,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_sub_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5c,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %ext0 = extractelement <2 x double> %a0, i32 0
  %ext1 = extractelement <2 x double> %a1, i32 0
  %fsub = fsub double %ext0, %ext1
  %res = insertelement <2 x double> %a0, double %fsub, i32 0
  ret <2 x double> %res
}
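; NOTE: The extract/fsub/insert-into-lane-0 idiom is folded into a single
; (v)subsd, which updates only the low double and preserves the upper double
; of %a0. Hedged C equivalent:
;   __m128d r = _mm_sub_sd(a, b); // r[0] = a[0] - b[0], r[1] = a[1]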

define <2 x i64> @test_mm_subs_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_subs_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe8,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_subs_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe8,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_subs_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
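; NOTE: The saturating-subtract tests are written against the generic
; @llvm.ssub.sat/@llvm.usub.sat intrinsics, which the backend matches back to
; psubs*/psubus*; the x86-specific llvm.x86.sse2.psubs.* intrinsics were
; retired in favor of these generic forms.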

define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_subs_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    psubsw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xe9,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_subs_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xe9,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_subs_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone

define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_subs_epu8:
; SSE:       # %bb.0:
; SSE-NEXT:    psubusb %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd8,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_subs_epu8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_subs_epu8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)

define <2 x i64> @test_mm_subs_epu16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_subs_epu16:
; SSE:       # %bb.0:
; SSE-NEXT:    psubusw %xmm1, %xmm0 # encoding: [0x66,0x0f,0xd9,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_subs_epu16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_subs_epu16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)

define i32 @test_mm_ucomieq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_ucomieq_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT:    setnp %al # encoding: [0x0f,0x9b,0xc0]
; SSE-NEXT:    sete %cl # encoding: [0x0f,0x94,0xc1]
; SSE-NEXT:    andb %al, %cl # encoding: [0x20,0xc1]
; SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_ucomieq_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX1-NEXT:    setnp %al # encoding: [0x0f,0x9b,0xc0]
; AVX1-NEXT:    sete %cl # encoding: [0x0f,0x94,0xc1]
; AVX1-NEXT:    andb %al, %cl # encoding: [0x20,0xc1]
; AVX1-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_ucomieq_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX512-NEXT:    setnp %al # encoding: [0x0f,0x9b,0xc0]
; AVX512-NEXT:    sete %cl # encoding: [0x0f,0x94,0xc1]
; AVX512-NEXT:    andb %al, %cl # encoding: [0x20,0xc1]
; AVX512-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.ucomieq.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.ucomieq.sd(<2 x double>, <2 x double>) nounwind readnone
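; NOTE: ucomisd sets ZF, PF and CF all to 1 on an unordered result, so ZF
; alone cannot distinguish "equal" from "NaN operand". The eq test therefore
; requires setnp AND sete; test_mm_ucomineq_sd below uses the dual, setp OR
; setne.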

define i32 @test_mm_ucomige_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_ucomige_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_ucomige_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX1-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_ucomige_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX512-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.ucomige.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.ucomige.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_ucomigt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_ucomigt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_ucomigt_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX1-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_ucomigt_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX512-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.ucomigt.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.ucomigt.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_ucomile_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_ucomile_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    ucomisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2e,0xc8]
; SSE-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_ucomile_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vucomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2e,0xc8]
; AVX1-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_ucomile_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vucomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
; AVX512-NEXT:    setae %al # encoding: [0x0f,0x93,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.ucomile.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.ucomile.sd(<2 x double>, <2 x double>) nounwind readnone
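; NOTE: le is lowered by swapping the operands (ucomisd %xmm0, %xmm1) and
; testing setae, since testing setbe on the unswapped compare would wrongly
; return true for unordered inputs (CF=1 on NaN); test_mm_ucomilt_sd below
; plays the same trick with seta.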

define i32 @test_mm_ucomilt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_ucomilt_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; SSE-NEXT:    ucomisd %xmm0, %xmm1 # encoding: [0x66,0x0f,0x2e,0xc8]
; SSE-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_ucomilt_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX1-NEXT:    vucomisd %xmm0, %xmm1 # encoding: [0xc5,0xf9,0x2e,0xc8]
; AVX1-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_ucomilt_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vucomisd %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc8]
; AVX512-NEXT:    seta %al # encoding: [0x0f,0x97,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.ucomilt.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.ucomilt.sd(<2 x double>, <2 x double>) nounwind readnone

define i32 @test_mm_ucomineq_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_ucomineq_sd:
; SSE:       # %bb.0:
; SSE-NEXT:    ucomisd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x2e,0xc1]
; SSE-NEXT:    setp %al # encoding: [0x0f,0x9a,0xc0]
; SSE-NEXT:    setne %cl # encoding: [0x0f,0x95,0xc1]
; SSE-NEXT:    orb %al, %cl # encoding: [0x08,0xc1]
; SSE-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_ucomineq_sd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vucomisd %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX1-NEXT:    setp %al # encoding: [0x0f,0x9a,0xc0]
; AVX1-NEXT:    setne %cl # encoding: [0x0f,0x95,0xc1]
; AVX1-NEXT:    orb %al, %cl # encoding: [0x08,0xc1]
; AVX1-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_ucomineq_sd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vucomisd %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x2e,0xc1]
; AVX512-NEXT:    setp %al # encoding: [0x0f,0x9a,0xc0]
; AVX512-NEXT:    setne %cl # encoding: [0x0f,0x95,0xc1]
; AVX512-NEXT:    orb %al, %cl # encoding: [0x08,0xc1]
; AVX512-NEXT:    movzbl %cl, %eax # encoding: [0x0f,0xb6,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = call i32 @llvm.x86.sse2.ucomineq.sd(<2 x double> %a0, <2 x double> %a1)
  ret i32 %res
}
declare i32 @llvm.x86.sse2.ucomineq.sd(<2 x double>, <2 x double>) nounwind readnone

define <2 x double> @test_mm_undefined_pd() {
; CHECK-LABEL: test_mm_undefined_pd:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  ret <2 x double> undef
}

define <2 x i64> @test_mm_undefined_si128() {
; CHECK-LABEL: test_mm_undefined_si128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  ret <2 x i64> undef
}
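; NOTE: Returning undef needs no code at all; both undefined tests compile to
; a bare ret on every configuration.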

define <2 x i64> @test_mm_unpackhi_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpackhi_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    punpckhbw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x68,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpackhi_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpunpckhbw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x68,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpackhi_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpunpckhbw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x68,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = shufflevector <16 x i8> %arg0, <16 x i8> %arg1, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_unpackhi_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpackhi_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    punpckhwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x69,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpackhi_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpunpckhwd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x69,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpackhi_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpunpckhwd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x69,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = shufflevector <8 x i16> %arg0, <8 x i16> %arg1, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_unpackhi_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpackhi_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    unpckhps %xmm1, %xmm0 # encoding: [0x0f,0x15,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpackhi_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vunpckhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x15,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpackhi_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vunpckhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x15,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = shufflevector <4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}
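; NOTE: Integer unpacks of 32- and 64-bit elements are emitted in the float
; domain (unpckhps/unpckhpd here, unpcklps/movlhps further down) rather than
; as punpckhdq/punpckhqdq; the semantics are identical for a bare shuffle, and
; the ps/movlhps forms drop the 0x66 prefix for a one-byte-shorter encoding.
; Byte and word unpacks have no float-domain equivalent and remain punpck*.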

define <2 x i64> @test_mm_unpackhi_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpackhi_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    unpckhpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x15,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpackhi_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vunpckhpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x15,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpackhi_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vunpckhpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x15,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[1],xmm1[1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 1, i32 3>
  ret <2 x i64> %res
}

define <2 x double> @test_mm_unpackhi_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_mm_unpackhi_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    unpckhpd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x15,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpackhi_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vunpckhpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x15,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[1],xmm1[1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpackhi_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vunpckhpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x15,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[1],xmm1[1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 1, i32 3>
  ret <2 x double> %res
}

define <2 x i64> @test_mm_unpacklo_epi8(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpacklo_epi8:
; SSE:       # %bb.0:
; SSE-NEXT:    punpcklbw %xmm1, %xmm0 # encoding: [0x66,0x0f,0x60,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpacklo_epi8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpunpcklbw %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x60,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpacklo_epi8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpunpcklbw %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x60,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
  %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
  %res = shufflevector <16 x i8> %arg0, <16 x i8> %arg1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  %bc = bitcast <16 x i8> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_unpacklo_epi16(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpacklo_epi16:
; SSE:       # %bb.0:
; SSE-NEXT:    punpcklwd %xmm1, %xmm0 # encoding: [0x66,0x0f,0x61,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpacklo_epi16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpunpcklwd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x61,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpacklo_epi16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpunpcklwd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x61,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
  %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
  %res = shufflevector <8 x i16> %arg0, <8 x i16> %arg1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  %bc = bitcast <8 x i16> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_unpacklo_epi32(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpacklo_epi32:
; SSE:       # %bb.0:
; SSE-NEXT:    unpcklps %xmm1, %xmm0 # encoding: [0x0f,0x14,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpacklo_epi32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vunpcklps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x14,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpacklo_epi32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vunpcklps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x14,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x i64> %a0 to <4 x i32>
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %res = shufflevector <4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  %bc = bitcast <4 x i32> %res to <2 x i64>
  ret <2 x i64> %bc
}

define <2 x i64> @test_mm_unpacklo_epi64(<2 x i64> %a0, <2 x i64> %a1) {
; SSE-LABEL: test_mm_unpacklo_epi64:
; SSE:       # %bb.0:
; SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpacklo_epi64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpacklo_epi64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 0, i32 2>
  ret <2 x i64> %res
}

define <2 x double> @test_mm_unpacklo_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_mm_unpacklo_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    movlhps %xmm1, %xmm0 # encoding: [0x0f,0x16,0xc1]
; SSE-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_unpacklo_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x16,0xc1]
; AVX1-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_unpacklo_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovlhps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x16,0xc1]
; AVX512-NEXT:    # xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 2>
  ret <2 x double> %res
}

define <2 x double> @test_mm_xor_pd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-LABEL: test_mm_xor_pd:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm0 # encoding: [0x0f,0x57,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_xor_pd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_xor_pd:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %arg0 = bitcast <2 x double> %a0 to <4 x i32>
  %arg1 = bitcast <2 x double> %a1 to <4 x i32>
  %res = xor <4 x i32> %arg0, %arg1
  %bc = bitcast <4 x i32> %res to <2 x double>
  ret <2 x double> %bc
}

define <2 x i64> @test_mm_xor_si128(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE-LABEL: test_mm_xor_si128:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm0 # encoding: [0x0f,0x57,0xc1]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_xor_si128:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_xor_si128:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vxorps %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %res = xor <2 x i64> %a0, %a1
  ret <2 x i64> %res
}
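; NOTE: Bitwise logic is domain-agnostic, so both the pd and si128 xor tests
; select xorps/vxorps, the shortest of the three equivalent encodings
; (xorps 0f 57 vs xorpd 66 0f 57 and pxor 66 0f ef).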

!0 = !{i32 1}