; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
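
; t0: pshufw with an immediate shuffle mask (238 = 0xEE selects elements
; [2,3,2,3]); only the low 32 bits of the shuffled result are returned.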
define i32 @t0(i64 %x) nounwind {
; X86-LABEL: t0:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pshufw $238, {{[0-9]+}}(%esp), %mm0 # mm0 = mem[2,3,2,3]
; X86-NEXT:    movd %mm0, %eax
; X86-NEXT:    retl
;
; X64-LABEL: t0:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq %rdi, %mm0
; X64-NEXT:    pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    retq
entry:
%0 = bitcast i64 %x to <4 x i16>
%1 = bitcast <4 x i16> %0 to <1 x i64>
%2 = tail call <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64> %1, i8 -18)
%3 = bitcast <1 x i64> %2 to <4 x i16>
%4 = bitcast <4 x i16> %3 to <1 x i64>
%5 = extractelement <1 x i64> %4, i32 0
%6 = bitcast i64 %5 to <2 x i32>
%7 = extractelement <2 x i32> %6, i32 0
ret i32 %7
}
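
; t1: a variable shift count reaches the pslli.q intrinsic, so the backend
; moves %n into an MMX register and selects the register form of psllq.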
define i64 @t1(i64 %x, i32 %n) nounwind {
; X86-LABEL: t1:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movq 8(%ebp), %mm0
; X86-NEXT:    movd 16(%ebp), %mm1
; X86-NEXT:    psllq %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq %rdi, %mm0
; X64-NEXT:    movd %esi, %mm1
; X64-NEXT:    psllq %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
%0 = bitcast i64 %x to <1 x i64>
%1 = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %0, i32 %n)
%2 = bitcast <1 x i64> %1 to i64
ret i64 %2
}
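
; t2: the shifted operand is built from a single i32 (%w in the low lane,
; zero in the high lane), then combined with %x via por.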
define i64 @t2(i64 %x, i32 %n, i32 %w) nounwind {
; X86-LABEL: t2:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movd 20(%ebp), %mm0
; X86-NEXT:    movd 16(%ebp), %mm1
; X86-NEXT:    psllq %mm1, %mm0
; X86-NEXT:    por 8(%ebp), %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd %edx, %mm0
; X64-NEXT:    movd %esi, %mm1
; X64-NEXT:    psllq %mm1, %mm0
; X64-NEXT:    movq %rdi, %mm1
; X64-NEXT:    por %mm0, %mm1
; X64-NEXT:    movq %mm1, %rax
; X64-NEXT:    retq
entry:
%0 = insertelement <2 x i32> undef, i32 %w, i32 0
%1 = insertelement <2 x i32> %0, i32 0, i32 1
%2 = bitcast <2 x i32> %1 to <1 x i64>
%3 = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %2, i32 %n)
%4 = bitcast i64 %x to <1 x i64>
%5 = tail call <1 x i64> @llvm.x86.mmx.por(<1 x i64> %4, <1 x i64> %3)
%6 = bitcast <1 x i64> %5 to i64
ret i64 %6
}
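
; t3: same shift as t1, but both the value and the count are loaded from
; memory through pointers.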
define i64 @t3(ptr %y, ptr %n) nounwind {
; X86-LABEL: t3:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    andl $-8, %esp
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 12(%ebp), %eax
; X86-NEXT:    movl 8(%ebp), %ecx
; X86-NEXT:    movq (%ecx), %mm0
; X86-NEXT:    movd (%eax), %mm1
; X86-NEXT:    psllq %mm1, %mm0
; X86-NEXT:    movq %mm0, (%esp)
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %ebp, %esp
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
;
; X64-LABEL: t3:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq (%rdi), %mm0
; X64-NEXT:    movd (%rsi), %mm1
; X64-NEXT:    psllq %mm1, %mm0
; X64-NEXT:    movq %mm0, %rax
; X64-NEXT:    retq
entry:
%0 = load <1 x i64>, ptr %y, align 8
%1 = load i32, ptr %n, align 4
%2 = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %0, i32 %1)
%3 = bitcast <1 x i64> %2 to i64
ret i64 %3
}

declare <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64>, i8)
declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32)
declare <1 x i64> @llvm.x86.mmx.por(<1 x i64>, <1 x i64>)