; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_sp
; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu | FileCheck %s --check-prefixes=X64
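
; Load an i15, bitcast it to <5 x i3>, and extract an element at a variable
; index, zero-extending the i3 result to i32. Both targets lower this by
; spilling the widened vector to the stack and indexing into it, which is why
; the exact stack offsets are kept (--no_x86_scrub_sp).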
define i32 @extract3(ptr, i32) nounwind {
; X86-LABEL: extract3:
; X86:       # %bb.0: # %_L1
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    movl 24(%esp), %esi
; X86-NEXT:    andl $7, %esi
; X86-NEXT:    movl 20(%esp), %eax
; X86-NEXT:    movzwl (%eax), %ebx
; X86-NEXT:    movl %ebx, %ecx
; X86-NEXT:    shrb $3, %cl
; X86-NEXT:    andb $7, %cl
; X86-NEXT:    movb %bl, %ch
; X86-NEXT:    andb $7, %ch
; X86-NEXT:    movl %ebx, %eax
; X86-NEXT:    shrl $6, %eax
; X86-NEXT:    andb $7, %al
; X86-NEXT:    movl %ebx, %edx
; X86-NEXT:    shrl $9, %edx
; X86-NEXT:    andb $7, %dl
; X86-NEXT:    shrl $12, %ebx
; X86-NEXT:    movb %bl, 4(%esp)
; X86-NEXT:    movb %dl, 3(%esp)
; X86-NEXT:    movb %al, 2(%esp)
; X86-NEXT:    movb %ch, (%esp)
; X86-NEXT:    movb %cl, 1(%esp)
; X86-NEXT:    movzbl (%esp,%esi), %eax
; X86-NEXT:    andl $7, %eax
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
;
; X64-LABEL: extract3:
; X64:       # %bb.0: # %_L1
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    movzwl (%rdi), %eax
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    shrl $9, %ecx
; X64-NEXT:    andl $7, %ecx
; X64-NEXT:    movd %ecx, %xmm0
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    shrl $6, %ecx
; X64-NEXT:    andl $7, %ecx
; X64-NEXT:    movd %ecx, %xmm1
; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-NEXT:    movl %eax, %ecx
; X64-NEXT:    andl $7, %ecx
; X64-NEXT:    movd %ecx, %xmm0
; X64-NEXT:    movd %eax, %xmm2
; X64-NEXT:    shrl $3, %eax
; X64-NEXT:    andl $7, %eax
; X64-NEXT:    movd %eax, %xmm3
; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT:    psrld $12, %xmm2
; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; X64-NEXT:    movdqa %xmm0, -24(%rsp)
; X64-NEXT:    andl $7, %esi
; X64-NEXT:    movzwl -24(%rsp,%rsi,2), %eax
; X64-NEXT:    andl $7, %eax
; X64-NEXT:    retq
_L1:
  %2 = load i15, ptr %0
  %3 = bitcast i15 %2 to <5 x i3>
  %4 = extractelement <5 x i3> %3, i32 %1
  %5 = zext i3 %4 to i32
  ret i32 %5
}