; llvm/llvm/test/CodeGen/X86/combine-bextr.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2,+bmi | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2,+bmi | FileCheck %s --check-prefixes=CHECK,X64

declare i32 @llvm.x86.bmi.bextr.32(i32, i32)

; BEXTR control word: bits 7:0 = start index, bits 15:8 = extract length.
; Masking the control with 255 forces the length field to zero, so the
; extraction yields 0 regardless of %x; the combine folds the intrinsic
; away entirely (only "xor eax, eax" remains).
define i32 @bextr_zero_length(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: bextr_zero_length:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    ret{{[l|q]}}
  %1 = and i32 %y, 255 ; length field (bits 15:8) known zero
  %2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 %1)
  ret i32 %2
}

; OR-ing the control with 255 forces the start-index field (bits 7:0) to
; 255, i.e. a shift amount far beyond the 32-bit operand width. The CHECK
; lines document that this case is NOT folded: codegen still emits a real
; bextrl with the adjusted control operand.
define i32 @bextr_big_shift(i32 %x, i32 %y) nounwind {
; X86-LABEL: bextr_big_shift:
; X86:       # %bb.0:
; X86-NEXT:    movl $255, %eax
; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
;
; X64-LABEL: bextr_big_shift:
; X64:       # %bb.0:
; X64-NEXT:    orl $255, %esi
; X64-NEXT:    bextrl %esi, %edi, %eax
; X64-NEXT:    retq
  %1 = or i32 %y, 255 ; start field (bits 7:0) forced to 255 (>= bit width)
  %2 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 %1)
  ret i32 %2
}

; Constant control 0xF0F = start 15, length 15, so the BEXTR result is
; known to fit in 15 bits (always non-negative as a signed i32). On X64
; the uitofp therefore lowers to a plain signed cvtsi2ss; the 32-bit
; target still goes through the por/subsd magic-constant u32->f64
; sequence before narrowing to float.
define float @bextr_uitofp(i32 %x, i32 %y) nounwind {
; X86-LABEL: bextr_uitofp:
; X86:       # %bb.0:
; X86-NEXT:    pushl %eax
; X86-NEXT:    movl $3855, %eax # imm = 0xF0F
; X86-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movd %eax, %xmm0
; X86-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    cvtsd2ss %xmm0, %xmm0
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    flds (%esp)
; X86-NEXT:    popl %eax
; X86-NEXT:    retl
;
; X64-LABEL: bextr_uitofp:
; X64:       # %bb.0:
; X64-NEXT:    movl $3855, %eax # imm = 0xF0F
; X64-NEXT:    bextrl %eax, %edi, %eax
; X64-NEXT:    cvtsi2ss %eax, %xmm0
; X64-NEXT:    retq
  %1 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 3855) ; 0xF0F: start 15, len 15
  %2 = uitofp i32 %1 to float
  ret float %2
}