llvm/test/CodeGen/X86/lea-2.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-linux          | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-linux        | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-nacl         | FileCheck %s --check-prefix=X64
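
; To regenerate the CHECK lines after a codegen change, rerun the script named
; in the NOTE above (a sketch, assuming a local llc build; --llc-binary selects
; the binary to use):
;   llvm/utils/update_llc_test_checks.py --llc-binary=<path-to-build>/bin/llc \
;     llvm/test/CodeGen/X86/lea-2.ll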

; The computation of %t4 should be selected as a single LEA, with no separate
; add instructions emitted.
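; A single LEA can evaluate disp(base,index,scale) = base + index*scale + disp,
; so leal -5(%ecx,%eax,4) yields B + 4*A - 5, i.e. %t3 + %t1 = %t4, folding the
; shift and both adds into one instruction.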

define i32 @test1(i32 %A, i32 %B) {
; X86-LABEL: test1:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    leal -5(%ecx,%eax,4), %eax
; X86-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $esi killed $esi def $rsi
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    leal -5(%rsi,%rdi,4), %eax
; X64-NEXT:    retq
  %t1 = shl i32 %A, 2
  %t3 = add i32 %B, -5
  %t4 = add i32 %t3, %t1
  ret i32 %t4
}

; The add-like OR instruction should fold into the LEA.
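; Since %x1 = and i32 %a0, -8 clears the low three bits, or'ing in 2 sets a
; known-zero bit, so the OR is equivalent to an add of 2 (a disjoint, add-like
; OR). After the shl by 1 the constant becomes 4, which appears as the
; displacement in leaq 4(%rsi,%rdi,2).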

define i64 @test2(i32 %a0, i64 %a1) {
; X86-LABEL: test2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    andl $2147483640, %eax # imm = 0x7FFFFFF8
; X86-NEXT:    shrl $31, %edx
; X86-NEXT:    leal 4(%eax,%eax), %eax
; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0:
; X64-NEXT:    # kill: def $edi killed $edi def $rdi
; X64-NEXT:    andl $-8, %edi
; X64-NEXT:    leaq 4(%rsi,%rdi,2), %rax
; X64-NEXT:    retq
  %x1 = and i32 %a0, -8
  %x2 = or i32 %x1, 2
  %x3 = zext i32 %x2 to i64
  %x4 = shl i64 %x3, 1
  %x5 = add i64 %a1, %x4
  ret i64 %x5
}