llvm/test/CodeGen/X86/lea.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefixes=LINUX
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s --check-prefixes=WIN
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefixes=LINUX
; RUN: llc < %s -mtriple=x86_64-nacl | FileCheck %s --check-prefixes=LINUX
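; The x32 and NaCl runs reuse the LINUX prefixes, since they are expected to
; produce the same code as the default x86_64-linux target for these tests.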

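; A left shift by 3 followed by an add of 7 should fold into a single
; scaled-index LEA (leal 7(,%reg,8)) rather than a shl/add pair.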
define i32 @test1(i32 %x) nounwind {
; LINUX-LABEL: test1:
; LINUX:       # %bb.0:
; LINUX-NEXT:    # kill: def $edi killed $edi def $rdi
; LINUX-NEXT:    leal 7(,%rdi,8), %eax
; LINUX-NEXT:    retq
;
; WIN-LABEL: test1:
; WIN:       # %bb.0:
; WIN-NEXT:    # kill: def $ecx killed $ecx def $rcx
; WIN-NEXT:    leal 7(,%rcx,8), %eax
; WIN-NEXT:    retq
  %tmp1 = shl i32 %x, 3
  %tmp2 = add i32 %tmp1, 7
  ret i32 %tmp2
}


; ISel the add of -4 with a neg and use an lea for the rest of the
; arithmetic.
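; In bb.nph, ((x - 5) >> 2) * -4 is expected to lower to lea/and/neg, and the
; final adds of %x_offs and -4 to fold into the base register and displacement
; of a second LEA (leal -4(%reg,%idx)).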
define i32 @test2(i32 %x_offs) nounwind readnone {
; LINUX-LABEL: test2:
; LINUX:       # %bb.0: # %entry
; LINUX-NEXT:    # kill: def $edi killed $edi def $rdi
; LINUX-NEXT:    cmpl $5, %edi
; LINUX-NEXT:    jl .LBB1_2
; LINUX-NEXT:  # %bb.1: # %bb.nph
; LINUX-NEXT:    leal -5(%rdi), %eax
; LINUX-NEXT:    andl $-4, %eax
; LINUX-NEXT:    negl %eax
; LINUX-NEXT:    leal -4(%rdi,%rax), %eax
; LINUX-NEXT:    retq
; LINUX-NEXT:  .LBB1_2: # %bb2
; LINUX-NEXT:    movl %edi, %eax
; LINUX-NEXT:    retq
;
; WIN-LABEL: test2:
; WIN:       # %bb.0: # %entry
; WIN-NEXT:    # kill: def $ecx killed $ecx def $rcx
; WIN-NEXT:    cmpl $5, %ecx
; WIN-NEXT:    jl .LBB1_2
; WIN-NEXT:  # %bb.1: # %bb.nph
; WIN-NEXT:    leal -5(%rcx), %eax
; WIN-NEXT:    andl $-4, %eax
; WIN-NEXT:    negl %eax
; WIN-NEXT:    leal -4(%rcx,%rax), %eax
; WIN-NEXT:    retq
; WIN-NEXT:  .LBB1_2: # %bb2
; WIN-NEXT:    movl %ecx, %eax
; WIN-NEXT:    retq
entry:
  %t0 = icmp sgt i32 %x_offs, 4
  br i1 %t0, label %bb.nph, label %bb2

bb.nph:
  %tmp = add i32 %x_offs, -5
  %tmp6 = lshr i32 %tmp, 2
  %tmp7 = mul i32 %tmp6, -4
  %tmp8 = add i32 %tmp7, %x_offs
  %tmp9 = add i32 %tmp8, -4
  ret i32 %tmp9

bb2:
  ret i32 %x_offs
}