; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
; rdar://8003725
declare void @llvm.trap()
@G1 = external global i32
@G2 = external global i32
define i32 @f1(i32 %cond1, i32 %x1, i32 %x2, i32 %x3) {
; CHECK-LABEL: f1:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: cmp r0, #0
; CHECK-NEXT: moveq r3, r2
; CHECK-NEXT: movne r1, r2
; CHECK-NEXT: add r0, r1, r3
; CHECK-NEXT: bx lr
entry:
%tmp1 = icmp eq i32 %cond1, 0
%tmp2 = select i1 %tmp1, i32 %x1, i32 %x2
%tmp3 = select i1 %tmp1, i32 %x2, i32 %x3
%tmp4 = add i32 %tmp2, %tmp3
ret i32 %tmp4
}
@foo = external global i32
@bar = external global [250 x i8], align 1
; CSE of cmp across BB boundary
; rdar://10660865
define void @f2() nounwind ssp {
; CHECK-LABEL: f2:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: movw r0, :lower16:(L_foo$non_lazy_ptr-(LPC1_0+8))
; CHECK-NEXT: movt r0, :upper16:(L_foo$non_lazy_ptr-(LPC1_0+8))
; CHECK-NEXT: LPC1_0:
; CHECK-NEXT: ldr r0, [pc, r0]
; CHECK-NEXT: ldr r2, [r0]
; CHECK-NEXT: cmp r2, #1
; CHECK-NEXT: bxlt lr
; CHECK-NEXT: LBB1_1: @ %for.body.lr.ph
; CHECK-NEXT: push {lr}
; CHECK-NEXT: movw r0, :lower16:(L_bar$non_lazy_ptr-(LPC1_1+8))
; CHECK-NEXT: movle r2, #1
; CHECK-NEXT: movt r0, :upper16:(L_bar$non_lazy_ptr-(LPC1_1+8))
; CHECK-NEXT: mov r1, #0
; CHECK-NEXT: LPC1_1:
; CHECK-NEXT: ldr r0, [pc, r0]
; CHECK-NEXT: bl _memset
; CHECK-NEXT: trap
entry:
%0 = load i32, ptr @foo, align 4
%cmp28 = icmp sgt i32 %0, 0
br i1 %cmp28, label %for.body.lr.ph, label %for.cond1.preheader
for.body.lr.ph: ; preds = %entry
%1 = icmp sgt i32 %0, 1
%smax = select i1 %1, i32 %0, i32 1
call void @llvm.memset.p0.i32(ptr @bar, i8 0, i32 %smax, i1 false)
call void @llvm.trap()
unreachable
for.cond1.preheader: ; preds = %entry
ret void
}
declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) nounwind
; rdar://12462006
define ptr @f3(ptr %base, ptr nocapture %offset, i32 %size) nounwind {
; CHECK-LABEL: f3:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: ldr r3, [r1]
; CHECK-NEXT: mov r9, #0
; CHECK-NEXT: cmp r3, r2
; CHECK-NEXT: blt LBB2_2
; CHECK-NEXT: @ %bb.1: @ %if.end
; CHECK-NEXT: sub r3, r3, r2
; CHECK-NEXT: add r9, r0, r3
; CHECK-NEXT: sub r2, r2, r3
; CHECK-NEXT: add r2, r3, r2
; CHECK-NEXT: str r2, [r1]
; CHECK-NEXT: LBB2_2: @ %return
; CHECK-NEXT: mov r0, r9
; CHECK-NEXT: bx lr
entry:
%0 = load i32, ptr %offset, align 4
%cmp = icmp slt i32 %0, %size
%s = sub nsw i32 %0, %size
%size2 = sub nsw i32 %size, 0
br i1 %cmp, label %return, label %if.end
if.end:
; We are checking cse between %sub here and %s in entry block.
%sub = sub nsw i32 %0, %size2
%s2 = sub nsw i32 %s, %size
%s3 = sub nsw i32 %sub, %s2
store i32 %s3, ptr %offset, align 4
%add.ptr = getelementptr inbounds i8, ptr %base, i32 %sub
br label %return
return:
%retval.0 = phi ptr [ %add.ptr, %if.end ], [ null, %entry ]
ret ptr %retval.0
}
; The cmp of %val should not be hoisted above the preceding conditional branch
define void @f4(ptr %ptr1, ptr %ptr2, i64 %val) {
; CHECK-LABEL: f4:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: cmp r0, #0
; CHECK-NEXT: movne r9, #0
; CHECK-NEXT: strne r9, [r0]
; CHECK-NEXT: orrs r0, r2, r3
; CHECK-NEXT: beq LBB3_2
; CHECK-NEXT: @ %bb.1: @ %if.end
; CHECK-NEXT: subs r0, r2, #10
; CHECK-NEXT: sbcs r0, r3, #0
; CHECK-NEXT: bxlt lr
; CHECK-NEXT: LBB3_2: @ %if.end3
; CHECK-NEXT: subs r0, r2, #10
; CHECK-NEXT: sbc r3, r3, #0
; CHECK-NEXT: stm r1, {r0, r3}
; CHECK-NEXT: bx lr
entry:
%tobool.not = icmp eq ptr %ptr1, null
br i1 %tobool.not, label %if.end, label %if.then
if.then:
store ptr null, ptr %ptr1, align 4
br label %if.end
if.end:
%tobool1 = icmp ne i64 %val, 0
%cmp = icmp slt i64 %val, 10
%or.cond = and i1 %tobool1, %cmp
br i1 %or.cond, label %cleanup, label %if.end3
if.end3:
%sub = add nsw i64 %val, -10
store i64 %sub, ptr %ptr2, align 8
br label %cleanup
cleanup:
ret void
}