# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64-apple-darwin -debugify-and-strip-all-safe -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - | FileCheck %s
# Check that we propagate the G_SEXT to the sources of the phi operand.
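# Concretely, the combiner is expected to sink the extend into each predecessor,
# where it folds with the incoming G_CONSTANTs. A rough sketch of the expected
# rewrite (register names here are illustrative; the real output is in the
# CHECK lines below):
#   %c4:_(s64)  = G_CONSTANT i64 4     ; was i32 4 fed through G_SEXT
#   %c10:_(s64) = G_CONSTANT i64 10    ; was i32 10 fed through G_SEXT
#   %ext:_(s64) = G_PHI %c4(s64), %bb.1, %c10(s64), %bb.2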
---
name: sext_icst_through_phi
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: sext_icst_through_phi
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %ext:_(s64) = G_PHI [[C]](s64), %bb.1, [[C1]](s64), %bb.2
; CHECK-NEXT: $x0 = COPY %ext(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%cst32_4:_(s32) = G_CONSTANT i32 4
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
bb.4:
%phi:_(s32) = G_PHI %cst32_4(s32), %bb.2, %cst32_10(s32), %bb.3
%ext:_(s64) = G_SEXT %phi
$x0 = COPY %ext(s64)
RET_ReallyLR implicit $x0
...
# Check that we propagate the G_ZEXT to the sources of the phi operand.
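# Same shape as the G_SEXT case above: the G_ZEXT is expected to be sunk into
# the predecessors and to fold with the i32 constants, so the CHECK lines below
# look for i64 constants feeding the G_PHI and no remaining G_ZEXT.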
---
name: zext_icst_through_phi
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: zext_icst_through_phi
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %ext:_(s64) = G_PHI [[C]](s64), %bb.1, [[C1]](s64), %bb.2
; CHECK-NEXT: $x0 = COPY %ext(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%cst32_4:_(s32) = G_CONSTANT i32 4
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
bb.4:
%phi:_(s32) = G_PHI %cst32_4(s32), %bb.2, %cst32_10(s32), %bb.3
%ext:_(s64) = G_ZEXT %phi
$x0 = COPY %ext(s64)
RET_ReallyLR implicit $x0
...
# Don't handle vectors because of potential cost issues.
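# Widening each incoming vector value (here two <4 x s32> loads to <4 x s64>)
# could well be more expensive than a single extend of the phi result, so the
# CHECK lines below expect the G_SEXT to remain after the G_PHI, unchanged.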
---
name: sext_load_through_phi_vector
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: sext_load_through_phi_vector
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $x0, $q0, $q1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %ptr:_(p0) = COPY $x0
; CHECK-NEXT: %cmp:_(s1) = G_IMPLICIT_DEF
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %ld1:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %ld2:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %phi:_(<4 x s32>) = G_PHI %ld1(<4 x s32>), %bb.1, %ld2(<4 x s32>), %bb.2
; CHECK-NEXT: %ext:_(<4 x s64>) = G_SEXT %phi(<4 x s32>)
; CHECK-NEXT: G_STORE %ext(<4 x s64>), %ptr(p0) :: (store (<4 x s64>))
; CHECK-NEXT: RET_ReallyLR
bb.1.entry:
liveins: $x0, $q0, $q1
%0:_(<4 x s32>) = COPY $q0
%1:_(<4 x s32>) = COPY $q1
%ptr:_(p0) = COPY $x0
%cmp:_(s1) = G_IMPLICIT_DEF
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%ld1:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
G_BR %bb.4
bb.3:
%ld2:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
bb.4:
%phi:_(<4 x s32>) = G_PHI %ld1(<4 x s32>), %bb.2, %ld2(<4 x s32>), %bb.3
%ext:_(<4 x s64>) = G_SEXT %phi
G_STORE %ext(<4 x s64>), %ptr(p0) :: (store (<4 x s64>))
RET_ReallyLR
...
# Check that we don't propagate if the extend is used by a G_PTR_ADD, which on
# AArch64 has a good chance of folding in the extend.
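# The only user of the G_SEXT here is a G_PTR_ADD, so the extend can likely be
# folded into the address computation (e.g. an add using the sxtw extended
# register form), and sinking it through the phi would give that up; the CHECK
# lines below therefore expect the G_PHI, G_SEXT and G_PTR_ADD to be unchanged.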
---
name: sext_icst_through_phi_used_by_ptradd
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: sext_icst_through_phi_used_by_ptradd
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: %base:_(p0) = COPY $x2
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cst32_4:_(s32) = G_CONSTANT i32 4
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cst32_10:_(s32) = G_CONSTANT i32 10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %phi:_(s32) = G_PHI %cst32_4(s32), %bb.1, %cst32_10(s32), %bb.2
; CHECK-NEXT: %ext:_(s64) = G_SEXT %phi(s32)
; CHECK-NEXT: %ptr:_(p0) = G_PTR_ADD %base, %ext(s64)
; CHECK-NEXT: $x0 = COPY %ptr(p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1, $x2
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%base:_(p0) = COPY $x2
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%cst32_4:_(s32) = G_CONSTANT i32 4
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
bb.4:
%phi:_(s32) = G_PHI %cst32_4(s32), %bb.2, %cst32_10(s32), %bb.3
%ext:_(s64) = G_SEXT %phi
%ptr:_(p0) = G_PTR_ADD %base, %ext
$x0 = COPY %ptr(p0)
RET_ReallyLR implicit $x0
...
# Same as above, but here we do perform the combine because the extend has
# multiple users, so removing it probably won't cost extra instructions.
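# The extend has a second user ($x1 below), so it has to be materialised anyway
# and sinking it through the G_PHI is expected to be free: the CHECK lines look
# for i64 constants feeding the G_PHI, with the G_PTR_ADD taking %ext directly.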
---
name: sext_icst_through_phi_used_by_ptradd_multiuse
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: sext_icst_through_phi_used_by_ptradd_multiuse
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: %base:_(p0) = COPY $x2
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %ext:_(s64) = G_PHI [[C]](s64), %bb.1, [[C1]](s64), %bb.2
; CHECK-NEXT: %ptr:_(p0) = G_PTR_ADD %base, %ext(s64)
; CHECK-NEXT: $x0 = COPY %ptr(p0)
; CHECK-NEXT: $x1 = COPY %ext(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1, $x2
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%base:_(p0) = COPY $x2
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%cst32_4:_(s32) = G_CONSTANT i32 4
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
bb.4:
%phi:_(s32) = G_PHI %cst32_4(s32), %bb.2, %cst32_10(s32), %bb.3
%ext:_(s64) = G_SEXT %phi
%ptr:_(p0) = G_PTR_ADD %base, %ext
$x0 = COPY %ptr(p0)
$x1 = COPY %ext(s64)
RET_ReallyLR implicit $x0
...
# Check we don't propagate if there are more than 2 unique incoming values in the phi.
# Doing so might cause too much code bloat.
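# With three unique incoming values, sinking the G_ZEXT would trade one extend
# for three widened sources, so the CHECK lines below expect the G_ZEXT to
# remain after the G_PHI.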
---
name: zext_icst_through_phi_too_many_incoming
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: zext_icst_through_phi_too_many_incoming
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cst32_4:_(s32) = G_CONSTANT i32 4
; CHECK-NEXT: %cond:_(s1) = G_IMPLICIT_DEF
; CHECK-NEXT: G_BRCOND %cond(s1), %bb.3
; CHECK-NEXT: G_BR %bb.4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.4(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cst32_10:_(s32) = G_CONSTANT i32 10
; CHECK-NEXT: G_BR %bb.4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: successors: %bb.4(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cst32_42:_(s32) = G_CONSTANT i32 42
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4:
; CHECK-NEXT: %phi:_(s32) = G_PHI %cst32_4(s32), %bb.1, %cst32_10(s32), %bb.2, %cst32_42(s32), %bb.3
; CHECK-NEXT: %ext:_(s64) = G_ZEXT %phi(s32)
; CHECK-NEXT: $x0 = COPY %ext(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%cst32_4:_(s32) = G_CONSTANT i32 4
%cond:_(s1) = G_IMPLICIT_DEF
G_BRCOND %cond, %bb.5
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
G_BR %bb.4
bb.5:
%cst32_42:_(s32) = G_CONSTANT i32 42
bb.4:
%phi:_(s32) = G_PHI %cst32_4(s32), %bb.2, %cst32_10(s32), %bb.3, %cst32_42(s32), %bb.5
%ext:_(s64) = G_ZEXT %phi
$x0 = COPY %ext(s64)
RET_ReallyLR implicit $x0
...
# Check that we don't propagate if the extension would be of a non-allowed inst.
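# One of the incoming values is defined by a G_ADD, which is not among the
# instructions this combine will extend early for G_SEXT/G_ZEXT, so the CHECK
# lines below expect the G_SEXT to stay after the G_PHI.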
---
name: sext_add_through_phi
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: sext_add_through_phi
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %add:_(s32) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cst32_10:_(s32) = G_CONSTANT i32 10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %phi:_(s32) = G_PHI %add(s32), %bb.1, %cst32_10(s32), %bb.2
; CHECK-NEXT: %ext:_(s64) = G_SEXT %phi(s32)
; CHECK-NEXT: $x0 = COPY %ext(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%add:_(s32) = G_ADD %0, %1
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
bb.4:
%phi:_(s32) = G_PHI %add(s32), %bb.2, %cst32_10(s32), %bb.3
%ext:_(s64) = G_SEXT %phi
$x0 = COPY %ext(s64)
RET_ReallyLR implicit $x0
...
# Same as above but allowed with a G_ANYEXT.
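# A G_ANYEXT leaves the extra bits undefined, so hoisting it into the
# predecessor is expected to be free even for the G_ADD: the CHECK lines below
# show a G_ANYEXT of %add in bb.1 and the constant folded to i64, both feeding
# an s64 G_PHI.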
---
name: anyext_add_through_phi
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: anyext_add_through_phi
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %add:_(s32) = G_ADD [[COPY]], [[COPY1]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT %add(s32)
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %ext:_(s64) = G_PHI [[ANYEXT]](s64), %bb.1, [[C]](s64), %bb.2
; CHECK-NEXT: $x0 = COPY %ext(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%add:_(s32) = G_ADD %0, %1
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
bb.4:
%phi:_(s32) = G_PHI %add(s32), %bb.2, %cst32_10(s32), %bb.3
%ext:_(s64) = G_ANYEXT %phi
$x0 = COPY %ext(s64)
RET_ReallyLR implicit $x0
...
# Same as above, but with a source MI that has multiple def operands.
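# Here the incoming value %second is one of two registers defined by a
# G_UNMERGE_VALUES; the combine is still expected to fire by anyext'ing
# %second in its predecessor rather than touching the unmerge itself (see the
# G_ANYEXT of %second in the CHECK lines below).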
---
name: anyext_add_through_phi_multiple_operands
tracksRegLiveness: true
body: |
; CHECK-LABEL: name: anyext_add_through_phi_multiple_operands
; CHECK: bb.0.entry:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $w0, $w1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 2
; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), %one
; CHECK-NEXT: G_BRCOND %cmp(s1), %bb.2
; CHECK-NEXT: G_BR %bb.1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %big0:_(s64) = G_SEXT [[COPY]](s32)
; CHECK-NEXT: %big1:_(s64) = G_SEXT [[COPY1]](s32)
; CHECK-NEXT: %add:_(s64) = G_ADD %big0, %big1
; CHECK-NEXT: %first:_(s32), %second:_(s32) = G_UNMERGE_VALUES %add(s64)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT %second(s32)
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3:
; CHECK-NEXT: %ext:_(s64) = G_PHI [[ANYEXT]](s64), %bb.1, [[C]](s64), %bb.2
; CHECK-NEXT: $x0 = COPY %ext(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
bb.1.entry:
liveins: $w0, $w1
%0:_(s32) = COPY $w0
%1:_(s32) = COPY $w1
%zero:_(s32) = G_CONSTANT i32 0
%one:_(s32) = G_CONSTANT i32 2
%cmp:_(s1) = G_ICMP intpred(sgt), %0(s32), %one
G_BRCOND %cmp(s1), %bb.2
G_BR %bb.3
bb.2:
%big0:_(s64) = G_SEXT %0
%big1:_(s64) = G_SEXT %1
%add:_(s64) = G_ADD %big0, %big1
%first:_(s32), %second:_(s32) = G_UNMERGE_VALUES %add:_(s64)
G_BR %bb.4
bb.3:
%cst32_10:_(s32) = G_CONSTANT i32 10
bb.4:
%phi:_(s32) = G_PHI %second, %bb.2, %cst32_10, %bb.3
%ext:_(s64) = G_ANYEXT %phi
$x0 = COPY %ext(s64)
RET_ReallyLR implicit $x0
...