# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
---
name: cmp_imm_32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $w0
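; The 12-bit immediate 42 fits SUBSWri, and the compare result is materialized with CSINCWr.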
; CHECK-LABEL: name: cmp_imm_32
; CHECK: liveins: $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 42, 0, implicit-def $nzcv
; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:gpr(s32) = COPY $w0
%1:gpr(s32) = G_CONSTANT i32 42
%5:gpr(s32) = G_ICMP intpred(eq), %0(s32), %1
$w0 = COPY %5(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_imm_64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
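; Same as above for 64 bits: the immediate fits SUBSXri.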
; CHECK-LABEL: name: cmp_imm_64
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK-NEXT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 42, 0, implicit-def $nzcv
; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 42
%5:gpr(s32) = G_ICMP intpred(eq), %0(s64), %1
$w0 = COPY %5(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_imm_out_of_range
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
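; 13132 does not fit the 12-bit (optionally shifted) immediate form, so the constant is materialized and SUBSXrr is used.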
; CHECK-LABEL: name: cmp_imm_out_of_range
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 13132
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[SUBREG_TO_REG]], implicit-def $nzcv
; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 13132
%5:gpr(s32) = G_ICMP intpred(eq), %0(s64), %1
$w0 = COPY %5(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_imm_lookthrough
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $w0
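; The constant is found by looking through the G_TRUNC, so we can still select SUBSWri.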
; CHECK-LABEL: name: cmp_imm_lookthrough
; CHECK: liveins: $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 42, 0, implicit-def $nzcv
; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:gpr(s32) = COPY $w0
%1:gpr(s64) = G_CONSTANT i64 42
%2:gpr(s32) = G_TRUNC %1(s64)
%5:gpr(s32) = G_ICMP intpred(eq), %0(s32), %2
$w0 = COPY %5(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_imm_lookthrough_bad_trunc
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $w0
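; 0x1000000000 truncates to 0, so the compare should be against immediate 0.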
; CHECK-LABEL: name: cmp_imm_lookthrough_bad_trunc
; CHECK: liveins: $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 0, 0, implicit-def $nzcv
; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
; CHECK-NEXT: RET_ReallyLR implicit $w0
%0:gpr(s32) = COPY $w0
%1:gpr(s64) = G_CONSTANT i64 68719476736 ; 0x1000000000
%2:gpr(s32) = G_TRUNC %1(s64) ; Value truncates to 0
%5:gpr(s32) = G_ICMP intpred(eq), %0(s32), %2
$w0 = COPY %5(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_neg_imm_32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $w0
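; Comparing against -10 is selected as ADDSWri with the negated immediate 10.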
; CHECK-LABEL: name: cmp_neg_imm_32
; CHECK: liveins: $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %reg0:gpr32sp = COPY $w0
; CHECK-NEXT: [[ADDSWri:%[0-9]+]]:gpr32 = ADDSWri %reg0, 10, 0, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %cmp
; CHECK-NEXT: RET_ReallyLR implicit $w0
%reg0:gpr(s32) = COPY $w0
%cst:gpr(s32) = G_CONSTANT i32 -10
%cmp:gpr(s32) = G_ICMP intpred(eq), %reg0(s32), %cst
$w0 = COPY %cmp(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_neg_imm_64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $x0
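; Same as above for 64 bits: select ADDSXri with immediate 10.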
; CHECK-LABEL: name: cmp_neg_imm_64
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
; CHECK-NEXT: [[ADDSXri:%[0-9]+]]:gpr64 = ADDSXri %reg0, 10, 0, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %cmp
; CHECK-NEXT: RET_ReallyLR implicit $w0
%reg0:gpr(s64) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 -10
%cmp:gpr(s32) = G_ICMP intpred(eq), %reg0(s64), %cst
$w0 = COPY %cmp(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_neg_imm_invalid
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.1:
liveins: $w0
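; The negated immediate 5000 does not fit the 12-bit form, so the ADDS fold does not apply.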
; CHECK-LABEL: name: cmp_neg_imm_invalid
; CHECK: liveins: $w0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %reg0:gpr32 = COPY $w0
; CHECK-NEXT: %cst:gpr32 = MOVi32imm -5000
; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg0, %cst, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %cmp
; CHECK-NEXT: RET_ReallyLR implicit $w0
%reg0:gpr(s32) = COPY $w0
%cst:gpr(s32) = G_CONSTANT i32 -5000
%cmp:gpr(s32) = G_ICMP intpred(eq), %reg0(s32), %cst
$w0 = COPY %cmp(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_arith_extended_s64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0:
liveins: $w0, $x1
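; The G_ZEXT + G_SHL by 2 folds into the extended-register form SUBSXrx.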
; CHECK-LABEL: name: cmp_arith_extended_s64
; CHECK: liveins: $w0, $x1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %reg0:gpr32 = COPY $w0
; CHECK-NEXT: %reg1:gpr64sp = COPY $x1
; CHECK-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %reg1, %reg0, 18, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %cmp
; CHECK-NEXT: RET_ReallyLR implicit $w0
%reg0:gpr(s32) = COPY $w0
%reg1:gpr(s64) = COPY $x1
%ext:gpr(s64) = G_ZEXT %reg0(s32)
%cst:gpr(s64) = G_CONSTANT i64 2
%shift:gpr(s64) = G_SHL %ext, %cst(s64)
%cmp:gpr(s32) = G_ICMP intpred(ugt), %reg1(s64), %shift
$w0 = COPY %cmp(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_arith_extended_s32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0:
liveins: $w0, $w1, $h0
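; The G_ZEXT + G_SHL by 2 folds into the extended-register form SUBSWrx.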
; CHECK-LABEL: name: cmp_arith_extended_s32
; CHECK: liveins: $w0, $w1, $h0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, $h0, %subreg.hsub
; CHECK-NEXT: %reg0:gpr32all = COPY [[SUBREG_TO_REG]]
; CHECK-NEXT: %reg1:gpr32sp = COPY $w1
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY %reg0
; CHECK-NEXT: [[SUBSWrx:%[0-9]+]]:gpr32 = SUBSWrx %reg1, [[COPY]], 10, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %cmp
; CHECK-NEXT: RET_ReallyLR implicit $w0
%reg0:gpr(s16) = COPY $h0
%reg1:gpr(s32) = COPY $w1
%ext:gpr(s32) = G_ZEXT %reg0(s16)
%cst:gpr(s32) = G_CONSTANT i32 2
%shift:gpr(s32) = G_SHL %ext, %cst(s32)
%cmp:gpr(s32) = G_ICMP intpred(ugt), %reg1(s32), %shift
$w0 = COPY %cmp(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_arith_extended_shl_too_large
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0:
liveins: $w0, $x1
; The shift amount on the G_SHL is > 4, so we won't select SUBSXrx.
; CHECK-LABEL: name: cmp_arith_extended_shl_too_large
; CHECK: liveins: $w0, $x1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %reg0:gpr32 = COPY $w0
; CHECK-NEXT: %reg1:gpr64 = COPY $x1
; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %reg0, 0
; CHECK-NEXT: %ext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %reg1, %ext, 5, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %cmp
; CHECK-NEXT: RET_ReallyLR implicit $w0
%reg0:gpr(s32) = COPY $w0
%reg1:gpr(s64) = COPY $x1
%ext:gpr(s64) = G_ZEXT %reg0(s32)
%cst:gpr(s64) = G_CONSTANT i64 5
%shift:gpr(s64) = G_SHL %ext, %cst(s64)
%cmp:gpr(s32) = G_ICMP intpred(ugt), %reg1(s64), %shift
$w0 = COPY %cmp(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_add_rhs
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
liveins: $w0, $w1, $w2
; The CSINC should use the add's RHS.
; CHECK-LABEL: name: cmp_add_rhs
; CHECK: liveins: $w0, $w1, $w2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cmp_lhs:gpr32 = COPY $w0
; CHECK-NEXT: %cmp_rhs:gpr32 = COPY $w1
; CHECK-NEXT: %add_rhs:gpr32 = COPY $w2
; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
; CHECK-NEXT: %add:gpr32 = CSINCWr %add_rhs, %add_rhs, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %add
; CHECK-NEXT: RET_ReallyLR implicit $w0
%cmp_lhs:gpr(s32) = COPY $w0
%cmp_rhs:gpr(s32) = COPY $w1
%add_rhs:gpr(s32) = COPY $w2
%cmp:gpr(s32) = G_ICMP intpred(eq), %cmp_lhs(s32), %cmp_rhs
%add:gpr(s32) = G_ADD %cmp, %add_rhs
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_add_lhs
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
liveins: $w0, $w1, $w2
; The CSINC should use the add's LHS.
; CHECK-LABEL: name: cmp_add_lhs
; CHECK: liveins: $w0, $w1, $w2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cmp_lhs:gpr32 = COPY $w0
; CHECK-NEXT: %cmp_rhs:gpr32 = COPY $w1
; CHECK-NEXT: %add_lhs:gpr32 = COPY $w2
; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
; CHECK-NEXT: %add:gpr32 = CSINCWr %add_lhs, %add_lhs, 1, implicit $nzcv
; CHECK-NEXT: $w0 = COPY %add
; CHECK-NEXT: RET_ReallyLR implicit $w0
%cmp_lhs:gpr(s32) = COPY $w0
%cmp_rhs:gpr(s32) = COPY $w1
%add_lhs:gpr(s32) = COPY $w2
%cmp:gpr(s32) = G_ICMP intpred(eq), %cmp_lhs(s32), %cmp_rhs
%add:gpr(s32) = G_ADD %add_lhs, %cmp
$w0 = COPY %add(s32)
RET_ReallyLR implicit $w0
...
---
name: cmp_add_lhs_vector
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
liveins: $q0, $q1, $q2
; We don't emit CSINC with vectors, so there should be no optimization here.
; CHECK-LABEL: name: cmp_add_lhs_vector
; CHECK: liveins: $q0, $q1, $q2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cmp_lhs:fpr128 = COPY $q0
; CHECK-NEXT: %cmp_rhs:fpr128 = COPY $q1
; CHECK-NEXT: %add_lhs:fpr128 = COPY $q2
; CHECK-NEXT: %cmp:fpr128 = CMEQv4i32 %cmp_lhs, %cmp_rhs
; CHECK-NEXT: %add:fpr128 = ADDv4i32 %add_lhs, %cmp
; CHECK-NEXT: $q0 = COPY %add
; CHECK-NEXT: RET_ReallyLR implicit $q0
%cmp_lhs:fpr(<4 x s32>) = COPY $q0
%cmp_rhs:fpr(<4 x s32>) = COPY $q1
%add_lhs:fpr(<4 x s32>) = COPY $q2
%cmp:fpr(<4 x s32>) = G_ICMP intpred(eq), %cmp_lhs(<4 x s32>), %cmp_rhs
%add:fpr(<4 x s32>) = G_ADD %add_lhs, %cmp
$q0 = COPY %add(<4 x s32>)
RET_ReallyLR implicit $q0
...
---
name: cmp_add_rhs_64
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1, $x2
; The CSINC should use the add's RHS.
; CHECK-LABEL: name: cmp_add_rhs_64
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cmp_lhs:gpr64 = COPY $x0
; CHECK-NEXT: %cmp_rhs:gpr64 = COPY $x1
; CHECK-NEXT: %add_rhs:gpr64 = COPY $x2
; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
; CHECK-NEXT: %add:gpr64 = CSINCXr %add_rhs, %add_rhs, 1, implicit $nzcv
; CHECK-NEXT: $x0 = COPY %add
; CHECK-NEXT: RET_ReallyLR implicit $x0
%cmp_lhs:gpr(s64) = COPY $x0
%cmp_rhs:gpr(s64) = COPY $x1
%add_rhs:gpr(s64) = COPY $x2
%cmp:gpr(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
%cmp_ext:gpr(s64) = G_ZEXT %cmp
%add:gpr(s64) = G_ADD %cmp_ext, %add_rhs
$x0 = COPY %add(s64)
RET_ReallyLR implicit $x0
...
---
name: cmp_add_rhs_64_zext_multi_use
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1, $x2
; The ZExt is used more than once so don't fold.
; CHECK-LABEL: name: cmp_add_rhs_64_zext_multi_use
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cmp_lhs:gpr64 = COPY $x0
; CHECK-NEXT: %cmp_rhs:gpr64 = COPY $x1
; CHECK-NEXT: %add_rhs:gpr64 = COPY $x2
; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %cmp, 0
; CHECK-NEXT: %cmp_ext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-NEXT: %add:gpr64 = ADDXrr %cmp_ext, %add_rhs
; CHECK-NEXT: %or:gpr64 = ORRXrr %add, %cmp_ext
; CHECK-NEXT: $x0 = COPY %or
; CHECK-NEXT: RET_ReallyLR implicit $x0
%cmp_lhs:gpr(s64) = COPY $x0
%cmp_rhs:gpr(s64) = COPY $x1
%add_rhs:gpr(s64) = COPY $x2
%cmp:gpr(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
%cmp_ext:gpr(s64) = G_ZEXT %cmp
%add:gpr(s64) = G_ADD %cmp_ext, %add_rhs
%or:gpr(s64) = G_OR %add, %cmp_ext
$x0 = COPY %or(s64)
RET_ReallyLR implicit $x0
...
---
name: cmp_add_rhs_64_cmp_multi_use
legalized: true
regBankSelected: true
tracksRegLiveness: true
machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1, $x2
; The cmp is used more than once so don't fold.
; CHECK-LABEL: name: cmp_add_rhs_64_cmp_multi_use
; CHECK: liveins: $x0, $x1, $x2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %cmp_lhs:gpr64 = COPY $x0
; CHECK-NEXT: %cmp_rhs:gpr64 = COPY $x1
; CHECK-NEXT: %add_rhs:gpr64 = COPY $x2
; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, %cmp, 0
; CHECK-NEXT: %cmp_ext:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-NEXT: %add:gpr64 = ADDXrr %cmp_ext, %add_rhs
; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], %cmp, %subreg.sub_32
; CHECK-NEXT: %cmp_ext2:gpr64 = SBFMXri [[INSERT_SUBREG]], 0, 31
; CHECK-NEXT: %or:gpr64 = ORRXrr %add, %cmp_ext2
; CHECK-NEXT: $x0 = COPY %or
; CHECK-NEXT: RET_ReallyLR implicit $x0
%cmp_lhs:gpr(s64) = COPY $x0
%cmp_rhs:gpr(s64) = COPY $x1
%add_rhs:gpr(s64) = COPY $x2
%cmp:gpr(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
%cmp_ext:gpr(s64) = G_ZEXT %cmp
%add:gpr(s64) = G_ADD %cmp_ext, %add_rhs
%cmp_ext2:gpr(s64) = G_SEXT %cmp
%or:gpr(s64) = G_OR %add, %cmp_ext2
$x0 = COPY %or(s64)
RET_ReallyLR implicit $x0