; llvm/test/CodeGen/SystemZ/int-usub-03.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test subtraction of a zero-extended i32 from an i64.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @foo()

; Check SLGFR.
define zeroext i1 @f1(i64 %dummy, i64 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgfr %r3, %r4
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  ; Zero-extend the i32 operand, subtract with overflow (expected to
  ; select SLGFR), store the difference, and return the overflow bit.
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check using the overflow result for a branch.
define void @f2(i64 %dummy, i64 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgfr %r3, %r4
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    jgle foo@PLT
; CHECK-NEXT:  .LBB1_1: # %exit
; CHECK-NEXT:    br %r14
  ; The overflow bit feeds a conditional branch directly; the expected
  ; code branches on the condition code from SLGFR (no IPM sequence).
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  br i1 %obit, label %call, label %exit

call:
  ; Taken when the subtraction overflowed.
  tail call i64 @foo()
  br label %exit

exit:
  ret void
}

; ... and the same with the inverted direction.
define void @f3(i64 %dummy, i64 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgfr %r3, %r4
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    jgnle foo@PLT
; CHECK-NEXT:  .LBB2_1: # %exit
; CHECK-NEXT:    br %r14
  ; Same as f2 but with the branch successors swapped, so the inverted
  ; condition (jgnle rather than jgle) is expected.
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  br i1 %obit, label %exit, label %call

call:
  ; Taken when the subtraction did NOT overflow.
  tail call i64 @foo()
  br label %exit

exit:
  ret void
}

; Check SLGF with no displacement.
define zeroext i1 @f4(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  ; The zext'ed operand comes from memory, so the load should fold into
  ; the memory form SLGF with a zero displacement.
  %b = load i32, ptr %src
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the aligned SLGF range.
define zeroext i1 @f5(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, 524284(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  ; Offset 131071 * 4 = 524284: the largest positive i32-aligned
  ; displacement that still fits SLGF's 20-bit signed field.
  %ptr = getelementptr i32, ptr %src, i64 131071
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f6(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, 524288
; CHECK-NEXT:    slgf %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  ; Offset 131072 * 4 = 524288 no longer fits the displacement field,
  ; so the base register is adjusted first (AGFI + SLGF with disp 0).
  %ptr = getelementptr i32, ptr %src, i64 131072
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the negative aligned SLGF range.
define zeroext i1 @f7(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, -4(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  ; Smallest-magnitude negative displacement: element -1 => -4 bytes.
  %ptr = getelementptr i32, ptr %src, i64 -1
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the low end of the SLGF range.
define zeroext i1 @f8(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, -524288(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  ; Offset -131072 * 4 = -524288: the most negative displacement that
  ; still fits SLGF's 20-bit signed field.
  %ptr = getelementptr i32, ptr %src, i64 -131072
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f9(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, -524292
; CHECK-NEXT:    slgf %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  ; Offset -131073 * 4 = -524292 is out of displacement range, so the
  ; base register is adjusted first (AGFI + SLGF with disp 0).
  %ptr = getelementptr i32, ptr %src, i64 -131073
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check that SLGF allows an index.
define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, ptr %res) {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r4, 524284(%r3,%r2)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r4, 0(%r5)
; CHECK-NEXT:    br %r14
  ; Build the address as base + index + displacement so the expected
  ; SLGF uses both a base and an index register.
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524284
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check that subtractions of spilled values can use SLGF rather than SLGFR.
define zeroext i1 @f11(ptr %ptr0) {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r6, %r15, 48(%r15)
; CHECK-NEXT:    .cfi_offset %r6, -112
; CHECK-NEXT:    .cfi_offset %r7, -104
; CHECK-NEXT:    .cfi_offset %r8, -96
; CHECK-NEXT:    .cfi_offset %r9, -88
; CHECK-NEXT:    .cfi_offset %r10, -80
; CHECK-NEXT:    .cfi_offset %r11, -72
; CHECK-NEXT:    .cfi_offset %r12, -64
; CHECK-NEXT:    .cfi_offset %r13, -56
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    aghi %r15, -168
; CHECK-NEXT:    .cfi_def_cfa_offset 328
; CHECK-NEXT:    lhi %r0, 100
; CHECK-NEXT:    lhi %r12, 100
; CHECK-NEXT:    a %r12, 0(%r2)
; CHECK-NEXT:    lhi %r13, 100
; CHECK-NEXT:    a %r13, 8(%r2)
; CHECK-NEXT:    lhi %r6, 100
; CHECK-NEXT:    a %r6, 16(%r2)
; CHECK-NEXT:    lhi %r7, 100
; CHECK-NEXT:    a %r7, 24(%r2)
; CHECK-NEXT:    lhi %r8, 100
; CHECK-NEXT:    a %r8, 32(%r2)
; CHECK-NEXT:    lhi %r9, 100
; CHECK-NEXT:    a %r9, 40(%r2)
; CHECK-NEXT:    lhi %r10, 100
; CHECK-NEXT:    a %r10, 48(%r2)
; CHECK-NEXT:    lhi %r11, 100
; CHECK-NEXT:    a %r11, 56(%r2)
; CHECK-NEXT:    lhi %r1, 100
; CHECK-NEXT:    a %r1, 64(%r2)
; CHECK-NEXT:    st %r1, 160(%r15) # 4-byte Folded Spill
; CHECK-NEXT:    a %r0, 72(%r2)
; CHECK-NEXT:    st %r0, 164(%r15) # 4-byte Folded Spill
; CHECK-NEXT:    st %r12, 0(%r2)
; CHECK-NEXT:    st %r13, 8(%r2)
; CHECK-NEXT:    st %r6, 16(%r2)
; CHECK-NEXT:    st %r7, 24(%r2)
; CHECK-NEXT:    st %r8, 32(%r2)
; CHECK-NEXT:    st %r9, 40(%r2)
; CHECK-NEXT:    st %r10, 48(%r2)
; CHECK-NEXT:    st %r11, 56(%r2)
; CHECK-NEXT:    st %r1, 64(%r2)
; CHECK-NEXT:    st %r0, 72(%r2)
; CHECK-NEXT:    brasl %r14, foo@PLT
; CHECK-NEXT:    slgfr %r2, %r12
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    srl %r0, 31
; CHECK-NEXT:    slgfr %r2, %r13
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r6
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r7
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r8
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r9
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r10
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r11
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgf %r2, 160(%r15) # 4-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgf %r2, 164(%r15) # 4-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 0
; CHECK-NEXT:    lmg %r6, %r15, 216(%r15)
; CHECK-NEXT:    br %r14
  ; Ten i32 values are loaded, modified, and kept live across the call
  ; to @foo.  There are more live values than allocatable GPRs, so at
  ; least some of them must be spilled; the expected code reloads those
  ; directly with the memory form SLGF rather than reload + SLGFR.
  %ptr1 = getelementptr i32, ptr %ptr0, i64 2
  %ptr2 = getelementptr i32, ptr %ptr0, i64 4
  %ptr3 = getelementptr i32, ptr %ptr0, i64 6
  %ptr4 = getelementptr i32, ptr %ptr0, i64 8
  %ptr5 = getelementptr i32, ptr %ptr0, i64 10
  %ptr6 = getelementptr i32, ptr %ptr0, i64 12
  %ptr7 = getelementptr i32, ptr %ptr0, i64 14
  %ptr8 = getelementptr i32, ptr %ptr0, i64 16
  %ptr9 = getelementptr i32, ptr %ptr0, i64 18

  %val0 = load i32, ptr %ptr0
  %val1 = load i32, ptr %ptr1
  %val2 = load i32, ptr %ptr2
  %val3 = load i32, ptr %ptr3
  %val4 = load i32, ptr %ptr4
  %val5 = load i32, ptr %ptr5
  %val6 = load i32, ptr %ptr6
  %val7 = load i32, ptr %ptr7
  %val8 = load i32, ptr %ptr8
  %val9 = load i32, ptr %ptr9

  ; The adds and stores keep the %frob values live both before and
  ; after the call, forcing them into callee-saved regs or spill slots.
  %frob0 = add i32 %val0, 100
  %frob1 = add i32 %val1, 100
  %frob2 = add i32 %val2, 100
  %frob3 = add i32 %val3, 100
  %frob4 = add i32 %val4, 100
  %frob5 = add i32 %val5, 100
  %frob6 = add i32 %val6, 100
  %frob7 = add i32 %val7, 100
  %frob8 = add i32 %val8, 100
  %frob9 = add i32 %val9, 100

  store i32 %frob0, ptr %ptr0
  store i32 %frob1, ptr %ptr1
  store i32 %frob2, ptr %ptr2
  store i32 %frob3, ptr %ptr3
  store i32 %frob4, ptr %ptr4
  store i32 %frob5, ptr %ptr5
  store i32 %frob6, ptr %ptr6
  store i32 %frob7, ptr %ptr7
  store i32 %frob8, ptr %ptr8
  store i32 %frob9, ptr %ptr9

  %ret = call i64 @foo()

  ; Chain ten overflowing subtractions from @foo's return value and
  ; OR all the overflow bits together into the final result.
  %ext0 = zext i32 %frob0 to i64
  %ext1 = zext i32 %frob1 to i64
  %ext2 = zext i32 %frob2 to i64
  %ext3 = zext i32 %frob3 to i64
  %ext4 = zext i32 %frob4 to i64
  %ext5 = zext i32 %frob5 to i64
  %ext6 = zext i32 %frob6 to i64
  %ext7 = zext i32 %frob7 to i64
  %ext8 = zext i32 %frob8 to i64
  %ext9 = zext i32 %frob9 to i64

  %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %ret, i64 %ext0)
  %add0 = extractvalue {i64, i1} %t0, 0
  %obit0 = extractvalue {i64, i1} %t0, 1
  %t1 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add0, i64 %ext1)
  %add1 = extractvalue {i64, i1} %t1, 0
  %obit1 = extractvalue {i64, i1} %t1, 1
  %res1 = or i1 %obit0, %obit1
  %t2 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add1, i64 %ext2)
  %add2 = extractvalue {i64, i1} %t2, 0
  %obit2 = extractvalue {i64, i1} %t2, 1
  %res2 = or i1 %res1, %obit2
  %t3 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add2, i64 %ext3)
  %add3 = extractvalue {i64, i1} %t3, 0
  %obit3 = extractvalue {i64, i1} %t3, 1
  %res3 = or i1 %res2, %obit3
  %t4 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add3, i64 %ext4)
  %add4 = extractvalue {i64, i1} %t4, 0
  %obit4 = extractvalue {i64, i1} %t4, 1
  %res4 = or i1 %res3, %obit4
  %t5 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add4, i64 %ext5)
  %add5 = extractvalue {i64, i1} %t5, 0
  %obit5 = extractvalue {i64, i1} %t5, 1
  %res5 = or i1 %res4, %obit5
  %t6 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add5, i64 %ext6)
  %add6 = extractvalue {i64, i1} %t6, 0
  %obit6 = extractvalue {i64, i1} %t6, 1
  %res6 = or i1 %res5, %obit6
  %t7 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add6, i64 %ext7)
  %add7 = extractvalue {i64, i1} %t7, 0
  %obit7 = extractvalue {i64, i1} %t7, 1
  %res7 = or i1 %res6, %obit7
  %t8 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add7, i64 %ext8)
  %add8 = extractvalue {i64, i1} %t8, 0
  %obit8 = extractvalue {i64, i1} %t8, 1
  %res8 = or i1 %res7, %obit8
  %t9 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add8, i64 %ext9)
  %add9 = extractvalue {i64, i1} %t9, 0
  %obit9 = extractvalue {i64, i1} %t9, 1
  %res9 = or i1 %res8, %obit9

  ret i1 %res9
}

declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone