llvm/test/CodeGen/LoongArch/smul-with-overflow.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefix=LA64

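; These tests exercise the lowering of llvm.smul.with.overflow for i64 and
; i128. Signed multiplication overflows iff the high half of the double-width
; product differs from the sign extension of the low half; for i64 on LA64
; that is (informally):
;   overflow = (mulh.d(x, y) != (mul.d(x, y) >> 63))
; so a single mulh.d plus a sign-bit comparison suffices, while LA32 must
; expand the i64 multiply into 32-bit partial products.
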
define zeroext i1 @smuloi64(i64 %v1, i64 %v2, ptr %res) {
; LA32-LABEL: smuloi64:
; LA32:       # %bb.0:
; LA32-NEXT:    mulh.wu $a5, $a0, $a2
; LA32-NEXT:    mul.w $a6, $a1, $a2
; LA32-NEXT:    add.w $a5, $a6, $a5
; LA32-NEXT:    sltu $a6, $a5, $a6
; LA32-NEXT:    mulh.wu $a7, $a1, $a2
; LA32-NEXT:    add.w $a6, $a7, $a6
; LA32-NEXT:    mul.w $a7, $a0, $a3
; LA32-NEXT:    add.w $a5, $a7, $a5
; LA32-NEXT:    sltu $a7, $a5, $a7
; LA32-NEXT:    mulh.wu $t0, $a0, $a3
; LA32-NEXT:    add.w $a7, $t0, $a7
; LA32-NEXT:    add.w $a7, $a6, $a7
; LA32-NEXT:    mul.w $t0, $a1, $a3
; LA32-NEXT:    add.w $t1, $t0, $a7
; LA32-NEXT:    srai.w $t2, $a1, 31
; LA32-NEXT:    mul.w $t3, $a2, $t2
; LA32-NEXT:    srai.w $t4, $a3, 31
; LA32-NEXT:    mul.w $t5, $t4, $a0
; LA32-NEXT:    add.w $t6, $t5, $t3
; LA32-NEXT:    add.w $t7, $t1, $t6
; LA32-NEXT:    sltu $t8, $t7, $t1
; LA32-NEXT:    sltu $t0, $t1, $t0
; LA32-NEXT:    sltu $a6, $a7, $a6
; LA32-NEXT:    mulh.wu $a7, $a1, $a3
; LA32-NEXT:    add.w $a6, $a7, $a6
; LA32-NEXT:    add.w $a6, $a6, $t0
; LA32-NEXT:    mulh.wu $a7, $a2, $t2
; LA32-NEXT:    add.w $a7, $a7, $t3
; LA32-NEXT:    mul.w $a3, $a3, $t2
; LA32-NEXT:    add.w $a3, $a7, $a3
; LA32-NEXT:    mul.w $a1, $t4, $a1
; LA32-NEXT:    mulh.wu $a7, $t4, $a0
; LA32-NEXT:    add.w $a1, $a7, $a1
; LA32-NEXT:    add.w $a1, $a1, $t5
; LA32-NEXT:    add.w $a1, $a1, $a3
; LA32-NEXT:    sltu $a3, $t6, $t5
; LA32-NEXT:    add.w $a1, $a1, $a3
; LA32-NEXT:    add.w $a1, $a6, $a1
; LA32-NEXT:    add.w $a1, $a1, $t8
; LA32-NEXT:    srai.w $a3, $a5, 31
; LA32-NEXT:    xor $a1, $a1, $a3
; LA32-NEXT:    xor $a3, $t7, $a3
; LA32-NEXT:    or $a1, $a3, $a1
; LA32-NEXT:    sltu $a1, $zero, $a1
; LA32-NEXT:    mul.w $a0, $a0, $a2
; LA32-NEXT:    st.w $a0, $a4, 0
; LA32-NEXT:    st.w $a5, $a4, 4
; LA32-NEXT:    move $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: smuloi64:
; LA64:       # %bb.0:
; LA64-NEXT:    mulh.d $a3, $a0, $a1
; LA64-NEXT:    mul.d $a1, $a0, $a1
; LA64-NEXT:    srai.d $a0, $a1, 63
; LA64-NEXT:    xor $a0, $a3, $a0
; LA64-NEXT:    sltu $a0, $zero, $a0
; LA64-NEXT:    st.d $a1, $a2, 0
; LA64-NEXT:    ret
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

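; The same identity scales to i128: the product overflows iff the upper 128
; bits of the full 256-bit result differ from the sign extension of the lower
; 128 bits. On LA32 this requires a full 128x128->256-bit partial-product
; expansion, hence the callee-saved register saves and stack spills below.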
define zeroext i1 @smuloi128(i128 %v1, i128 %v2, ptr %res) {
; LA32-LABEL: smuloi128:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -96
; LA32-NEXT:    .cfi_def_cfa_offset 96
; LA32-NEXT:    st.w $ra, $sp, 92 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 88 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s0, $sp, 84 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s1, $sp, 80 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s2, $sp, 76 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s3, $sp, 72 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s4, $sp, 68 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s5, $sp, 64 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s6, $sp, 60 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s7, $sp, 56 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s8, $sp, 52 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    .cfi_offset 23, -12
; LA32-NEXT:    .cfi_offset 24, -16
; LA32-NEXT:    .cfi_offset 25, -20
; LA32-NEXT:    .cfi_offset 26, -24
; LA32-NEXT:    .cfi_offset 27, -28
; LA32-NEXT:    .cfi_offset 28, -32
; LA32-NEXT:    .cfi_offset 29, -36
; LA32-NEXT:    .cfi_offset 30, -40
; LA32-NEXT:    .cfi_offset 31, -44
; LA32-NEXT:    st.w $a2, $sp, 48 # 4-byte Folded Spill
; LA32-NEXT:    ld.w $t0, $a1, 12
; LA32-NEXT:    ld.w $t1, $a1, 8
; LA32-NEXT:    ld.w $a5, $a0, 12
; LA32-NEXT:    ld.w $a7, $a1, 0
; LA32-NEXT:    ld.w $a3, $a0, 0
; LA32-NEXT:    ld.w $a6, $a0, 4
; LA32-NEXT:    ld.w $a4, $a0, 8
; LA32-NEXT:    ld.w $t3, $a1, 4
; LA32-NEXT:    mulh.wu $a0, $a3, $a7
; LA32-NEXT:    mul.w $a1, $a6, $a7
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    sltu $a1, $a0, $a1
; LA32-NEXT:    mulh.wu $t2, $a6, $a7
; LA32-NEXT:    add.w $a1, $t2, $a1
; LA32-NEXT:    mul.w $t2, $a3, $t3
; LA32-NEXT:    add.w $a0, $t2, $a0
; LA32-NEXT:    st.w $a0, $sp, 44 # 4-byte Folded Spill
; LA32-NEXT:    sltu $t2, $a0, $t2
; LA32-NEXT:    mulh.wu $t4, $a3, $t3
; LA32-NEXT:    add.w $t2, $t4, $t2
; LA32-NEXT:    add.w $t2, $a1, $t2
; LA32-NEXT:    mul.w $t4, $a6, $t3
; LA32-NEXT:    add.w $t5, $t4, $t2
; LA32-NEXT:    sltu $t4, $t5, $t4
; LA32-NEXT:    sltu $a1, $t2, $a1
; LA32-NEXT:    mulh.wu $t2, $a6, $t3
; LA32-NEXT:    add.w $a1, $t2, $a1
; LA32-NEXT:    add.w $a1, $a1, $t4
; LA32-NEXT:    mulh.wu $t2, $a4, $a7
; LA32-NEXT:    mul.w $t4, $a5, $a7
; LA32-NEXT:    add.w $t2, $t4, $t2
; LA32-NEXT:    mul.w $t6, $a4, $t3
; LA32-NEXT:    add.w $t7, $t6, $t2
; LA32-NEXT:    add.w $a1, $t7, $a1
; LA32-NEXT:    mul.w $t8, $a4, $a7
; LA32-NEXT:    add.w $t5, $t8, $t5
; LA32-NEXT:    sltu $t8, $t5, $t8
; LA32-NEXT:    add.w $a1, $a1, $t8
; LA32-NEXT:    sltu $fp, $a1, $t7
; LA32-NEXT:    xor $s0, $a1, $t7
; LA32-NEXT:    sltui $s0, $s0, 1
; LA32-NEXT:    masknez $fp, $fp, $s0
; LA32-NEXT:    maskeqz $t8, $t8, $s0
; LA32-NEXT:    or $t8, $t8, $fp
; LA32-NEXT:    sltu $t2, $t2, $t4
; LA32-NEXT:    mulh.wu $t4, $a5, $a7
; LA32-NEXT:    add.w $t4, $t4, $t2
; LA32-NEXT:    sltu $t2, $t7, $t6
; LA32-NEXT:    mulh.wu $t6, $a4, $t3
; LA32-NEXT:    add.w $t2, $t6, $t2
; LA32-NEXT:    add.w $fp, $t4, $t2
; LA32-NEXT:    mul.w $t6, $a5, $t3
; LA32-NEXT:    add.w $s0, $t6, $fp
; LA32-NEXT:    add.w $s1, $s0, $t8
; LA32-NEXT:    mulh.wu $t2, $a3, $t1
; LA32-NEXT:    mul.w $t7, $a6, $t1
; LA32-NEXT:    add.w $t8, $t7, $t2
; LA32-NEXT:    mul.w $s2, $a3, $t0
; LA32-NEXT:    add.w $s3, $s2, $t8
; LA32-NEXT:    add.w $t2, $s3, $a1
; LA32-NEXT:    mul.w $s4, $a3, $t1
; LA32-NEXT:    add.w $a0, $s4, $t5
; LA32-NEXT:    st.w $a0, $sp, 40 # 4-byte Folded Spill
; LA32-NEXT:    sltu $t5, $a0, $s4
; LA32-NEXT:    add.w $a0, $t2, $t5
; LA32-NEXT:    st.w $a0, $sp, 36 # 4-byte Folded Spill
; LA32-NEXT:    sltu $s4, $a0, $s3
; LA32-NEXT:    xor $s5, $a0, $s3
; LA32-NEXT:    sltui $s5, $s5, 1
; LA32-NEXT:    masknez $s4, $s4, $s5
; LA32-NEXT:    maskeqz $t5, $t5, $s5
; LA32-NEXT:    or $t5, $t5, $s4
; LA32-NEXT:    sltu $t7, $t8, $t7
; LA32-NEXT:    mulh.wu $t8, $a6, $t1
; LA32-NEXT:    add.w $s4, $t8, $t7
; LA32-NEXT:    sltu $t7, $s3, $s2
; LA32-NEXT:    mulh.wu $t8, $a3, $t0
; LA32-NEXT:    add.w $t7, $t8, $t7
; LA32-NEXT:    add.w $s2, $s4, $t7
; LA32-NEXT:    mul.w $s3, $a6, $t0
; LA32-NEXT:    add.w $s6, $s3, $s2
; LA32-NEXT:    add.w $s7, $s6, $t5
; LA32-NEXT:    add.w $s5, $s1, $s7
; LA32-NEXT:    mul.w $s8, $a4, $t1
; LA32-NEXT:    add.w $ra, $s8, $s5
; LA32-NEXT:    srai.w $t8, $a5, 31
; LA32-NEXT:    mul.w $t7, $a7, $t8
; LA32-NEXT:    st.w $a7, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT:    srai.w $t5, $t0, 31
; LA32-NEXT:    sltu $s5, $s5, $s1
; LA32-NEXT:    sltu $s1, $s1, $s0
; LA32-NEXT:    sltu $s0, $s0, $t6
; LA32-NEXT:    mul.w $t2, $a3, $t5
; LA32-NEXT:    st.w $a3, $sp, 24 # 4-byte Folded Spill
; LA32-NEXT:    sltu $t4, $fp, $t4
; LA32-NEXT:    mulh.wu $fp, $a5, $t3
; LA32-NEXT:    st.w $a5, $sp, 0 # 4-byte Folded Spill
; LA32-NEXT:    add.w $t4, $fp, $t4
; LA32-NEXT:    add.w $fp, $t2, $t7
; LA32-NEXT:    add.w $s0, $t4, $s0
; LA32-NEXT:    add.w $a0, $ra, $fp
; LA32-NEXT:    st.w $a0, $sp, 32 # 4-byte Folded Spill
; LA32-NEXT:    add.w $a2, $s0, $s1
; LA32-NEXT:    sltu $s0, $a0, $ra
; LA32-NEXT:    sltu $s1, $s7, $s6
; LA32-NEXT:    sltu $s3, $s6, $s3
; LA32-NEXT:    sltu $s2, $s2, $s4
; LA32-NEXT:    move $s6, $a6
; LA32-NEXT:    st.w $a6, $sp, 16 # 4-byte Folded Spill
; LA32-NEXT:    mulh.wu $s4, $a6, $t0
; LA32-NEXT:    add.w $s2, $s4, $s2
; LA32-NEXT:    add.w $s2, $s2, $s3
; LA32-NEXT:    add.w $s1, $s2, $s1
; LA32-NEXT:    add.w $s1, $a2, $s1
; LA32-NEXT:    add.w $s7, $s1, $s5
; LA32-NEXT:    move $a0, $a4
; LA32-NEXT:    st.w $a4, $sp, 4 # 4-byte Folded Spill
; LA32-NEXT:    mulh.wu $s1, $a4, $t1
; LA32-NEXT:    mul.w $a5, $a5, $t1
; LA32-NEXT:    add.w $a4, $a5, $s1
; LA32-NEXT:    mul.w $a6, $a0, $t0
; LA32-NEXT:    add.w $a1, $a6, $a4
; LA32-NEXT:    sltu $ra, $ra, $s8
; LA32-NEXT:    add.w $s1, $a1, $s7
; LA32-NEXT:    add.w $s8, $s1, $ra
; LA32-NEXT:    move $a0, $t2
; LA32-NEXT:    st.w $t2, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT:    sltu $t6, $fp, $t2
; LA32-NEXT:    mulh.wu $t2, $a7, $t8
; LA32-NEXT:    mul.w $s4, $t3, $t8
; LA32-NEXT:    add.w $a7, $s4, $t2
; LA32-NEXT:    st.w $a7, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    add.w $s3, $t7, $a7
; LA32-NEXT:    mulh.wu $a7, $a3, $t5
; LA32-NEXT:    add.w $t4, $a7, $a0
; LA32-NEXT:    mul.w $s2, $s6, $t5
; LA32-NEXT:    add.w $s1, $t4, $s2
; LA32-NEXT:    add.w $fp, $s1, $s3
; LA32-NEXT:    add.w $a0, $fp, $t6
; LA32-NEXT:    add.w $fp, $s8, $a0
; LA32-NEXT:    add.w $a3, $fp, $s0
; LA32-NEXT:    st.w $a3, $sp, 20 # 4-byte Folded Spill
; LA32-NEXT:    xor $fp, $a3, $s8
; LA32-NEXT:    sltui $fp, $fp, 1
; LA32-NEXT:    sltu $s6, $a3, $s8
; LA32-NEXT:    masknez $s6, $s6, $fp
; LA32-NEXT:    maskeqz $fp, $s0, $fp
; LA32-NEXT:    or $s6, $fp, $s6
; LA32-NEXT:    sltu $fp, $s7, $a2
; LA32-NEXT:    xor $a2, $s7, $a2
; LA32-NEXT:    sltui $a2, $a2, 1
; LA32-NEXT:    masknez $fp, $fp, $a2
; LA32-NEXT:    maskeqz $a2, $s5, $a2
; LA32-NEXT:    or $s0, $a2, $fp
; LA32-NEXT:    sltu $a2, $a4, $a5
; LA32-NEXT:    ld.w $a5, $sp, 0 # 4-byte Folded Reload
; LA32-NEXT:    mulh.wu $a3, $a5, $t1
; LA32-NEXT:    add.w $a2, $a3, $a2
; LA32-NEXT:    sltu $a3, $a1, $a6
; LA32-NEXT:    ld.w $fp, $sp, 4 # 4-byte Folded Reload
; LA32-NEXT:    mulh.wu $a4, $fp, $t0
; LA32-NEXT:    add.w $a3, $a4, $a3
; LA32-NEXT:    sltu $a4, $s8, $a1
; LA32-NEXT:    xor $a1, $s8, $a1
; LA32-NEXT:    sltui $a1, $a1, 1
; LA32-NEXT:    masknez $a4, $a4, $a1
; LA32-NEXT:    maskeqz $a1, $ra, $a1
; LA32-NEXT:    or $a1, $a1, $a4
; LA32-NEXT:    sltu $a4, $a0, $s1
; LA32-NEXT:    xor $a0, $a0, $s1
; LA32-NEXT:    sltui $a0, $a0, 1
; LA32-NEXT:    masknez $a4, $a4, $a0
; LA32-NEXT:    maskeqz $a0, $t6, $a0
; LA32-NEXT:    or $s5, $a0, $a4
; LA32-NEXT:    sltu $a0, $s3, $t7
; LA32-NEXT:    add.w $a0, $t2, $a0
; LA32-NEXT:    ld.w $t2, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT:    sltu $a4, $t4, $t2
; LA32-NEXT:    add.w $s7, $a7, $a4
; LA32-NEXT:    add.w $a3, $a2, $a3
; LA32-NEXT:    sltu $a2, $a3, $a2
; LA32-NEXT:    mulh.wu $a4, $a5, $t0
; LA32-NEXT:    add.w $a2, $a4, $a2
; LA32-NEXT:    mul.w $a4, $a5, $t0
; LA32-NEXT:    move $a6, $a5
; LA32-NEXT:    add.w $a3, $a4, $a3
; LA32-NEXT:    sltu $a4, $a3, $a4
; LA32-NEXT:    add.w $a2, $a2, $a4
; LA32-NEXT:    add.w $a4, $a3, $s0
; LA32-NEXT:    sltu $a3, $a4, $a3
; LA32-NEXT:    add.w $a2, $a2, $a3
; LA32-NEXT:    add.w $s8, $a4, $a1
; LA32-NEXT:    sltu $a1, $s8, $a4
; LA32-NEXT:    add.w $ra, $a2, $a1
; LA32-NEXT:    ld.w $a1, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    sltu $a1, $a1, $s4
; LA32-NEXT:    mulh.wu $a2, $t3, $t8
; LA32-NEXT:    add.w $a1, $a2, $a1
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    sltu $a1, $a0, $a1
; LA32-NEXT:    add.w $a1, $a2, $a1
; LA32-NEXT:    add.w $a0, $s4, $a0
; LA32-NEXT:    sltu $a2, $a0, $s4
; LA32-NEXT:    add.w $a1, $a1, $a2
; LA32-NEXT:    mul.w $a2, $t8, $t1
; LA32-NEXT:    mul.w $a3, $t8, $t0
; LA32-NEXT:    mulh.wu $a4, $t8, $t1
; LA32-NEXT:    add.w $a3, $a4, $a3
; LA32-NEXT:    add.w $a3, $a3, $a2
; LA32-NEXT:    add.w $a3, $s3, $a3
; LA32-NEXT:    add.w $a2, $t7, $a2
; LA32-NEXT:    sltu $a4, $a2, $t7
; LA32-NEXT:    add.w $a3, $a3, $a4
; LA32-NEXT:    add.w $a1, $a1, $a3
; LA32-NEXT:    add.w $a2, $a0, $a2
; LA32-NEXT:    sltu $a0, $a2, $a0
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    sltu $a1, $s1, $s2
; LA32-NEXT:    ld.w $a3, $sp, 16 # 4-byte Folded Reload
; LA32-NEXT:    mulh.wu $a3, $t5, $a3
; LA32-NEXT:    add.w $a1, $a3, $a1
; LA32-NEXT:    add.w $a1, $s7, $a1
; LA32-NEXT:    sltu $a4, $a1, $s7
; LA32-NEXT:    add.w $a3, $a3, $a4
; LA32-NEXT:    add.w $a1, $s2, $a1
; LA32-NEXT:    sltu $a4, $a1, $s2
; LA32-NEXT:    add.w $a3, $a3, $a4
; LA32-NEXT:    mul.w $a4, $fp, $t5
; LA32-NEXT:    mulh.wu $a5, $fp, $t5
; LA32-NEXT:    mul.w $a6, $a6, $t5
; LA32-NEXT:    add.w $a5, $a5, $a4
; LA32-NEXT:    add.w $a5, $a5, $a6
; LA32-NEXT:    add.w $a5, $a5, $s1
; LA32-NEXT:    add.w $a6, $a4, $t2
; LA32-NEXT:    sltu $a4, $a6, $a4
; LA32-NEXT:    add.w $a4, $a5, $a4
; LA32-NEXT:    add.w $a3, $a3, $a4
; LA32-NEXT:    add.w $a4, $a1, $a6
; LA32-NEXT:    sltu $a1, $a4, $a1
; LA32-NEXT:    add.w $a1, $a3, $a1
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    add.w $a1, $a4, $a2
; LA32-NEXT:    sltu $a2, $a1, $a4
; LA32-NEXT:    add.w $a0, $a0, $a2
; LA32-NEXT:    add.w $a2, $a1, $s5
; LA32-NEXT:    sltu $a1, $a2, $a1
; LA32-NEXT:    add.w $a0, $a0, $a1
; LA32-NEXT:    add.w $a0, $ra, $a0
; LA32-NEXT:    add.w $a1, $s8, $a2
; LA32-NEXT:    sltu $a2, $a1, $s8
; LA32-NEXT:    add.w $a0, $a0, $a2
; LA32-NEXT:    add.w $a2, $a1, $s6
; LA32-NEXT:    sltu $a1, $a2, $a1
; LA32-NEXT:    add.w $a0, $a0, $a1
; LA32-NEXT:    ld.w $a4, $sp, 36 # 4-byte Folded Reload
; LA32-NEXT:    srai.w $a1, $a4, 31
; LA32-NEXT:    xor $a0, $a0, $a1
; LA32-NEXT:    ld.w $a3, $sp, 20 # 4-byte Folded Reload
; LA32-NEXT:    xor $a3, $a3, $a1
; LA32-NEXT:    or $a0, $a3, $a0
; LA32-NEXT:    xor $a2, $a2, $a1
; LA32-NEXT:    ld.w $a3, $sp, 32 # 4-byte Folded Reload
; LA32-NEXT:    xor $a1, $a3, $a1
; LA32-NEXT:    or $a1, $a1, $a2
; LA32-NEXT:    or $a0, $a1, $a0
; LA32-NEXT:    ld.w $a1, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $a2, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT:    mul.w $a1, $a2, $a1
; LA32-NEXT:    ld.w $a2, $sp, 48 # 4-byte Folded Reload
; LA32-NEXT:    st.w $a1, $a2, 0
; LA32-NEXT:    ld.w $a1, $sp, 44 # 4-byte Folded Reload
; LA32-NEXT:    st.w $a1, $a2, 4
; LA32-NEXT:    ld.w $a1, $sp, 40 # 4-byte Folded Reload
; LA32-NEXT:    st.w $a1, $a2, 8
; LA32-NEXT:    sltu $a0, $zero, $a0
; LA32-NEXT:    st.w $a4, $a2, 12
; LA32-NEXT:    ld.w $s8, $sp, 52 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s7, $sp, 56 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s6, $sp, 60 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s5, $sp, 64 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s4, $sp, 68 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s3, $sp, 72 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s2, $sp, 76 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s1, $sp, 80 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s0, $sp, 84 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $fp, $sp, 88 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 92 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 96
; LA32-NEXT:    ret
;
; LA64-LABEL: smuloi128:
; LA64:       # %bb.0:
; LA64-NEXT:    mulh.du $a5, $a0, $a2
; LA64-NEXT:    mul.d $a6, $a1, $a2
; LA64-NEXT:    add.d $a5, $a6, $a5
; LA64-NEXT:    sltu $a6, $a5, $a6
; LA64-NEXT:    mulh.du $a7, $a1, $a2
; LA64-NEXT:    add.d $a6, $a7, $a6
; LA64-NEXT:    mul.d $a7, $a0, $a3
; LA64-NEXT:    add.d $a5, $a7, $a5
; LA64-NEXT:    sltu $a7, $a5, $a7
; LA64-NEXT:    mulh.du $t0, $a0, $a3
; LA64-NEXT:    add.d $a7, $t0, $a7
; LA64-NEXT:    add.d $a7, $a6, $a7
; LA64-NEXT:    mul.d $t0, $a1, $a3
; LA64-NEXT:    add.d $t1, $t0, $a7
; LA64-NEXT:    srai.d $t2, $a1, 63
; LA64-NEXT:    mul.d $t3, $a2, $t2
; LA64-NEXT:    srai.d $t4, $a3, 63
; LA64-NEXT:    mul.d $t5, $t4, $a0
; LA64-NEXT:    add.d $t6, $t5, $t3
; LA64-NEXT:    add.d $t7, $t1, $t6
; LA64-NEXT:    sltu $t8, $t7, $t1
; LA64-NEXT:    sltu $t0, $t1, $t0
; LA64-NEXT:    sltu $a6, $a7, $a6
; LA64-NEXT:    mulh.du $a7, $a1, $a3
; LA64-NEXT:    add.d $a6, $a7, $a6
; LA64-NEXT:    add.d $a6, $a6, $t0
; LA64-NEXT:    mulh.du $a7, $a2, $t2
; LA64-NEXT:    add.d $a7, $a7, $t3
; LA64-NEXT:    mul.d $a3, $a3, $t2
; LA64-NEXT:    add.d $a3, $a7, $a3
; LA64-NEXT:    mul.d $a1, $t4, $a1
; LA64-NEXT:    mulh.du $a7, $t4, $a0
; LA64-NEXT:    add.d $a1, $a7, $a1
; LA64-NEXT:    add.d $a1, $a1, $t5
; LA64-NEXT:    add.d $a1, $a1, $a3
; LA64-NEXT:    sltu $a3, $t6, $t5
; LA64-NEXT:    add.d $a1, $a1, $a3
; LA64-NEXT:    add.d $a1, $a6, $a1
; LA64-NEXT:    add.d $a1, $a1, $t8
; LA64-NEXT:    srai.d $a3, $a5, 63
; LA64-NEXT:    xor $a1, $a1, $a3
; LA64-NEXT:    xor $a3, $t7, $a3
; LA64-NEXT:    or $a1, $a3, $a1
; LA64-NEXT:    sltu $a1, $zero, $a1
; LA64-NEXT:    mul.d $a0, $a0, $a2
; LA64-NEXT:    st.d $a0, $a4, 0
; LA64-NEXT:    st.d $a5, $a4, 8
; LA64-NEXT:    move $a0, $a1
; LA64-NEXT:    ret
  %t = call {i128, i1} @llvm.smul.with.overflow.i128(i128 %v1, i128 %v2)
  %val = extractvalue {i128, i1} %t, 0
  %obit = extractvalue {i128, i1} %t, 1
  store i128 %val, ptr %res
  ret i1 %obit
}

declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
declare {i128, i1} @llvm.smul.with.overflow.i128(i128, i128) nounwind readnone