; llvm/test/CodeGen/LoongArch/typepromotion-overflow.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=loongarch32 %s -o - | FileCheck %s --check-prefix=LA32
; RUN: llc -mtriple=loongarch64 %s -o - | FileCheck %s --check-prefix=LA64

; Promoted i16 add: (a + b) | 1 must be truncated back to 16 bits (bstrpick
; ..., 15, 0) before the unsigned compare against 1024, so i16 wraparound of
; the add is preserved after type promotion.
define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_add:
; LA32:       # %bb.0:
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add:
; LA64:       # %bb.0:
; LA64-NEXT:    add.d $a0, $a1, $a0
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i16 %b, %a
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

; Same pattern as overflow_add but with a subtract: the i16 sub may wrap, so
; the result is re-truncated (bstrpick ..., 15, 0) before the unsigned compare.
define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_sub:
; LA32:       # %bb.0:
; LA32-NEXT:    sub.w $a0, $a0, $a1
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_sub:
; LA64:       # %bb.0:
; LA64-NEXT:    sub.d $a0, $a0, $a1
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = sub i16 %a, %b
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

; Same pattern as overflow_add but with a multiply: i16 mul can overflow, so
; truncation back to 16 bits is required before the unsigned compare.
define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_mul:
; LA32:       # %bb.0:
; LA32-NEXT:    mul.w $a0, $a1, $a0
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_mul:
; LA64:       # %bb.0:
; LA64-NEXT:    mul.d $a0, $a1, $a0
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = mul i16 %b, %a
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

; Same pattern as overflow_add but with a shift-left: bits shifted past bit 15
; must be discarded (bstrpick ..., 15, 0) before the unsigned compare.
define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_shl:
; LA32:       # %bb.0:
; LA32-NEXT:    sll.w $a0, $a0, $a1
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_shl:
; LA64:       # %bb.0:
; LA64-NEXT:    sll.d $a0, $a0, $a1
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = shl i16 %a, %b
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

; i8 add compared against a variable limit: the sum is masked to 8 bits
; (andi ..., 255) so the unsigned compare sees the wrapped i8 value.
define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
; LA32-LABEL: overflow_add_no_consts:
; LA32:       # %bb.0:
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    andi $a0, $a0, 255
; LA32-NEXT:    sltu $a0, $a2, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add_no_consts:
; LA64:       # %bb.0:
; LA64-NEXT:    add.d $a0, $a1, $a0
; LA64-NEXT:    andi $a0, $a0, 255
; LA64-NEXT:    sltu $a0, $a2, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i8 %b, %a
  %cmp = icmp ugt i8 %add, %limit
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; i8 add compared (unsigned) against -128, i.e. 128 after zero-extension: the
; expected code masks the sum to 8 bits and compares against literal 128.
define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
; LA32-LABEL: overflow_add_const_limit:
; LA32:       # %bb.0:
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    andi $a0, $a0, 255
; LA32-NEXT:    ori $a1, $zero, 128
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add_const_limit:
; LA64:       # %bb.0:
; LA64-NEXT:    add.d $a0, $a1, $a0
; LA64-NEXT:    andi $a0, $a0, 255
; LA64-NEXT:    ori $a1, $zero, 128
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i8 %b, %a
  %cmp = icmp ugt i8 %add, -128
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; Signed i8 compare (slt %a, -1): the zero-extended argument must be
; sign-extended back (ext.w.b) before the signed slti compare.
define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
; LA32-LABEL: overflow_add_positive_const_limit:
; LA32:       # %bb.0:
; LA32-NEXT:    ext.w.b $a0, $a0
; LA32-NEXT:    slti $a0, $a0, -1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add_positive_const_limit:
; LA64:       # %bb.0:
; LA64-NEXT:    ext.w.b $a0, $a0
; LA64-NEXT:    slti $a0, $a0, -1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp slt i8 %a, -1
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; Equality with 1 lowered as (a - 1) <u 1: the decrement may underflow for
; a == 0, hence the "unsafe" name; expected code is addi -1 then sltui 1.
define i32 @unsafe_add_underflow(i8 zeroext %a) {
; LA32-LABEL: unsafe_add_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -1
; LA32-NEXT:    sltui $a0, $a0, 1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: unsafe_add_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -1
; LA64-NEXT:    sltui $a0, $a0, 1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp eq i8 %a, 1
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; Equality with 0 needs no adjustment: a single sltui (a <u 1) suffices.
define i32 @safe_add_underflow(i8 zeroext %a) {
; LA32-LABEL: safe_add_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    sltui $a0, $a0, 1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    sltui $a0, $a0, 1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp eq i8 %a, 0
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; (a - 2) <u -5: the -5 limit is the unsigned i8 value 251, so the expected
; code is addi -2 followed by sltui 251 with no extra masking.
define i32 @safe_add_underflow_neg(i8 zeroext %a) {
; LA32-LABEL: safe_add_underflow_neg:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -2
; LA32-NEXT:    sltui $a0, $a0, 251
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_underflow_neg:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -2
; LA64-NEXT:    sltui $a0, $a0, 251
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i8 %a, -2
  %cmp = icmp ult i8 %add, -5
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; Same signed-compare pattern as overflow_add_positive_const_limit:
; sign-extend the i8 argument (ext.w.b) before slti against -1.
define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
; LA32-LABEL: overflow_sub_negative_const_limit:
; LA32:       # %bb.0:
; LA32-NEXT:    ext.w.b $a0, $a0
; LA32-NEXT:    slti $a0, $a0, -1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_sub_negative_const_limit:
; LA64:       # %bb.0:
; LA64-NEXT:    ext.w.b $a0, $a0
; LA64-NEXT:    slti $a0, $a0, -1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp slt i8 %a, -1
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; (a - 6) >u -6: both sides are kept sign-extended in the wide register (the
; -6 limit is materialized with addi from $zero) rather than masked to i8.
define i32 @sext_sub_underflow(i8 zeroext %a) {
; LA32-LABEL: sext_sub_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -6
; LA32-NEXT:    addi.w $a1, $zero, -6
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sext_sub_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -6
; LA64-NEXT:    addi.w $a1, $zero, -6
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %sub = add i8 %a, -6
  %cmp = icmp ugt i8 %sub, -6
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; Like safe_add_underflow but with the select arms swapped (eq 0 -> 16):
; still a single sltui, with the 8/16 mask operands reversed.
define i32 @safe_sub_underflow(i8 zeroext %a) {
; LA32-LABEL: safe_sub_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    sltui $a0, $a0, 1
; LA32-NEXT:    ori $a1, $zero, 8
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 16
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    sltui $a0, $a0, 1
; LA64-NEXT:    ori $a1, $zero, 8
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 16
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp.not = icmp eq i8 %a, 0
  %res = select i1 %cmp.not, i32 16, i32 8
  ret i32 %res
}

; (a - 4) >u -6: the -6 limit becomes the unsigned constant 250, so the
; expected code is addi -4 then sltu against an ori-materialized 250.
define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
; LA32-LABEL: safe_sub_underflow_neg:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -4
; LA32-NEXT:    ori $a1, $zero, 250
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_underflow_neg:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -4
; LA64-NEXT:    ori $a1, $zero, 250
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %sub = add i8 %a, -4
  %cmp = icmp ugt i8 %sub, -6
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; (a - 4) <u -3: the expected code keeps the sign-extended form and uses
; sltui with the sign-extended immediate -3 directly.
define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
; LA32-LABEL: sext_sub_underflow_neg:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -4
; LA32-NEXT:    sltui $a0, $a0, -3
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sext_sub_underflow_neg:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -4
; LA64-NEXT:    sltui $a0, $a0, -3
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %sub = add i8 %a, -4
  %cmp = icmp ult i8 %sub, -3
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; Degenerate case (body already folded to a constant in the IR): verifies
; codegen of a plain `ret i32 0` as a move from $zero.
define i32 @safe_sub_imm_var(ptr nocapture readonly %b) local_unnamed_addr #1 {
; LA32-LABEL: safe_sub_imm_var:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    move $a0, $zero
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_imm_var:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    move $a0, $zero
; LA64-NEXT:    ret
entry:
  ret i32 0
}

; Loaded i8 plus 8 compared (ugt) against -4: lowered as addi -248 on the
; zero-extended load, with the -4 limit materialized via addi from $zero;
; the i1 result is returned directly as the zero-extended compare bit.
define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
; LA32-LABEL: safe_sub_var_imm:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    ld.bu $a0, $a0, 0
; LA32-NEXT:    addi.w $a0, $a0, -248
; LA32-NEXT:    addi.w $a1, $zero, -4
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_var_imm:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    ld.bu $a0, $a0, 0
; LA64-NEXT:    addi.d $a0, $a0, -248
; LA64-NEXT:    addi.w $a1, $zero, -4
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ret
entry:
  %0 = load i8, ptr %b, align 1
  %sub = add nsw i8 %0, 8
  %cmp = icmp ugt i8 %sub, -4
  %conv4 = zext i1 %cmp to i32
  ret i32 %conv4
}

; Degenerate case (body already folded to a constant in the IR): verifies
; codegen of `ret i32 1` as a single ori from $zero.
define i32 @safe_add_imm_var(ptr nocapture readnone %b) {
; LA32-LABEL: safe_add_imm_var:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    ori $a0, $zero, 1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_imm_var:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    ori $a0, $zero, 1
; LA64-NEXT:    ret
entry:
  ret i32 1
}

; Same degenerate constant-return shape as safe_add_imm_var.
define i32 @safe_add_var_imm(ptr nocapture readnone %b) {
; LA32-LABEL: safe_add_var_imm:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    ori $a0, $zero, 1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_var_imm:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    ori $a0, $zero, 1
; LA64-NEXT:    ret
entry:
  ret i32 1
}

; Two chained unsigned compares on (arg | 1) feeding nested selects, then an
; AND with the original argument: checks the promoted compare/select ordering
; (both compares are computed before the mask selection).
define i8 @convert_add_order(i8 zeroext %arg) {
; LA32-LABEL: convert_add_order:
; LA32:       # %bb.0:
; LA32-NEXT:    ori $a1, $a0, 1
; LA32-NEXT:    sltui $a2, $a1, 50
; LA32-NEXT:    addi.w $a1, $a1, -40
; LA32-NEXT:    sltui $a1, $a1, 20
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    sub.w $a1, $a3, $a1
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    masknez $a3, $a3, $a2
; LA32-NEXT:    maskeqz $a1, $a1, $a2
; LA32-NEXT:    or $a1, $a1, $a3
; LA32-NEXT:    and $a0, $a1, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: convert_add_order:
; LA64:       # %bb.0:
; LA64-NEXT:    ori $a1, $a0, 1
; LA64-NEXT:    sltui $a2, $a1, 50
; LA64-NEXT:    addi.d $a1, $a1, -40
; LA64-NEXT:    sltui $a1, $a1, 20
; LA64-NEXT:    ori $a3, $zero, 2
; LA64-NEXT:    sub.d $a1, $a3, $a1
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    masknez $a3, $a3, $a2
; LA64-NEXT:    maskeqz $a1, $a1, $a2
; LA64-NEXT:    or $a1, $a1, $a3
; LA64-NEXT:    and $a0, $a1, $a0
; LA64-NEXT:    ret
  %shl = or i8 %arg, 1
  %cmp.0 = icmp ult i8 %shl, 50
  %sub = add nsw i8 %shl, -40
  %cmp.1 = icmp ult i8 %sub, 20
  %mask.sel.v = select i1 %cmp.1, i8 1, i8 2
  %mask.sel = select i1 %cmp.0, i8 %mask.sel.v, i8 -1
  %res = and i8 %mask.sel, %arg
  ret i8 %res
}

; i32 sign test masked into the arg, truncated to i8, then +(-11) (seen as
; addi 245 on the wider register) and an unsigned compare against %arg1;
; on LA64 the sgt test is done on the 32-bit view (addi.w $a2, $a0, 0).
define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
; LA32-LABEL: underflow_if_sub:
; LA32:       # %bb.0:
; LA32-NEXT:    slt $a2, $zero, $a0
; LA32-NEXT:    and $a0, $a2, $a0
; LA32-NEXT:    addi.w $a0, $a0, 245
; LA32-NEXT:    sltu $a1, $a0, $a1
; LA32-NEXT:    maskeqz $a0, $a0, $a1
; LA32-NEXT:    ori $a2, $zero, 100
; LA32-NEXT:    masknez $a1, $a2, $a1
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: underflow_if_sub:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.w $a2, $a0, 0
; LA64-NEXT:    slt $a2, $zero, $a2
; LA64-NEXT:    and $a0, $a2, $a0
; LA64-NEXT:    addi.d $a0, $a0, 245
; LA64-NEXT:    sltu $a1, $a0, $a1
; LA64-NEXT:    maskeqz $a0, $a0, $a1
; LA64-NEXT:    ori $a2, $zero, 100
; LA64-NEXT:    masknez $a1, $a2, $a1
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp sgt i32 %arg, 0
  %conv = zext i1 %cmp to i32
  %and = and i32 %conv, %arg
  %trunc = trunc i32 %and to i8
  %conv1 = add nuw nsw i8 %trunc, -11
  %cmp.1 = icmp ult i8 %conv1, %arg1
  %res = select i1 %cmp.1, i8 %conv1, i8 100
  ret i8 %res
}

; Same as underflow_if_sub but %arg1 arrives sign-extended, so the expected
; code must first zero-mask it (andi ..., 255) before the unsigned compare.
define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
; LA32-LABEL: underflow_if_sub_signext:
; LA32:       # %bb.0:
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    slt $a2, $zero, $a0
; LA32-NEXT:    and $a0, $a2, $a0
; LA32-NEXT:    addi.w $a0, $a0, 245
; LA32-NEXT:    sltu $a1, $a0, $a1
; LA32-NEXT:    maskeqz $a0, $a0, $a1
; LA32-NEXT:    ori $a2, $zero, 100
; LA32-NEXT:    masknez $a1, $a2, $a1
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: underflow_if_sub_signext:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.w $a2, $a0, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    slt $a2, $zero, $a2
; LA64-NEXT:    and $a0, $a2, $a0
; LA64-NEXT:    addi.d $a0, $a0, 245
; LA64-NEXT:    sltu $a1, $a0, $a1
; LA64-NEXT:    maskeqz $a0, $a0, $a1
; LA64-NEXT:    ori $a2, $zero, 100
; LA64-NEXT:    masknez $a1, $a2, $a1
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp sgt i32 %arg, 0
  %conv = zext i1 %cmp to i32
  %and = and i32 %conv, %arg
  %trunc = trunc i32 %and to i8
  %conv1 = add nuw nsw i8 %trunc, -11
  %cmp.1 = icmp ult i8 %conv1, %arg1
  %res = select i1 %cmp.1, i8 %conv1, i8 100
  ret i8 %res
}