llvm/test/CodeGen/RISCV/rv64zba.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+m,+zba -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBANOZBB
; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBNOZBS
; RUN: llc -mtriple=riscv64 -mattr=+m,+zba,+zbb,+zbs -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=CHECK,RV64ZBA,RV64ZBAZBB,RV64ZBAZBBZBS
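
; Quick reference for the Zba instructions exercised below (per the Zba spec):
;   shNadd    rd, rs1, rs2  => rd = (rs1 << N) + rs2          (N in {1,2,3})
;   shNadd.uw rd, rs1, rs2  => rd = (zext32(rs1) << N) + rs2
;   add.uw    rd, rs1, rs2  => rd = zext32(rs1) + rs2
;   slli.uw   rd, rs1, imm  => rd = zext32(rs1) << imm
;   zext.w    rd, rs        is an alias for add.uw rd, rs, zero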

define i64 @slliuw(i64 %a) nounwind {
; RV64I-LABEL: slliuw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: slliuw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 1
; RV64ZBA-NEXT:    ret
  %conv1 = shl i64 %a, 1
  %shl = and i64 %conv1, 8589934590
  ret i64 %shl
}

define i128 @slliuw_2(i32 signext %0, ptr %1) {
; RV64I-LABEL: slliuw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 28
; RV64I-NEXT:    add a1, a1, a0
; RV64I-NEXT:    ld a0, 0(a1)
; RV64I-NEXT:    ld a1, 8(a1)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: slliuw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    add a1, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a1)
; RV64ZBA-NEXT:    ld a1, 8(a1)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i128, ptr %1, i64 %3
  %5 = load i128, ptr %4
  ret i128 %5
}

define i64 @adduw(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a1, a0
; RV64ZBA-NEXT:    ret
  %and = and i64 %b, 4294967295
  %add = add i64 %and, %a
  ret i64 %add
}

define signext i8 @adduw_2(i32 signext %0, ptr %1) {
; RV64I-LABEL: adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lb a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    lb a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i8, ptr %1, i64 %3
  %5 = load i8, ptr %4
  ret i8 %5
}

define i64 @zextw_i64(i64 %a) nounwind {
; RV64I-LABEL: zextw_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zextw_i64:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    ret
  %and = and i64 %a, 4294967295
  ret i64 %and
}

; This makes sure targetShrinkDemandedConstant changes the and immediate to
; allow zext.w or slli+srli.
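; For example, ((x & 0xfffffffe) | 1) == ((x | 1) & 0xffffffff): the or
; overwrites bit 0 anyway, so the mask can be widened to 0xffffffff, which is
; exactly zext.w (or slli 32 + srli 32).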
define i64 @zextw_demandedbits_i64(i64 %0) {
; RV64I-LABEL: zextw_demandedbits_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ori a0, a0, 1
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zextw_demandedbits_i64:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    ori a0, a0, 1
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    ret
  %2 = and i64 %0, 4294967294
  %3 = or i64 %2, 1
  ret i64 %3
}

define signext i16 @sh1add(i64 %0, ptr %1) {
; RV64I-LABEL: sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 1
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = getelementptr inbounds i16, ptr %1, i64 %0
  %4 = load i16, ptr %3
  ret i16 %4
}

define signext i32 @sh2add(i64 %0, ptr %1) {
; RV64I-LABEL: sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = getelementptr inbounds i32, ptr %1, i64 %0
  %4 = load i32, ptr %3
  ret i32 %4
}

define i64 @sh3add(i64 %0, ptr %1) {
; RV64I-LABEL: sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 3
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = getelementptr inbounds i64, ptr %1, i64 %0
  %4 = load i64, ptr %3
  ret i64 %4
}

define signext i16 @sh1adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh1adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i16, ptr %1, i64 %3
  %5 = load i16, ptr %4
  ret i16 %5
}

define i64 @sh1adduw_2(i64 %0, i64 %1) {
; RV64I-LABEL: sh1adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 1
  %4 = and i64 %3, 8589934590
  %5 = add i64 %4, %1
  ret i64 %5
}

define i64 @sh1adduw_3(i64 %0, i64 %1) {
; RV64I-LABEL: sh1adduw_3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw_3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 1
  %4 = and i64 %3, 8589934590
  %5 = or disjoint i64 %4, %1
  ret i64 %5
}

define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh2adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i32, ptr %1, i64 %3
  %5 = load i32, ptr %4
  ret i32 %5
}

define i64 @sh2adduw_2(i64 %0, i64 %1) {
; RV64I-LABEL: sh2adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 2
  %4 = and i64 %3, 17179869180
  %5 = add i64 %4, %1
  ret i64 %5
}

define i64 @sh2adduw_3(i64 %0, i64 %1) {
; RV64I-LABEL: sh2adduw_3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw_3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 2
  %4 = and i64 %3, 17179869180
  %5 = or disjoint i64 %4, %1
  ret i64 %5
}

define i64 @sh3adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh3adduw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = zext i32 %0 to i64
  %4 = getelementptr inbounds i64, ptr %1, i64 %3
  %5 = load i64, ptr %4
  ret i64 %5
}

define i64 @sh3adduw_2(i64 %0, i64 %1) {
; RV64I-LABEL: sh3adduw_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 3
  %4 = and i64 %3, 34359738360
  %5 = add i64 %4, %1
  ret i64 %5
}

define i64 @sh3adduw_3(i64 %0, i64 %1) {
; RV64I-LABEL: sh3adduw_3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw_3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %3 = shl i64 %0, 3
  %4 = and i64 %3, 34359738360
  %5 = or disjoint i64 %4, %1
  ret i64 %5
}

; Type legalization inserts a sext_inreg after the first add. That add will be
; selected as sh2add which does not sign extend. SimplifyDemandedBits is unable
; to remove the sext_inreg because it has multiple uses. The ashr will use the
; sext_inreg to become sraiw. This leaves the sext_inreg only used by the shl.
; If the shl is selected as sllw, we don't need the sext_inreg.
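; A plausible C source for this pattern (hypothetical, for illustration only):
;   long f(int x, int y, int z) {
;     int b = x * 4 + y;
;     return (long)(z << b) * (long)(b >> 2);
;   }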
define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
; RV64I-LABEL: sh2add_extra_sext:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    sllw a1, a2, a0
; RV64I-NEXT:    sraiw a0, a0, 2
; RV64I-NEXT:    mul a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2add_extra_sext:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    sllw a1, a2, a0
; RV64ZBA-NEXT:    sraiw a0, a0, 2
; RV64ZBA-NEXT:    mul a0, a1, a0
; RV64ZBA-NEXT:    ret
  %a = shl i32 %x, 2
  %b = add i32 %a, %y
  %c = shl i32 %z, %b
  %d = ashr i32 %b, 2
  %e = sext i32 %c to i64
  %f = sext i32 %d to i64
  %g = mul i64 %e, %f
  ret i64 %g
}

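; The addmulN tests below check that N*a + b is lowered with a shNadd pair when
; N factors as (2^i + 1) * 2^j for i,j in {1,2,3} (e.g. 6 = 3*2, 40 = 5*8);
; constants outside that set (22, 162, 180) fall back to li+mul, and pure
; powers of two (4096) use a plain slli.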
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 1
; RV64I-NEXT:    slli a0, a0, 3
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 6
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @disjointormul6(i64 %a, i64 %b) {
; RV64I-LABEL: disjointormul6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 1
; RV64I-NEXT:    slli a0, a0, 3
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: disjointormul6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 6
  %d = or disjoint i64 %c, %b
  ret i64 %d
}

define i64 @addmul10(i64 %a, i64 %b) {
; RV64I-LABEL: addmul10:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 10
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul10:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 10
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 2
; RV64I-NEXT:    slli a0, a0, 4
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul12:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 12
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul18(i64 %a, i64 %b) {
; RV64I-LABEL: addmul18:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 18
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul18:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 18
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul20(i64 %a, i64 %b) {
; RV64I-LABEL: addmul20:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 20
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul20:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 20
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul22(i64 %a, i64 %b) {
; CHECK-LABEL: addmul22:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 22
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 22
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a2, a0, 3
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul24:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 24
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul36(i64 %a, i64 %b) {
; RV64I-LABEL: addmul36:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 36
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul36:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 36
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul40(i64 %a, i64 %b) {
; RV64I-LABEL: addmul40:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 40
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul40:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 40
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul72(i64 %a, i64 %b) {
; RV64I-LABEL: addmul72:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 72
; RV64I-NEXT:    mul a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addmul72:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 72
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul162(i64 %a, i64 %b) {
; CHECK-LABEL: addmul162:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 162
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 162
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul180(i64 %a, i64 %b) {
; CHECK-LABEL: addmul180:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 180
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 180
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @add255mul180(i64 %a) {
; CHECK-LABEL: add255mul180:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 180
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    addi a0, a0, 255
; CHECK-NEXT:    ret
  %c = mul i64 %a, 180
  %d = add i64 %c, 255
  ret i64 %d
}


define i64 @addmul4096(i64 %a, i64 %b) {
; CHECK-LABEL: addmul4096:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 12
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 4096
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @addmul4230(i64 %a, i64 %b) {
; CHECK-LABEL: addmul4230:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a2, 1
; CHECK-NEXT:    addiw a2, a2, 134
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, 4230
  %d = add i64 %c, %b
  ret i64 %d
}

define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 5
; RV64I-NEXT:    slli a0, a0, 7
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul96:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 96
  ret i64 %c
}

define i64 @mul119(i64 %a) {
; RV64I-LABEL: mul119:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 119
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul119:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    sub a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 119
  ret i64 %c
}

define i64 @mul123(i64 %a) {
; RV64I-LABEL: mul123:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 123
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul123:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    sub a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 123
  ret i64 %c
}

define i64 @mul125(i64 %a) {
; RV64I-LABEL: mul125:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 125
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul125:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    sub a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 125
  ret i64 %c
}

define i64 @mul131(i64 %a) {
; RV64I-LABEL: mul131:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 131
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul131:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 131
  ret i64 %c
}

define i64 @mul133(i64 %a) {
; RV64I-LABEL: mul133:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 133
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul133:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 133
  ret i64 %c
}

define i64 @mul137(i64 %a) {
; RV64I-LABEL: mul137:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 137
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul137:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 137
  ret i64 %c
}

define i64 @mul160(i64 %a) {
; RV64I-LABEL: mul160:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 160
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul160:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 160
  ret i64 %c
}

define i64 @mul288(i64 %a) {
; RV64I-LABEL: mul288:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 288
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul288:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 288
  ret i64 %c
}

define i64 @zext_mul68(i32 signext %a) {
; RV64I-LABEL: zext_mul68:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 17
; RV64I-NEXT:    slli a1, a1, 34
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    mulhu a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul68:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a1, a0, 6
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 68
  ret i64 %c
}

define i64 @zext_mul96(i32 signext %a) {
; RV64I-LABEL: zext_mul96:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 27
; RV64I-NEXT:    srli a0, a0, 25
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul96:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 5
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 96
  ret i64 %c
}

define i64 @zext_mul160(i32 signext %a) {
; RV64I-LABEL: zext_mul160:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    slli a1, a1, 37
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    mulhu a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul160:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 5
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 160
  ret i64 %c
}

define i64 @zext_mul288(i32 signext %a) {
; RV64I-LABEL: zext_mul288:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 9
; RV64I-NEXT:    slli a1, a1, 37
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    mulhu a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul288:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 5
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 288
  ret i64 %c
}

; We can't use slli.uw because the shift amount is more than 31
; (12884901888 = 3 << 32).
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 32
; RV64I-NEXT:    slli a0, a0, 34
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul12884901888:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 32
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 12884901888
  ret i64 %c
}

; We can't use slli.uw because the shift amount is more than 31
; (21474836480 = 5 << 32).
define i64 @zext_mul21474836480(i32 signext %a) {
; RV64I-LABEL: zext_mul21474836480:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul21474836480:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 32
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 21474836480
  ret i64 %c
}

; We can't use slli.uw because the shift amount is more than 31
; (38654705664 = 9 << 32).
define i64 @zext_mul38654705664(i32 signext %a) {
; RV64I-LABEL: zext_mul38654705664:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 9
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: zext_mul38654705664:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slli a0, a0, 32
; RV64ZBA-NEXT:    ret
  %b = zext i32 %a to i64
  %c = mul i64 %b, 38654705664
  ret i64 %c
}

define i64 @sh1add_imm(i64 %0) {
; CHECK-LABEL: sh1add_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, 5
; CHECK-NEXT:    ret
  %a = shl i64 %0, 1
  %b = add i64 %a, 5
  ret i64 %b
}

define i64 @sh2add_imm(i64 %0) {
; CHECK-LABEL: sh2add_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -6
; CHECK-NEXT:    ret
  %a = shl i64 %0, 2
  %b = add i64 %a, -6
  ret i64 %b
}

define i64 @sh3add_imm(i64 %0) {
; CHECK-LABEL: sh3add_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, 7
; CHECK-NEXT:    ret
  %a = shl i64 %0, 3
  %b = add i64 %a, 7
  ret i64 %b
}

define i64 @sh1adduw_imm(i32 signext %0) {
; RV64I-LABEL: sh1adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 31
; RV64I-NEXT:    addi a0, a0, 11
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 1
; RV64ZBA-NEXT:    addi a0, a0, 11
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = shl i64 %a, 1
  %c = add i64 %b, 11
  ret i64 %c
}

define i64 @sh2adduw_imm(i32 signext %0) {
; RV64I-LABEL: sh2adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    addi a0, a0, -12
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 2
; RV64ZBA-NEXT:    addi a0, a0, -12
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = shl i64 %a, 2
  %c = add i64 %b, -12
  ret i64 %c
}

define i64 @sh3adduw_imm(i32 signext %0) {
; RV64I-LABEL: sh3adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 29
; RV64I-NEXT:    addi a0, a0, 13
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    addi a0, a0, 13
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = shl i64 %a, 3
  %c = add i64 %b, 13
  ret i64 %c
}

define i64 @adduw_imm(i32 signext %0) nounwind {
; RV64I-LABEL: adduw_imm:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    addi a0, a0, 5
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: adduw_imm:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    addi a0, a0, 5
; RV64ZBA-NEXT:    ret
  %a = zext i32 %0 to i64
  %b = add i64 %a, 5
  ret i64 %b
}

define i64 @mul258(i64 %a) {
; RV64I-LABEL: mul258:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 258
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul258:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 8
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 258
  ret i64 %c
}

define i64 @mul260(i64 %a) {
; RV64I-LABEL: mul260:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 260
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul260:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 8
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 260
  ret i64 %c
}

define i64 @mul264(i64 %a) {
; RV64I-LABEL: mul264:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 264
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul264:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 8
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 264
  ret i64 %c
}

define i64 @imm_zextw() nounwind {
; RV64I-LABEL: imm_zextw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    addi a0, a0, -2
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: imm_zextw:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a0, -2
; RV64ZBA-NEXT:    zext.w a0, a0
; RV64ZBA-NEXT:    ret
  ret i64 4294967294 ; -2 in 32 bits.
}

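; The mulN tests below cover constants reachable with two shNadd instructions:
; either N = (2^i + 1) * 2^j + 1 (11, 13, 19, 21, 37, 41, 73) or
; N = (2^i + 1) * (2^k + 1) (25, 27, 45, 81).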
define i64 @mul11(i64 %a) {
; RV64I-LABEL: mul11:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 11
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul11:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 11
  ret i64 %c
}

define i64 @mul19(i64 %a) {
; RV64I-LABEL: mul19:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 19
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul19:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 19
  ret i64 %c
}

define i64 @mul13(i64 %a) {
; RV64I-LABEL: mul13:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 13
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul13:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 13
  ret i64 %c
}

define i64 @mul21(i64 %a) {
; RV64I-LABEL: mul21:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 21
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul21:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 21
  ret i64 %c
}

define i64 @mul37(i64 %a) {
; RV64I-LABEL: mul37:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 37
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul37:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 37
  ret i64 %c
}

define i64 @mul25(i64 %a) {
; RV64I-LABEL: mul25:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 25
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul25:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 25
  ret i64 %c
}

define i64 @mul41(i64 %a) {
; RV64I-LABEL: mul41:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 41
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul41:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 41
  ret i64 %c
}

define i64 @mul73(i64 %a) {
; RV64I-LABEL: mul73:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 73
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul73:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 73
  ret i64 %c
}

define i64 @mul27(i64 %a) {
; RV64I-LABEL: mul27:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 27
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul27:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 27
  ret i64 %c
}

define i64 @mul45(i64 %a) {
; RV64I-LABEL: mul45:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 45
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul45:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 45
  ret i64 %c
}

define i64 @mul81(i64 %a) {
; RV64I-LABEL: mul81:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 81
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul81:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 81
  ret i64 %c
}

define i64 @mul4098(i64 %a) {
; RV64I-LABEL: mul4098:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 1
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul4098:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 12
; RV64ZBA-NEXT:    sh1add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 4098
  ret i64 %c
}

define i64 @mul4100(i64 %a) {
; RV64I-LABEL: mul4100:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul4100:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 12
; RV64ZBA-NEXT:    sh2add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 4100
  ret i64 %c
}

define i64 @mul4104(i64 %a) {
; RV64I-LABEL: mul4104:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 3
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul4104:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a0, 12
; RV64ZBA-NEXT:    sh3add a0, a0, a1
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, 4104
  ret i64 %c
}

define signext i32 @mulw192(i32 signext %a) {
; RV64I-LABEL: mulw192:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 6
; RV64I-NEXT:    slli a0, a0, 8
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mulw192:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    slliw a0, a0, 6
; RV64ZBA-NEXT:    ret
  %c = mul i32 %a, 192
  ret i32 %c
}

define signext i32 @mulw320(i32 signext %a) {
; RV64I-LABEL: mulw320:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 320
; RV64I-NEXT:    mulw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mulw320:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    slliw a0, a0, 6
; RV64ZBA-NEXT:    ret
  %c = mul i32 %a, 320
  ret i32 %c
}

define signext i32 @mulw576(i32 signext %a) {
; RV64I-LABEL: mulw576:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 576
; RV64I-NEXT:    mulw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mulw576:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a0, a0
; RV64ZBA-NEXT:    slliw a0, a0, 6
; RV64ZBA-NEXT:    ret
  %c = mul i32 %a, 576
  ret i32 %c
}

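; 4104 = 1026 << 2 and 8208 = 1026 << 3, so instead of lui+addiw+add these
; constant adds can materialize 1026 with li and fold the shift into shNadd.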
define i64 @add4104(i64 %a) {
; RV64I-LABEL: add4104:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 1
; RV64I-NEXT:    addiw a1, a1, 8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add4104:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a1, 1026
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = add i64 %a, 4104
  ret i64 %c
}

define i64 @add4104_2(i64 %a) {
; RV64I-LABEL: add4104_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 1
; RV64I-NEXT:    addiw a1, a1, 8
; RV64I-NEXT:    or a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add4104_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a1, 1026
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = or disjoint i64 %a, 4104
  ret i64 %c
}

define i64 @add8208(i64 %a) {
; RV64I-LABEL: add8208:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a1, 2
; RV64I-NEXT:    addiw a1, a1, 16
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: add8208:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    li a1, 1026
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %c = add i64 %a, 8208
  ret i64 %c
}

; Make sure we prefer LUI for the 8192 instead of using sh3add.
define signext i32 @add8192_i32(i32 signext %a) {
; CHECK-LABEL: add8192_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 2
; CHECK-NEXT:    addw a0, a0, a1
; CHECK-NEXT:    ret
  %c = add i32 %a, 8192
  ret i32 %c
}

; Make sure we prefer LUI for the 8192 instead of using sh3add.
define i64 @add8192(i64 %a) {
; CHECK-LABEL: add8192:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
  %c = add i64 %a, 8192
  ret i64 %c
}

define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) {
; RV64I-LABEL: addshl32_5_6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl32_5_6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    slliw a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i32 %a, 5
  %d = shl i32 %b, 6
  %e = add i32 %c, %d
  ret i32 %e
}

define i64 @addshl64_5_6(i64 %a, i64 %b) {
; RV64I-LABEL: addshl64_5_6:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl64_5_6:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i64 %a, 5
  %d = shl i64 %b, 6
  %e = add i64 %c, %d
  ret i64 %e
}

define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) {
; RV64I-LABEL: addshl32_5_7:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 7
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl32_5_7:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    slliw a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i32 %a, 5
  %d = shl i32 %b, 7
  %e = add i32 %c, %d
  ret i32 %e
}

define i64 @addshl64_5_7(i64 %a, i64 %b) {
; RV64I-LABEL: addshl64_5_7:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 7
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl64_5_7:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i64 %a, 5
  %d = shl i64 %b, 7
  %e = add i64 %c, %d
  ret i64 %e
}

define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) {
; RV64I-LABEL: addshl32_5_8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 8
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl32_5_8:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    slliw a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i32 %a, 5
  %d = shl i32 %b, 8
  %e = add i32 %c, %d
  ret i32 %e
}

define i64 @addshl64_5_8(i64 %a, i64 %b) {
; RV64I-LABEL: addshl64_5_8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 5
; RV64I-NEXT:    slli a1, a1, 8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: addshl64_5_8:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    slli a0, a0, 5
; RV64ZBA-NEXT:    ret
  %c = shl i64 %a, 5
  %d = shl i64 %b, 8
  %e = add i64 %c, %d
  ret i64 %e
}

; Make sure we use sext.b+slli+srli for Zba+Zbb.
; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
define zeroext i32 @sext_ashr_zext_i8(i8 %a) nounwind {
; RV64I-LABEL: sext_ashr_zext_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srai a0, a0, 31
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: sext_ashr_zext_i8:
; RV64ZBANOZBB:       # %bb.0:
; RV64ZBANOZBB-NEXT:    slli a0, a0, 56
; RV64ZBANOZBB-NEXT:    srai a0, a0, 31
; RV64ZBANOZBB-NEXT:    srli a0, a0, 32
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBB-LABEL: sext_ashr_zext_i8:
; RV64ZBAZBB:       # %bb.0:
; RV64ZBAZBB-NEXT:    sext.b a0, a0
; RV64ZBAZBB-NEXT:    slli a0, a0, 23
; RV64ZBAZBB-NEXT:    srli a0, a0, 32
; RV64ZBAZBB-NEXT:    ret
  %ext = sext i8 %a to i32
  %1 = ashr i32 %ext, 9
  ret i32 %1
}

define i64 @sh6_sh3_add1(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add1:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a1, a1, a2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add1:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %shl1, %shl
  %add2 = add nsw i64 %add, %x
  ret i64 %add2
}

define i64 @sh6_sh3_add2(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add2:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add2:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %shl1, %x
  %add2 = add nsw i64 %add, %shl
  ret i64 %add2
}

define i64 @sh6_sh3_add3(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add3:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a1, a1, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add3:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %shl1, %shl
  %add2 = add nsw i64 %x, %add
  ret i64 %add2
}

define i64 @sh6_sh3_add4(i64 noundef %x, i64 noundef %y, i64 noundef %z) {
; RV64I-LABEL: sh6_sh3_add4:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh6_sh3_add4:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    slli a1, a1, 6
; RV64ZBA-NEXT:    sh3add a0, a2, a0
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
entry:
  %shl = shl i64 %z, 3
  %shl1 = shl i64 %y, 6
  %add = add nsw i64 %x, %shl
  %add2 = add nsw i64 %add, %shl1
  ret i64 %add2
}

; Make sure we use sext.h+slli+srli for Zba+Zbb.
; FIXME: The RV64I and Zba only cases can be done with only 3 shifts.
define zeroext i32 @sext_ashr_zext_i16(i16 %a) nounwind {
; RV64I-LABEL: sext_ashr_zext_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srai a0, a0, 25
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: sext_ashr_zext_i16:
; RV64ZBANOZBB:       # %bb.0:
; RV64ZBANOZBB-NEXT:    slli a0, a0, 48
; RV64ZBANOZBB-NEXT:    srai a0, a0, 25
; RV64ZBANOZBB-NEXT:    srli a0, a0, 32
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBB-LABEL: sext_ashr_zext_i16:
; RV64ZBAZBB:       # %bb.0:
; RV64ZBAZBB-NEXT:    sext.h a0, a0
; RV64ZBAZBB-NEXT:    slli a0, a0, 23
; RV64ZBAZBB-NEXT:    srli a0, a0, 32
; RV64ZBAZBB-NEXT:    ret
  %ext = sext i16 %a to i32
  %1 = ashr i32 %ext, 9
  ret i32 %1
}

; This is the IR you get from InstCombine if you take the difference of 2
; pointers and cast it to unsigned before using it as an index.
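; A plausible C source (hypothetical; the exact IR depends on what InstCombine
; can prove about the difference):
;   short get(short *base, short *end) {
;     unsigned idx = (unsigned)(end - base); // element count, cast to unsigned
;     return base[idx];
;   }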
define signext i16 @sh1adduw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64I-LABEL: sh1adduw_ptrdiff:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    slli a2, a2, 33
; RV64I-NEXT:    addi a2, a2, -2
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh1adduw_ptrdiff:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a0, a0, 1
; RV64ZBA-NEXT:    sh1add.uw a0, a0, a1
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %ptrdiff = lshr exact i64 %diff, 1
  %cast = and i64 %ptrdiff, 4294967295
  %ptr = getelementptr inbounds i16, ptr %baseptr, i64 %cast
  %res = load i16, ptr %ptr
  ret i16 %res
}

define signext i32 @sh2adduw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64I-LABEL: sh2adduw_ptrdiff:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    slli a2, a2, 34
; RV64I-NEXT:    addi a2, a2, -4
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh2adduw_ptrdiff:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a0, a0, 2
; RV64ZBA-NEXT:    sh2add.uw a0, a0, a1
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %ptrdiff = lshr exact i64 %diff, 2
  %cast = and i64 %ptrdiff, 4294967295
  %ptr = getelementptr inbounds i32, ptr %baseptr, i64 %cast
  %res = load i32, ptr %ptr
  ret i32 %res
}

define i64 @sh3adduw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64I-LABEL: sh3adduw_ptrdiff:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    slli a2, a2, 35
; RV64I-NEXT:    addi a2, a2, -8
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: sh3adduw_ptrdiff:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a0, a0, 3
; RV64ZBA-NEXT:    sh3add.uw a0, a0, a1
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %ptrdiff = lshr exact i64 %diff, 3
  %cast = and i64 %ptrdiff, 4294967295
  %ptr = getelementptr inbounds i64, ptr %baseptr, i64 %cast
  %res = load i64, ptr %ptr
  ret i64 %res
}

define signext i16 @srliw_1_sh1add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 1
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_1_sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 1
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 1
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i16, ptr %0, i64 %4
  %6 = load i16, ptr %5, align 2
  ret i16 %6
}

define i128 @slliuw_ptrdiff(i64 %diff, ptr %baseptr) {
; RV64I-LABEL: slliuw_ptrdiff:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    slli a2, a2, 36
; RV64I-NEXT:    addi a2, a2, -16
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    add a1, a1, a0
; RV64I-NEXT:    ld a0, 0(a1)
; RV64I-NEXT:    ld a1, 8(a1)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: slliuw_ptrdiff:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a0, a0, 4
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    add a1, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a1)
; RV64ZBA-NEXT:    ld a1, 8(a1)
; RV64ZBA-NEXT:    ret
  %ptrdiff = lshr exact i64 %diff, 4
  %cast = and i64 %ptrdiff, 4294967295
  %ptr = getelementptr inbounds i128, ptr %baseptr, i64 %cast
  %res = load i128, ptr %ptr
  ret i128 %res
}

define signext i32 @srliw_2_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 2
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_2_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 2
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @srliw_3_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_3_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 3
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_3_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 3
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 3
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define signext i32 @srliw_1_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 1
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_1_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 1
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 1
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @srliw_1_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_1_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 1
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_1_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 1
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 1
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define i64 @srliw_2_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 2
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_2_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define signext i16 @srliw_2_sh1add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 2
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_2_sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 2
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i16, ptr %0, i64 %4
  %6 = load i16, ptr %5, align 2
  ret i16 %6
}


define signext i32 @srliw_3_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_3_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 3
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_3_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 3
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 3
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @srliw_4_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: srliw_4_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a1, a1, 4
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srliw_4_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a1, a1, 4
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i32 %1, 4
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define signext i32 @srli_1_sh2add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_1_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_1_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 1
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 1
  %4 = getelementptr inbounds i32, ptr %0, i64 %3
  %5 = load i32, ptr %4, align 4
  ret i32 %5
}

define i64 @srli_2_sh3add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_2_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_2_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 2
  %4 = getelementptr inbounds i64, ptr %0, i64 %3
  %5 = load i64, ptr %4, align 8
  ret i64 %5
}

define signext i16 @srli_2_sh1add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_2_sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_2_sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 2
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 2
  %4 = getelementptr inbounds i16, ptr %0, i64 %3
  %5 = load i16, ptr %4, align 2
  ret i16 %5
}

define signext i32 @srli_3_sh2add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_3_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_3_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 3
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 3
  %4 = getelementptr inbounds i32, ptr %0, i64 %3
  %5 = load i32, ptr %4, align 4
  ret i32 %5
}

define i64 @srli_4_sh3add(ptr %0, i64 %1) {
; RV64I-LABEL: srli_4_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 1
; RV64I-NEXT:    andi a1, a1, -8
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_4_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 4
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = lshr i64 %1, 4
  %4 = getelementptr inbounds i64, ptr %0, i64 %3
  %5 = load i64, ptr %4, align 8
  ret i64 %5
}

define signext i16 @shl_2_sh1add(ptr %0, i32 signext %1) {
; RV64I-LABEL: shl_2_sh1add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 34
; RV64I-NEXT:    srli a1, a1, 31
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: shl_2_sh1add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 2
; RV64ZBA-NEXT:    sh1add.uw a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = shl i32 %1, 2
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i16, ptr %0, i64 %4
  %6 = load i16, ptr %5, align 2
  ret i16 %6
}

define signext i32 @shl_16_sh2add(ptr %0, i32 signext %1) {
; RV64I-LABEL: shl_16_sh2add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 48
; RV64I-NEXT:    srli a1, a1, 30
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: shl_16_sh2add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 16
; RV64ZBA-NEXT:    sh2add.uw a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = shl i32 %1, 16
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i32, ptr %0, i64 %4
  %6 = load i32, ptr %5, align 4
  ret i32 %6
}

define i64 @shl_31_sh3add(ptr %0, i32 signext %1) {
; RV64I-LABEL: shl_31_sh3add:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 63
; RV64I-NEXT:    srli a1, a1, 29
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: shl_31_sh3add:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 31
; RV64ZBA-NEXT:    sh3add.uw a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %3 = shl i32 %1, 31
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr %0, i64 %4
  %6 = load i64, ptr %5, align 8
  ret i64 %6
}

define i64 @pack_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: pack_i64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 32
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %shl = and i64 %a, 4294967295
  %shl1 = shl i64 %b, 32
  %or = or i64 %shl1, %shl
  ret i64 %or
}

define i64 @pack_i64_2(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: pack_i64_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 32
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %zexta = zext i32 %a to i64
  %zextb = zext i32 %b to i64
  %shl1 = shl i64 %zextb, 32
  %or = or i64 %shl1, %zexta
  ret i64 %or
}

define i64 @pack_i64_disjoint(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: pack_i64_disjoint:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64_disjoint:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %shl = and i64 %a, 4294967295
  %or = or disjoint i64 %b, %shl
  ret i64 %or
}

define i64 @pack_i64_disjoint_2(i32 signext %a, i64 %b) nounwind {
; RV64I-LABEL: pack_i64_disjoint_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    or a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: pack_i64_disjoint_2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    add.uw a0, a0, a1
; RV64ZBA-NEXT:    ret
  %zexta = zext i32 %a to i64
  %or = or disjoint i64 %b, %zexta
  ret i64 %or
}

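; The array_index_shM_shN tests below compute p + (idx1 << (M+N)) + (idx2 << N)
; from a 2-D GEP, so one or both shifts can fold into a shNadd.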
define i8 @array_index_sh1_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh0:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    add a0, a0, a2
; RV64ZBA-NEXT:    lbu a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh1_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    sh1add a0, a2, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh1_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    sh2add a0, a2, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh1_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh1_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh1_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [2 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

define i8 @array_index_sh2_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh0:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    add a0, a0, a2
; RV64ZBA-NEXT:    lbu a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh2_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    sh1add a0, a2, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh2_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a1, a2
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh2_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh2_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 5
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh2_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [4 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

define i8 @array_index_sh3_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh0:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    add a0, a0, a2
; RV64ZBA-NEXT:    lbu a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh3_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 4
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh1add a0, a1, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh3_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 5
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh2add a0, a1, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh3_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh3_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [8 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

; Similar to the tests above, but with an lshr on one of the indices. This
; requires special handling during isel to form a shift pair.
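; Here ((%idx1 >> 58) << 6) + (%idx2 << 3) is reassociated as
; (((%idx1 >> 58) << 3) + %idx2) << 3, so the srli result can feed two
; sh3adds instead of needing a separate slli.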
define i64 @array_index_lshr_sh3_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_lshr_sh3_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a1, a1, 58
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_lshr_sh3_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srli a1, a1, 58
; RV64ZBA-NEXT:    sh3add a1, a1, a2
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %shr = lshr i64 %idx1, 58
  %a = getelementptr inbounds [8 x i64], ptr %p, i64 %shr, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

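; In the array_index_sh4_* tests the outer index still needs a shift of 4
; after factoring out the element-size shift, and there is no sh4add, so the
; outer offset is formed with slli+add; only the element scaling can use
; shNadd.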
define i8 @array_index_sh4_sh0(ptr %p, i64 %idx1, i64 %idx2) {
; CHECK-LABEL: array_index_sh4_sh0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    lbu a0, 0(a0)
; CHECK-NEXT:    ret
  %a = getelementptr inbounds [16 x i8], ptr %p, i64 %idx1, i64 %idx2
  %b = load i8, ptr %a, align 1
  ret i8 %b
}

define i16 @array_index_sh4_sh1(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 5
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh4_sh1:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 5
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    sh1add a0, a2, a0
; RV64ZBA-NEXT:    lh a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [16 x i16], ptr %p, i64 %idx1, i64 %idx2
  %b = load i16, ptr %a, align 2
  ret i16 %b
}

define i32 @array_index_sh4_sh2(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 6
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh4_sh2:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 6
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    sh2add a0, a2, a0
; RV64ZBA-NEXT:    lw a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [16 x i32], ptr %p, i64 %idx1, i64 %idx2
  %b = load i32, ptr %a, align 4
  ret i32 %b
}

define i64 @array_index_sh4_sh3(ptr %p, i64 %idx1, i64 %idx2) {
; RV64I-LABEL: array_index_sh4_sh3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a1, 7
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: array_index_sh4_sh3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    slli a1, a1, 7
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    sh3add a0, a2, a0
; RV64ZBA-NEXT:    ld a0, 0(a0)
; RV64ZBA-NEXT:    ret
  %a = getelementptr inbounds [16 x i64], ptr %p, i64 %idx1, i64 %idx2
  %b = load i64, ptr %a, align 8
  ret i64 %b
}

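; The mask 67108863 is (1 << 26) - 1, so (and (lshr %a2, 6), 67108863)
; extracts bits 6..31 of %a2 zero-extended, which is exactly srliw a2, a2, 6;
; the two geps then combine into a single sh3add of the summed indices.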
define ptr @test_gep_gep_dont_crash(ptr %p, i64 %a1, i64 %a2) {
; RV64I-LABEL: test_gep_gep_dont_crash:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a2, a2, 6
; RV64I-NEXT:    slli a2, a2, 3
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: test_gep_gep_dont_crash:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    srliw a2, a2, 6
; RV64ZBA-NEXT:    add a1, a2, a1
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
  %lshr = lshr i64 %a2, 6
  %and = and i64 %lshr, 67108863
  %gep1 = getelementptr i64, ptr %p, i64 %and
  %gep2 = getelementptr i64, ptr %gep1, i64 %a1
  ret ptr %gep2
}

define i64 @regression(i32 signext %x, i32 signext %y) {
; RV64I-LABEL: regression:
; RV64I:       # %bb.0:
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 29
; RV64I-NEXT:    srli a0, a0, 27
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: regression:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    subw a0, a0, a1
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    ret
  %sub = sub i32 %x, %y
  %ext = zext i32 %sub to i64
  %res = mul nuw nsw i64 %ext, 24
  ret i64 %res
}

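; The mul_neg* tests cover multiplies by small negative constants: -(2^k)
; becomes slli+neg, -3 and -5 become shNadd+neg, -7 becomes a0 - (a0 << 3),
; and -6 is left as li+mul rather than a longer shift/add/neg sequence.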
define i64 @mul_neg1(i64 %a) {
; CHECK-LABEL: mul_neg1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -1
  ret i64 %c
}

define i64 @mul_neg2(i64 %a) {
; CHECK-LABEL: mul_neg2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 1
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -2
  ret i64 %c
}

define i64 @mul_neg3(i64 %a) {
; RV64I-LABEL: mul_neg3:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 1
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul_neg3:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh1add a0, a0, a0
; RV64ZBA-NEXT:    neg a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, -3
  ret i64 %c
}

define i64 @mul_neg4(i64 %a) {
; CHECK-LABEL: mul_neg4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -4
  ret i64 %c
}

define i64 @mul_neg5(i64 %a) {
; RV64I-LABEL: mul_neg5:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: mul_neg5:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    sh2add a0, a0, a0
; RV64ZBA-NEXT:    neg a0, a0
; RV64ZBA-NEXT:    ret
  %c = mul i64 %a, -5
  ret i64 %c
}

define i64 @mul_neg6(i64 %a) {
; CHECK-LABEL: mul_neg6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, -6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, -6
  ret i64 %c
}

define i64 @mul_neg7(i64 %a) {
; CHECK-LABEL: mul_neg7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a1, a0, 3
; CHECK-NEXT:    sub a0, a0, a1
; CHECK-NEXT:    ret
  %c = mul i64 %a, -7
  ret i64 %c
}

define i64 @mul_neg8(i64 %a) {
; CHECK-LABEL: mul_neg8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    neg a0, a0
; CHECK-NEXT:    ret
  %c = mul i64 %a, -8
  ret i64 %c
}

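; The bext_mul* tests check that (and (lshr %x, %y), 1) is selected as a
; single bext when Zbs is enabled (bext rd, rs1, rs2 extracts bit rs2 of
; rs1), and that the constant multiply still expands with shNadd:
; 12*a = (a + 2*a) << 2, 45*a = 9*(a + 4*a), 132*a = (a << 7) + 4*a.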
define i64 @bext_mul12(i32 %1, i32 %2) {
; RV64I-LABEL: bext_mul12:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    slli a0, a0, 4
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: bext_mul12:
; RV64ZBANOZBB:       # %bb.0: # %entry
; RV64ZBANOZBB-NEXT:    srlw a0, a0, a1
; RV64ZBANOZBB-NEXT:    andi a0, a0, 1
; RV64ZBANOZBB-NEXT:    sh1add a0, a0, a0
; RV64ZBANOZBB-NEXT:    slli a0, a0, 2
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBBNOZBS-LABEL: bext_mul12:
; RV64ZBAZBBNOZBS:       # %bb.0: # %entry
; RV64ZBAZBBNOZBS-NEXT:    srlw a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    andi a0, a0, 1
; RV64ZBAZBBNOZBS-NEXT:    sh1add a0, a0, a0
; RV64ZBAZBBNOZBS-NEXT:    slli a0, a0, 2
; RV64ZBAZBBNOZBS-NEXT:    ret
;
; RV64ZBAZBBZBS-LABEL: bext_mul12:
; RV64ZBAZBBZBS:       # %bb.0: # %entry
; RV64ZBAZBBZBS-NEXT:    bext a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    sh1add a0, a0, a0
; RV64ZBAZBBZBS-NEXT:    slli a0, a0, 2
; RV64ZBAZBBZBS-NEXT:    ret
entry:
  %3 = lshr i32 %1, %2
  %4 = and i32 %3, 1
  %5 = zext nneg i32 %4 to i64
  %6 = mul i64 %5, 12
  ret i64 %6
}

define i64 @bext_mul45(i32 %1, i32 %2) {
; RV64I-LABEL: bext_mul45:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    li a1, 45
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: bext_mul45:
; RV64ZBANOZBB:       # %bb.0: # %entry
; RV64ZBANOZBB-NEXT:    srlw a0, a0, a1
; RV64ZBANOZBB-NEXT:    andi a0, a0, 1
; RV64ZBANOZBB-NEXT:    sh2add a0, a0, a0
; RV64ZBANOZBB-NEXT:    sh3add a0, a0, a0
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBBNOZBS-LABEL: bext_mul45:
; RV64ZBAZBBNOZBS:       # %bb.0: # %entry
; RV64ZBAZBBNOZBS-NEXT:    srlw a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    andi a0, a0, 1
; RV64ZBAZBBNOZBS-NEXT:    sh2add a0, a0, a0
; RV64ZBAZBBNOZBS-NEXT:    sh3add a0, a0, a0
; RV64ZBAZBBNOZBS-NEXT:    ret
;
; RV64ZBAZBBZBS-LABEL: bext_mul45:
; RV64ZBAZBBZBS:       # %bb.0: # %entry
; RV64ZBAZBBZBS-NEXT:    bext a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    sh2add a0, a0, a0
; RV64ZBAZBBZBS-NEXT:    sh3add a0, a0, a0
; RV64ZBAZBBZBS-NEXT:    ret
entry:
  %3 = lshr i32 %1, %2
  %4 = and i32 %3, 1
  %5 = zext nneg i32 %4 to i64
  %6 = mul i64 %5, 45
  ret i64 %6
}

define i64 @bext_mul132(i32 %1, i32 %2) {
; RV64I-LABEL: bext_mul132:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srlw a0, a0, a1
; RV64I-NEXT:    andi a0, a0, 1
; RV64I-NEXT:    li a1, 132
; RV64I-NEXT:    mul a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBANOZBB-LABEL: bext_mul132:
; RV64ZBANOZBB:       # %bb.0: # %entry
; RV64ZBANOZBB-NEXT:    srlw a0, a0, a1
; RV64ZBANOZBB-NEXT:    andi a0, a0, 1
; RV64ZBANOZBB-NEXT:    slli a1, a0, 7
; RV64ZBANOZBB-NEXT:    sh2add a0, a0, a1
; RV64ZBANOZBB-NEXT:    ret
;
; RV64ZBAZBBNOZBS-LABEL: bext_mul132:
; RV64ZBAZBBNOZBS:       # %bb.0: # %entry
; RV64ZBAZBBNOZBS-NEXT:    srlw a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    andi a0, a0, 1
; RV64ZBAZBBNOZBS-NEXT:    slli a1, a0, 7
; RV64ZBAZBBNOZBS-NEXT:    sh2add a0, a0, a1
; RV64ZBAZBBNOZBS-NEXT:    ret
;
; RV64ZBAZBBZBS-LABEL: bext_mul132:
; RV64ZBAZBBZBS:       # %bb.0: # %entry
; RV64ZBAZBBZBS-NEXT:    bext a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    slli a1, a0, 7
; RV64ZBAZBBZBS-NEXT:    sh2add a0, a0, a1
; RV64ZBAZBBZBS-NEXT:    ret
entry:
  %3 = lshr i32 %1, %2
  %4 = and i32 %3, 1
  %5 = zext nneg i32 %4 to i64
  %6 = mul i64 %5, 132
  ret i64 %6
}

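; The [80 x i8] element here is 80 bytes, and 80 = 5 * 16, so the
; zero-extended scaled index is formed as slli.uw (shift by 4 with the i32
; zero extension folded in) followed by sh2add a1, a1, a1, which computes
; a1 + 4*a1 = 5*a1.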
define ptr @gep_lshr_i32(ptr %0, i64 %1) {
; RV64I-LABEL: gep_lshr_i32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srli a1, a1, 2
; RV64I-NEXT:    li a2, 5
; RV64I-NEXT:    slli a2, a2, 36
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    mulhu a1, a1, a2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: gep_lshr_i32:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a1, a1, 2
; RV64ZBA-NEXT:    slli.uw a1, a1, 4
; RV64ZBA-NEXT:    sh2add a1, a1, a1
; RV64ZBA-NEXT:    add a0, a0, a1
; RV64ZBA-NEXT:    ret
entry:
  %2 = lshr exact i64 %1, 2
  %3 = and i64 %2, 4294967295
  %5 = getelementptr [80 x i8], ptr %0, i64 %3
  ret ptr %5
}

define i64 @srli_slliuw(i64 %1) {
; RV64I-LABEL: srli_slliuw:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 36
; RV64I-NEXT:    addi a1, a1, -16
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 2
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    ret
entry:
  %2 = lshr exact i64 %1, 2
  %3 = and i64 %2, 4294967295
  %4 = shl i64 %3, 4
  ret i64 %4
}

define i64 @srli_slliuw_canonical(i64 %0) {
; RV64I-LABEL: srli_slliuw_canonical:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    slli a0, a0, 2
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 36
; RV64I-NEXT:    addi a1, a1, -16
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw_canonical:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 2
; RV64ZBA-NEXT:    slli.uw a0, a0, 4
; RV64ZBA-NEXT:    ret
entry:
  %1 = shl i64 %0, 2
  %2 = and i64 %1, 68719476720
  ret i64 %2
}

; Make sure we don't accidentally use slli.uw with a shift of 32.
define i64 @srli_slliuw_negative_test(i64 %0) {
; CHECK-LABEL: srli_slliuw_negative_test:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srli a0, a0, 6
; CHECK-NEXT:    slli a0, a0, 32
; CHECK-NEXT:    ret
entry:
  %1 = lshr i64 %0, 6
  %2 = shl i64 %1, 32
  ret i64 %2
}

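; Like srli_slliuw, but the mask is only 16 bits; slli.uw zero-extends
; exactly 32 bits, so it doesn't apply and the and is materialized with
; lui+addiw instead.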
define i64 @srli_slli_i16(i64 %1) {
; CHECK-LABEL: srli_slli_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    lui a1, 256
; CHECK-NEXT:    addiw a1, a1, -16
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    ret
entry:
  %2 = lshr exact i64 %1, 2
  %3 = and i64 %2, 65535
  %4 = shl i64 %3, 4
  ret i64 %4
}

define i64 @srli_slliuw_2(i64 %1) {
; RV64I-LABEL: srli_slliuw_2:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srli a0, a0, 15
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 35
; RV64I-NEXT:    addi a1, a1, -8
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw_2:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 18
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    ret
entry:
  %2 = lshr i64 %1, 18
  %3 = and i64 %2, 4294967295
  %4 = shl i64 %3, 3
  ret i64 %4
}

define i64 @srli_slliuw_canonical_2(i64 %0) {
; RV64I-LABEL: srli_slliuw_canonical_2:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srli a0, a0, 15
; RV64I-NEXT:    li a1, 1
; RV64I-NEXT:    slli a1, a1, 35
; RV64I-NEXT:    addi a1, a1, -8
; RV64I-NEXT:    and a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srli_slliuw_canonical_2:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srli a0, a0, 18
; RV64ZBA-NEXT:    slli.uw a0, a0, 3
; RV64ZBA-NEXT:    ret
entry:
  %1 = lshr i64 %0, 15
  %2 = and i64 %1, 34359738360
  ret i64 %2
}

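; In the next two tests the gep scales the (srl (sra %1, 32), 6) index; the
; peephole referenced below re-forms the srli+slli / srli+sh3add shift pairs
; rather than materializing the equivalent shifted mask.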
define ptr @srai_srli_sh3add(ptr %0, i64 %1) nounwind {
; RV64I-LABEL: srai_srli_sh3add:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    srai a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 6
; RV64I-NEXT:    slli a1, a1, 3
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: srai_srli_sh3add:
; RV64ZBA:       # %bb.0: # %entry
; RV64ZBA-NEXT:    srai a1, a1, 32
; RV64ZBA-NEXT:    srli a1, a1, 6
; RV64ZBA-NEXT:    sh3add a0, a1, a0
; RV64ZBA-NEXT:    ret
entry:
  %2 = ashr i64 %1, 32
  %3 = lshr i64 %2, 6
  %4 = getelementptr i64, ptr %0, i64 %3
  ret ptr %4
}

define ptr @srai_srli_slli(ptr %0, i64 %1) nounwind {
; CHECK-LABEL: srai_srli_slli:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srai a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 6
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    ret
entry:
  %2 = ashr i64 %1, 32
  %3 = lshr i64 %2, 6
  %4 = getelementptr i128, ptr %0, i64 %3
  ret ptr %4
}

; Negative test to make sure the peephole added for srai_srli_slli and
; srai_srli_sh3add doesn't break this.
define i64 @srai_andi(i64 %x) nounwind {
; CHECK-LABEL: srai_andi:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srai a0, a0, 8
; CHECK-NEXT:    andi a0, a0, -8
; CHECK-NEXT:    ret
entry:
  %y = ashr i64 %x, 8
  %z = and i64 %y, -8
  ret i64 %z
}

; Negative test to make sure the peephole added for srai_srli_slli and
; srai_srli_sh3add doesn't break this.
define i64 @srai_lui_and(i64 %x) nounwind {
; CHECK-LABEL: srai_lui_and:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    srai a0, a0, 8
; CHECK-NEXT:    lui a1, 1048574
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    ret
entry:
  %y = ashr i64 %x, 8
  %z = and i64 %y, -8192
  ret i64 %z
}