llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
# RUN:     -run-pass=phi-node-elimination,register-coalescer,riscv-insert-vsetvli | FileCheck %s
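
# These tests exercise cross-basic-block state propagation in the
# riscv-insert-vsetvli pass: where vsetvlis must be inserted when the vector
# configuration is established in one block and used in others.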

--- |
  source_filename = "vsetvli-insert.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  define <vscale x 1 x i64> @load_add_or_sub(i8 zeroext %cond, ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.else:                                          ; preds = %entry
    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
    ret <vscale x 1 x i64> %d
  }

  define void @load_zext_or_sext(i8 zeroext %cond, ptr %0, ptr %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr %0, i64 %2)
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
    br label %if.end

  if.else:                                          ; preds = %entry
    %c = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
    call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> %d, ptr %1, i64 %2)
    ret void
  }

  declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1

  define i64 @vmv_x_s(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.else:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %1, <vscale x 1 x i64> %1, i64 %2)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %c = phi <vscale x 1 x i64> [ %a, %if.then ], [ %b, %if.else ]
    %d = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %c)
    ret i64 %d
  }

  declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #2

  define <vscale x 1 x i64> @vsetvli_add_or_sub(i8 zeroext %cond, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
  entry:
    %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
    %tobool = icmp eq i8 %cond, 0
    br i1 %tobool, label %if.else, label %if.then

  if.then:                                          ; preds = %entry
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
    br label %if.end

  if.else:                                          ; preds = %entry
    %c = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %vl)
    br label %if.end

  if.end:                                           ; preds = %if.else, %if.then
    %d = phi <vscale x 1 x i64> [ %b, %if.then ], [ %c, %if.else ]
    ret <vscale x 1 x i64> %d
  }

  define void @vsetvli_vcpop() {
    ret void
  }

  define void @vsetvli_loop_store() {
    ret void
  }

  define void @vsetvli_loop_store2() {
    ret void
  }

  define void @redusum_loop(ptr nocapture noundef readonly %a, i32 noundef signext %n, ptr nocapture noundef writeonly %res) #0 {
  entry:
    br label %vector.body

  vector.body:                                      ; preds = %vector.body, %entry
    %lsr.iv1 = phi ptr [ %scevgep, %vector.body ], [ %a, %entry ]
    %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 2048, %entry ]
    %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %0, %vector.body ]
    %lsr.iv12 = bitcast ptr %lsr.iv1 to ptr
    %wide.load = load <4 x i32>, ptr %lsr.iv12, align 4
    %0 = add <4 x i32> %wide.load, %vec.phi
    %lsr.iv.next = add nsw i64 %lsr.iv, -4
    %scevgep = getelementptr i32, ptr %lsr.iv1, i64 4
    %1 = icmp eq i64 %lsr.iv.next, 0
    br i1 %1, label %middle.block, label %vector.body

  middle.block:                                     ; preds = %vector.body
    %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
    store i32 %2, ptr %res, align 4
    ret void
  }

  define void @vsetvli_vluxei64_regression() {
    ret void
  }

  define void @if_in_loop() {
    ret void
  }

  define void @pre_undemanded_vl() {
    ret void
  }

  define void @clobberred_forwarded_avl() {
    ret void
  }

  define void @clobberred_forwarded_phi_avl() {
    ret void
  }

  declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

  declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #3

  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, ptr nocapture, i64) #3

  declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #4

  declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

  declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

  attributes #0 = { "target-features"="+v" }
  attributes #1 = { nounwind readnone }
  attributes #2 = { nounwind }
  attributes #3 = { nounwind readonly }
  attributes #4 = { nounwind writeonly }

...
---
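# The vle64 in the entry block and the vadd/vsub in both branches share the
# same SEW/LMUL and AVL, so a single vsetvli in bb.0 should cover all three
# instructions and no vsetvli should appear in bb.1 or bb.2.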
name:            load_add_or_sub
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: vr }
  - { id: 7, class: gprnox0 }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%4' }
  - { reg: '$x11', virtual-reg: '%5' }
  - { reg: '$v8', virtual-reg: '%6' }
  - { reg: '$x12', virtual-reg: '%7' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: load_add_or_sub
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $x11, $v8, $x12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_]]
  ; CHECK-NEXT:   PseudoRET implicit $v8
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $x11, $v8, $x12

    %7:gprnox0 = COPY $x12
    %6:vr = COPY $v8
    %5:gpr = COPY $x11
    %4:gpr = COPY $x10
    %0:vr = PseudoVLE64_V_M1 undef $noreg, %5, %7, 6, 0
    %8:gpr = COPY $x0
    BEQ %4, %8, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    %1:vr = PseudoVADD_VV_M1 undef $noreg, %0, %6, %7, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    %2:vr = PseudoVSUB_VV_M1 undef $noreg, %0, %6, %7, 6, 0

  bb.3.if.end:
    %3:vr = PHI %1, %bb.1, %2, %bb.2
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
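# The vzext/vsext need e64/m1 while the vle32 uses e32/mf2 with the same AVL,
# so each branch is expected to get a VL-preserving (x0,x0) vsetvli to switch
# vtype; the vse64 in bb.3 can then reuse that state.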
name:            load_zext_or_sext
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: gpr }
  - { id: 7, class: gprnox0 }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%4' }
  - { reg: '$x11', virtual-reg: '%5' }
  - { reg: '$x12', virtual-reg: '%6' }
  - { reg: '$x13', virtual-reg: '%7' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: load_zext_or_sext
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $x11, $x12, $x13
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x13
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x12
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 undef $noreg, [[COPY2]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   early-clobber %9:vr = PseudoVZEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   early-clobber %9:vr = PseudoVSEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   PseudoVSE64_V_M1 %9, [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoRET
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $x11, $x12, $x13

    %7:gprnox0 = COPY $x13
    %6:gpr = COPY $x12
    %5:gpr = COPY $x11
    %4:gpr = COPY $x10
    %0:vr = PseudoVLE32_V_MF2 undef $noreg, %5, %7, 5, 0
    %8:gpr = COPY $x0
    BEQ %4, %8, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    early-clobber %1:vr = PseudoVZEXT_VF2_M1 undef $noreg, %0, %7, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    early-clobber %2:vr = PseudoVSEXT_VF2_M1 undef $noreg, %0, %7, 6, 0

  bb.3.if.end:
    %3:vr = PHI %1, %bb.1, %2, %bb.2
    PseudoVSE64_V_M1 %3, %6, %7, 6
    PseudoRET

...
---
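# Both branches set e64/m1 with the same AVL, and vmv.x.s only demands vtype
# (not VL), so no extra vsetvli is expected before the PseudoVMV_X_S in bb.3.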
name:            vmv_x_s
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: gpr }
  - { id: 4, class: vr }
  - { id: 5, class: vr }
  - { id: 6, class: gprnox0 }
  - { id: 7, class: gpr }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%3' }
  - { reg: '$v8', virtual-reg: '%4' }
  - { reg: '$v9', virtual-reg: '%5' }
  - { reg: '$x11', virtual-reg: '%6' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: vmv_x_s
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $v8, $v9, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v9
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY1]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[PseudoVADD_VV_M1_]], 6 /* e64 */, implicit $vtype
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVMV_X_S]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $v8, $v9, $x11

    %6:gprnox0 = COPY $x11
    %5:vr = COPY $v9
    %4:vr = COPY $v8
    %3:gpr = COPY $x10
    %7:gpr = COPY $x0
    BEQ %3, %7, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    %0:vr = PseudoVADD_VV_M1 undef $noreg, %4, %5, %6, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    %1:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %5, %6, 6, 0

  bb.3.if.end:
    %2:vr = PHI %0, %bb.1, %1, %bb.2
    %8:gpr = PseudoVMV_X_S %2, 6
    $x10 = COPY %8
    PseudoRET implicit $x10

...
---
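# The AVL comes from an explicit vsetvli in the entry block; its VL/VTYPE
# state already covers the vadd and vsub in the branches, so no additional
# vsetvli should be inserted and the VL operands can be replaced with $noreg.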
name:            vsetvli_add_or_sub
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gprnox0 }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: vr }
  - { id: 6, class: vr }
  - { id: 7, class: gprnox0 }
  - { id: 8, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%4' }
  - { reg: '$v8', virtual-reg: '%5' }
  - { reg: '$v9', virtual-reg: '%6' }
  - { reg: '$x11', virtual-reg: '%7' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: vsetvli_add_or_sub
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.1(0x50000000)
  ; CHECK-NEXT:   liveins: $x10, $v8, $v9, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v9
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   BEQ [[COPY3]], $x0, %bb.2
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.if.then:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.if.else:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.if.end:
  ; CHECK-NEXT:   $v8 = COPY [[PseudoVADD_VV_M1_]]
  ; CHECK-NEXT:   PseudoRET implicit $v8
  bb.0.entry:
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $v8, $v9, $x11

    %7:gprnox0 = COPY $x11
    %6:vr = COPY $v9
    %5:vr = COPY $v8
    %4:gpr = COPY $x10
    %0:gprnox0 = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
    %8:gpr = COPY $x0
    BEQ %4, %8, %bb.2
    PseudoBR %bb.1

  bb.1.if.then:
    %1:vr = PseudoVADD_VV_M1 undef $noreg, %5, %6, %0, 6, 0
    PseudoBR %bb.3

  bb.2.if.else:
    %2:vr = PseudoVSUB_VV_M1 undef $noreg, %5, %6, %0, 6, 0

  bb.3.if.end:
    %3:vr = PHI %1, %bb.1, %2, %bb.2
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
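# Mixed mask and vector code at VLMAX: the masked vle32 needs a tu policy and
# the vcpop/vadd run under different vtypes, so VL-preserving (x0,x0) vsetvlis
# are expected to switch vtype inside bb.1 and bb.3.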
name:            vsetvli_vcpop
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gpr, preferred-register: '' }
  - { id: 3, class: vr, preferred-register: '' }
  - { id: 4, class: vrnov0, preferred-register: '' }
  - { id: 5, class: vmv0, preferred-register: '' }
  - { id: 6, class: vrnov0, preferred-register: '' }
  - { id: 7, class: gpr, preferred-register: '' }
  - { id: 8, class: gpr, preferred-register: '' }
  - { id: 9, class: gpr, preferred-register: '' }
  - { id: 10, class: gpr, preferred-register: '' }
  - { id: 11, class: vr, preferred-register: '' }
body:             |
  ; CHECK-LABEL: name: vsetvli_vcpop
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVMSEQ_VI_MF2_:%[0-9]+]]:vmv0 = PseudoVMSEQ_VI_MF2 [[PseudoVID_V_MF2_]], 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   $v0 = COPY [[PseudoVMSEQ_VI_MF2_]]
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 23 /* e32, mf2, tu, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], [[COPY]], $v0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF
  ; CHECK-NEXT:   BEQ [[PseudoVCPOP_M_B1_]], $x0, %bb.3
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gpr = LWU [[COPY1]], 0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 undef $noreg, [[PseudoVLE32_V_MF2_MASK]], [[DEF]], -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   $v0 = COPY [[PseudoVADD_VX_MF2_]]
  ; CHECK-NEXT:   PseudoRET implicit $v0
  bb.0:
    successors: %bb.1(0x80000000)
    liveins: $x10, $x11

    %0:gpr = COPY $x11
    %1:gpr = COPY $x10
    %2:gpr = IMPLICIT_DEF
    %3:vr = PseudoVID_V_MF2 undef $noreg, -1, 6, 0
    %4:vrnov0 = PseudoVMV_V_I_MF2 undef $noreg, 0, -1, 5, 0

  bb.1:
    successors: %bb.2(0x40000000), %bb.3(0x40000000)

    %5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5
    $v0 = COPY %5
    %6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0
    %7:gpr = PseudoVCPOP_M_B1 %5, -1, 0
    %8:gpr = COPY $x0
    BEQ killed %7, %8, %bb.3
    PseudoBR %bb.2

  bb.2:
    successors: %bb.3(0x80000000)

    %9:gpr = LWU %1, 0

  bb.3:
    %10:gpr = PHI %2, %bb.1, %9, %bb.2
    %11:vr = nsw PseudoVADD_VX_MF2 undef $noreg, %6, %10, -1, 5, 0
    $v0 = COPY %11
    PseudoRET implicit $v0
...
---
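# The vid.v in the preheader runs at e64/m1 and the vse32 in the loop only
# needs the same VL and SEW/LMUL ratio, so a single vsetvli before the loop
# should suffice and none should appear in the loop body.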
name:            vsetvli_loop_store
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gpr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
  - { id: 4, class: vr,  preferred-register: '' }
  - { id: 5, class: gpr, preferred-register: '' }
  - { id: 6, class: gpr, preferred-register: '' }
  - { id: 7, class: vr,  preferred-register: '' }
  - { id: 8, class: gpr, preferred-register: '' }
  - { id: 9, class: gpr, preferred-register: '' }
  - { id: 10, class: gpr, preferred-register: '' }
body:             |
  ; CHECK-LABEL: name: vsetvli_loop_store
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
  ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[COPY2]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[COPY2]], [[SRLI]]
  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
  ; CHECK-NEXT:   PseudoVSE32_V_MF2 [[PseudoVADD_VX_M1_]], [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = ADDI [[COPY2]], 1
  ; CHECK-NEXT:   BLTU [[COPY2]], [[COPY1]], %bb.1
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   PseudoRET
  bb.0:
    liveins: $x10, $x11
    %0:gpr = COPY $x10
    %1:gpr = PseudoReadVLENB
    %2:gpr = SRLI %1:gpr, 3
    %3:gpr = COPY $x11
    %4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
    %5:gpr = COPY $x0

  bb.1:
    successors: %bb.1, %bb.2

    %6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.1
    %7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
    %8:gpr = MUL %6:gpr, %2:gpr
    %9:gpr = ADD %0:gpr, %8:gpr
    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
    %10:gpr = ADDI %6:gpr, 1
    BLTU %10:gpr, %3:gpr, %bb.1
    PseudoBR %bb.2

  bb.2:

    PseudoRET
...
---
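# Same as vsetvli_loop_store, but with the loop latch split into its own
# block; still no vsetvli is expected inside the loop.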
name:            vsetvli_loop_store2
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr, preferred-register: '' }
  - { id: 1, class: gpr, preferred-register: '' }
  - { id: 2, class: gpr, preferred-register: '' }
  - { id: 3, class: gpr, preferred-register: '' }
  - { id: 4, class: vr,  preferred-register: '' }
  - { id: 5, class: gpr, preferred-register: '' }
  - { id: 6, class: gpr, preferred-register: '' }
  - { id: 7, class: vr,  preferred-register: '' }
  - { id: 8, class: gpr, preferred-register: '' }
  - { id: 9, class: gpr, preferred-register: '' }
  - { id: 10, class: gpr, preferred-register: '' }
body:             |
  ; CHECK-LABEL: name: vsetvli_loop_store2
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB
  ; CHECK-NEXT:   [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x11
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[COPY2]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:gpr = MUL [[COPY2]], [[SRLI]]
  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
  ; CHECK-NEXT:   PseudoVSE32_V_MF2 [[PseudoVADD_VX_M1_]], [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = ADDI [[COPY2]], 1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.3(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   BLTU [[COPY2]], [[COPY1]], %bb.1
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   PseudoRET
  bb.0:
    liveins: $x10, $x11
    %0:gpr = COPY $x10
    %1:gpr = PseudoReadVLENB
    %2:gpr = SRLI %1:gpr, 3
    %3:gpr = COPY $x11
    %4:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 3
    %5:gpr = COPY $x0

  bb.1:
    successors: %bb.3

    %6:gpr = PHI %5:gpr, %bb.0, %10:gpr, %bb.3
    %7:vr = PseudoVADD_VX_M1 undef $noreg, %4:vr, %6:gpr, -1, 6, 0
    %8:gpr = MUL %6:gpr, %2:gpr
    %9:gpr = ADD %0:gpr, %8:gpr
    PseudoVSE32_V_MF2 killed %7:vr, killed %9:gpr, -1, 5
    %10:gpr = ADDI %6:gpr, 1

  bb.3:
    successors: %bb.1, %bb.2
    BLTU %10:gpr, %3:gpr, %bb.1
    PseudoBR %bb.2

  bb.2:

    PseudoRET
...
---
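# Fixed-length loop at VL=4, e32/m1: one vsetivli in the entry block should
# cover the loop body and the reduction; only the final vse32 at VL=1 needs a
# second vsetivli.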
name:            redusum_loop
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: gpr }
  - { id: 7, class: gpr }
  - { id: 8, class: gpr }
  - { id: 9, class: gpr }
  - { id: 10, class: vr }
  - { id: 11, class: vr }
  - { id: 12, class: vr }
  - { id: 13, class: gpr }
  - { id: 14, class: vr }
  - { id: 15, class: vr }
  - { id: 16, class: vr }
  - { id: 17, class: vr }
  - { id: 18, class: gpr }
  - { id: 19, class: gpr }
  - { id: 20, class: vr }
  - { id: 21, class: vr }
  - { id: 22, class: vr }
  - { id: 23, class: vr }
  - { id: 24, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%6' }
  - { reg: '$x12', virtual-reg: '%8' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  ; CHECK-LABEL: name: redusum_loop
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x12
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 4, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[LUI:%[0-9]+]]:gpr = LUI 1
  ; CHECK-NEXT:   [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LUI]], -2048
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1.vector.body:
  ; CHECK-NEXT:   successors: %bb.2(0x04000000), %bb.1(0x7c000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 undef $noreg, [[COPY1]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
  ; CHECK-NEXT:   [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE32_V_M1_]], [[PseudoVMV_V_I_M1_]], 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[ADDIW:%[0-9]+]]:gpr = nsw ADDI [[ADDIW]], -4
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = ADDI [[COPY1]], 16
  ; CHECK-NEXT:   BNE [[ADDIW]], $x0, %bb.1
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.middle.block:
  ; CHECK-NEXT:   [[PseudoVMV_S_X:%[0-9]+]]:vr = PseudoVMV_S_X undef $noreg, $x0, 1, 5 /* e32 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, [[PseudoVMV_V_I_M1_]], [[PseudoVMV_S_X]], 4, 5 /* e32 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   PseudoVSE32_V_M1 [[PseudoVREDSUM_VS_M1_E8_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
  ; CHECK-NEXT:   PseudoRET
  bb.0.entry:
    liveins: $x10, $x12

    %8:gpr = COPY $x12
    %6:gpr = COPY $x10
    %11:vr = PseudoVMV_V_I_M1 undef $noreg, 0, 4, 5, 0
    %12:vr = COPY %11
    %10:vr = COPY %12
    %13:gpr = LUI 1
    %9:gpr = ADDIW killed %13, -2048

  bb.1.vector.body:
    successors: %bb.2(0x04000000), %bb.1(0x7c000000)

    %0:gpr = PHI %6, %bb.0, %5, %bb.1
    %1:gpr = PHI %9, %bb.0, %4, %bb.1
    %2:vr = PHI %10, %bb.0, %16, %bb.1
    %14:vr = PseudoVLE32_V_M1 undef $noreg, %0, 4, 5, 0 :: (load (s128) from %ir.lsr.iv12, align 4)
    %16:vr = PseudoVADD_VV_M1 undef $noreg, killed %14, %2, 4, 5, 0
    %4:gpr = nsw ADDI %1, -4
    %5:gpr = ADDI %0, 16
    %18:gpr = COPY $x0
    BNE %4, %18, %bb.1
    PseudoBR %bb.2

  bb.2.middle.block:
    %19:gpr = COPY $x0
    %20:vr = PseudoVMV_S_X undef $noreg, %19, 1, 5
    %23:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, %16, killed %20, 4, 5, 1
    PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
    PseudoRET

...
---
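# Regression test: the e64 mask setup in bb.0/bb.1 and the e8 masked indexed
# load/store in bb.2/bb.3 all run at VLMAX, so only VL-preserving (x0,x0)
# vsetvlis that switch vtype are expected in bb.2 and bb.3.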
name:            vsetvli_vluxei64_regression
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: vsetvli_vluxei64_regression
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11, $x12, $v0, $v1, $v2, $v3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %a:gpr = COPY $x10
  ; CHECK-NEXT:   %b:gpr = COPY $x11
  ; CHECK-NEXT:   %inaddr:gpr = COPY $x12
  ; CHECK-NEXT:   %idxs:vr = COPY $v0
  ; CHECK-NEXT:   %t1:vr = COPY $v1
  ; CHECK-NEXT:   %t3:vr = COPY $v2
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vrnov0 = COPY $v3
  ; CHECK-NEXT:   %t5:vrnov0 = COPY $v1
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0 /* e8 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   BEQ %a, $x0, %bb.3
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   $v0 = COPY %mask
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   early-clobber [[COPY]]:vrnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, %inaddr, %idxs, $v0, -1, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   $v0 = COPY %mask
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   PseudoVSOXEI64_V_M1_MF8_MASK [[COPY]], %b, %idxs, $v0, -1, 3 /* e8 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoRET
  bb.0:
    successors: %bb.1
    liveins: $x10, $x11, $x12, $v0, $v1, $v2, $v3

    %a:gpr = COPY $x10
    %b:gpr = COPY $x11
    %inaddr:gpr = COPY $x12
    %idxs:vr = COPY $v0
    %t1:vr = COPY $v1
    %t3:vr = COPY $v2
    %t4:vr = COPY $v3
    %t5:vrnov0 = COPY $v1
    %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6
    PseudoBR %bb.1

  bb.1:
    successors: %bb.3, %bb.2

    %mask:vr = PseudoVMANDN_MM_MF8 %t6, %t3, -1, 0
    %t2:gpr = COPY $x0
    BEQ %a, %t2, %bb.3
    PseudoBR %bb.2

  bb.2:
    successors: %bb.3

    $v0 = COPY %mask
    early-clobber %t0:vrnov0 = PseudoVLUXEI64_V_M1_MF8_MASK %t5, killed %inaddr, %idxs, $v0, -1, 3, 1
    %ldval:vr = COPY %t0
    PseudoBR %bb.3

  bb.3:
    %stval:vr = PHI %t4, %bb.1, %ldval, %bb.2
    $v0 = COPY %mask
    PseudoVSOXEI64_V_M1_MF8_MASK killed %stval, killed %b, %idxs, $v0, -1, 3
    PseudoRET

...
---
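# Conditional block inside a loop: bb.2 switches to e8/mf8, so the e64/m1
# state must be re-established at the top of bb.1 because the back edge from
# bb.3 may carry either state.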
name:            if_in_loop
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: if_in_loop
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %dst:gpr = COPY $x10
  ; CHECK-NEXT:   %src:gpr = COPY $x11
  ; CHECK-NEXT:   dead [[COPY:%[0-9]+]]:gpr = COPY $x12
  ; CHECK-NEXT:   %tc:gpr = COPY $x13
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x14
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x15
  ; CHECK-NEXT:   %vlenb:gpr = PseudoReadVLENB
  ; CHECK-NEXT:   %inc:gpr = SRLI %vlenb, 3
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 undef $noreg, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x0
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr = ADD [[COPY2]], [[COPY3]]
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 undef $noreg, [[PseudoVID_V_M1_]], [[ADD]], -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[PseudoVMSLTU_VX_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VX_M1 [[PseudoVADD_VX_M1_]], [[COPY1]], -1, 6 /* e64 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSLTU_VX_M1_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   BEQ [[PseudoVCPOP_M_B1_]], $x0, %bb.3
  ; CHECK-NEXT:   PseudoBR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr = ADD %src, [[COPY3]]
  ; CHECK-NEXT:   [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, [[ADD1]], -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:gpr = ADD %dst, [[COPY3]]
  ; CHECK-NEXT:   PseudoVSE8_V_MF8 [[PseudoVADD_VI_MF8_]], [[ADD2]], -1, 3 /* e8 */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   successors: %bb.1(0x7c000000), %bb.4(0x04000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = ADD [[COPY3]], %inc
  ; CHECK-NEXT:   BLTU [[COPY3]], %tc, %bb.1
  ; CHECK-NEXT:   PseudoBR %bb.4
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.4:
  ; CHECK-NEXT:   PseudoRET
  bb.0:
    successors: %bb.1(0x80000000)
    liveins: $x10, $x11, $x12, $x13, $x14, $x15

    %dst:gpr = COPY $x10
    %src:gpr = COPY $x11
    %48:gpr = COPY $x12
    %tc:gpr = COPY $x13
    %11:gpr = COPY $x14
    %12:gpr = COPY $x15
    %vlenb:gpr = PseudoReadVLENB
    %inc:gpr = SRLI killed %vlenb, 3
    %10:vr = PseudoVID_V_M1 undef $noreg, -1, 6, 0
    %59:gpr = COPY $x0
    PseudoBR %bb.1

  bb.1:
    successors: %bb.2(0x40000000), %bb.3(0x40000000)

    %26:gpr = PHI %59, %bb.0, %28, %bb.3
    %61:gpr = ADD %12, %26
    %27:vr = PseudoVADD_VX_M1 undef $noreg, %10, killed %61, -1, 6, 0
    %62:vr = PseudoVMSLTU_VX_M1 %27, %11, -1, 6
    %63:gpr = PseudoVCPOP_M_B1 %62, -1, 0
    %64:gpr = COPY $x0
    BEQ killed %63, %64, %bb.3
    PseudoBR %bb.2

  bb.2:
    successors: %bb.3(0x80000000)

    %66:gpr = ADD %src, %26
    %67:vrnov0 = PseudoVLE8_V_MF8 undef $noreg, killed %66, -1, 3, 0
    %76:vrnov0 = PseudoVADD_VI_MF8 undef $noreg, %67, 4, -1, 3, 0
    %77:gpr = ADD %dst, %26
    PseudoVSE8_V_MF8 killed %76, killed %77, -1, 3

  bb.3:
    successors: %bb.1(0x7c000000), %bb.4(0x04000000)

    %28:gpr = ADD %26, %inc
    BLTU %28, %tc, %bb.1
    PseudoBR %bb.4

  bb.4:
    PseudoRET

...
---
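# PRE of the vsetvli state: vmv.x.s does not demand VL, so the vsetivli it
# needs is expected to be placed in bb.0 rather than inside the self-loop
# bb.1.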
name: pre_undemanded_vl
body: |
  ; CHECK-LABEL: name: pre_undemanded_vl
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   PseudoBR %bb.1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead %x:gpr = PseudoVMV_X_S undef $noreg, 6 /* e64 */, implicit $vtype
  ; CHECK-NEXT:   PseudoBR %bb.1
  bb.0:
    PseudoBR %bb.1
  bb.1:
    %x:gpr = PseudoVMV_X_S undef $noreg, 6
    PseudoBR %bb.1
...
---
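# The forwarded AVL register %avl is clobbered in bb.1, so the pass must copy
# the original AVL value in bb.0 and use that copy for the vsetvli it inserts
# before the second vadd in bb.2.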
name: clobberred_forwarded_avl
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: clobberred_forwarded_avl
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
  ; CHECK-NEXT:   liveins: $x10, $v8m2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %avl:gprnox0 = COPY $x10
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY %avl
  ; CHECK-NEXT:   dead %outvl:gprnox0 = PseudoVSETVLI %avl, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $v8m2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead %avl:gprnox0 = ADDI %avl, 1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   liveins: $v8m2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, renamable $v8m2, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoRET implicit $v8m2
  bb.0:
    liveins: $x10, $v8m2
    %avl:gprnox0 = COPY $x10
    %outvl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype

  bb.1:
    liveins: $v8m2
    %avl:gprnox0 = ADDI %avl:gprnox0, 1

  bb.2:
    liveins: $v8m2
    renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, renamable $v8m2, renamable $v8m2, -1, 5, 0
    renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed renamable $v8m2, %outvl:gprnox0, 5, 0
    PseudoRET implicit $v8m2
...
---
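# Same as clobberred_forwarded_avl, but the forwarded AVL comes from a PHI and
# is clobbered in bb.3; a copy of it is still needed for the vsetvli in bb.4.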
name: clobberred_forwarded_phi_avl
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: clobberred_forwarded_phi_avl
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x10, $x11, $v8m2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %v:vrm2 = COPY $v8m2
  ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, 1
  ; CHECK-NEXT:   %x:gpr = COPY $x10
  ; CHECK-NEXT:   %y:gpr = COPY $x11
  ; CHECK-NEXT:   BEQ %x, %y, %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[ADDI:%[0-9]+]]:gprnox0 = ADDI $x0, 2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY [[ADDI]]
  ; CHECK-NEXT:   dead %outvl:gprnox0 = PseudoVSETVLI [[ADDI]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead [[ADDI:%[0-9]+]]:gprnox0 = ADDI [[ADDI]], 1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.4:
  ; CHECK-NEXT:   dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, %v, %v, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLI [[COPY]], 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, %v, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoRET implicit $v8m2
  bb.0:
    liveins: $x10, $x11, $v8m2
    %v:vrm2 = COPY $v8m2
    %a:gpr = ADDI $x0, 1
    %x:gpr = COPY $x10
    %y:gpr = COPY $x11
    BEQ %x, %y, %bb.2

  bb.1:
    %b:gpr = ADDI $x0, 2

  bb.2:
    %avl:gprnox0 = PHI %a, %bb.0, %b, %bb.1
    %outvl:gprnox0 = PseudoVSETVLI %avl:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype

  bb.3:
    %avl:gprnox0 = ADDI %avl:gprnox0, 1

  bb.4:
    renamable $v10m2 = PseudoVADD_VV_M2 undef renamable $v10m2, %v, %v, -1, 5, 0
    renamable $v8m2 = PseudoVADD_VV_M2 undef renamable $v8m2, killed renamable $v10m2, killed %v, %outvl:gprnox0, 5, 0
    PseudoRET implicit $v8m2