llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v -run-pass=liveintervals,riscv-insert-vsetvli \
# RUN:     -verify-machineinstrs | FileCheck %s

--- |
  source_filename = "vsetvli-insert.ll"
  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
  target triple = "riscv64"

  define <vscale x 1 x i64> @add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %a
  }

  define <vscale x 1 x i64> @load_add(ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %b
  }

  define <vscale x 1 x i64> @load_zext(ptr %0, i64 %1) #0 {
  entry:
    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, ptr %0, i64 %1)
    %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %1)
    ret <vscale x 1 x i64> %b
  }

  declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1

  define i64 @vmv_x_s(<vscale x 1 x i64> %0) #0 {
  entry:
    %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
    ret i64 %a
  }

  define void @add_v2i64(ptr %x, ptr %y) #0 {
    %a = load <2 x i64>, ptr %x, align 16
    %b = load <2 x i64>, ptr %y, align 16
    %c = add <2 x i64> %a, %b
    store <2 x i64> %c, ptr %x, align 16
    ret void
  }

  declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2

  define i64 @vreduce_add_v2i64(ptr %x) #0 {
    %v = load <2 x i64>, ptr %x, align 16
    %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
    ret i64 %red
  }

  declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3

  define <vscale x 1 x i64> @vsetvli_add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
  entry:
    %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %a)
    ret <vscale x 1 x i64> %b
  }

  define <vscale x 1 x i64> @load_add_inlineasm(ptr %0, <vscale x 1 x i64> %1, i64 %2) #0 {
  entry:
    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, ptr %0, i64 %2)
    call void asm sideeffect "", ""()
    %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
    ret <vscale x 1 x i64> %b
  }

  define void @vmv_v_i_different_lmuls() {
    ret void
  }

  define void @pre_same_sewlmul_ratio() {
    ret void
  }

  define void @postpass_modify_vl() {
    ret void
  }

  define void @coalesce_dead_avl_addi() {
    ret void
  }

  define void @coalesce_dead_avl_nonvolatile_load() {
    ret void
  }

  define void @coalesce_dead_avl_volatile_load() {
    ret void
  }

  define void @coalesce_shrink_removed_vsetvlis_uses() {
    ret void
  }

  declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, ptr nocapture, i64) #4

  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, ptr nocapture, i64) #4

  declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1

  attributes #0 = { "target-features"="+v" }
  attributes #1 = { nounwind readnone }
  attributes #2 = { nofree nosync nounwind readnone willreturn }
  attributes #3 = { nounwind }
  attributes #4 = { nounwind readonly }

...
---
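# a vsetvli using the AVL in %2 (e64, m1) should be inserted before the vadd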
name:            add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: vr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
  - { reg: '$v9', virtual-reg: '%1' }
  - { reg: '$x10', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8, $v9, $x10

    ; CHECK-LABEL: name: add
    ; CHECK: liveins: $v8, $v9, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
    %3:vr = PseudoVADD_VV_M1 undef $noreg, %0, %1, %2, 6, 0
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
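# the vle64 and vadd share the same AVL and vtype, so a single vsetvli should cover both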
name:            load_add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v8', virtual-reg: '%1' }
  - { reg: '$x11', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: load_add
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 undef $noreg, %0, %2, 6, 0
    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %3, %1, %2, 6, 0
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
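# the vle32 (e32, mf2) and vzext (e64, m1) share the same AVL and SEW/LMUL ratio, so a
# single e64, m1 vsetvli should cover both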
name:            load_zext
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gprnox0 }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $x11

    ; CHECK-LABEL: name: load_zext
    ; CHECK: liveins: $x10, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 undef $noreg, [[COPY1]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 undef $noreg, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY %3
    ; CHECK-NEXT: PseudoRET implicit $v8
    %1:gprnox0 = COPY $x11
    %0:gpr = COPY $x10
    %2:vr = PseudoVLE32_V_MF2 undef $noreg, %0, %1, 5, 0
    early-clobber %3:vr = PseudoVZEXT_VF2_M1 undef $noreg, killed %2, %1, 6, 0
    $v8 = COPY %3
    PseudoRET implicit $v8

...
---
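# vmv.x.s only depends on vtype, not vl, so the inserted vsetivli just needs to set SEW
# (it uses an AVL of 1)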
name:            vmv_x_s
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: gpr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8

    ; CHECK-LABEL: name: vmv_x_s
    ; CHECK: liveins: $v8
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[COPY]], 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S]]
    ; CHECK-NEXT: PseudoRET implicit $x10
    %0:vr = COPY $v8
    %1:gpr = PseudoVMV_X_S %0, 6
    $x10 = COPY %1
    PseudoRET implicit $x10

...
---
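# fixed-length vectors with an immediate AVL of 2: one vsetivli should cover the loads,
# the add and the store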
name:            add_v2i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10, $x11

    ; CHECK-LABEL: name: add_v2i64
    ; CHECK: liveins: $x10, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoVSE64_V_M1 [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
    ; CHECK-NEXT: PseudoRET
    %1:gpr = COPY $x11
    %0:gpr = COPY $x10
    %2:vr = PseudoVLE64_V_M1 undef $noreg, %0, 2, 6, 0 :: (load (s128) from %ir.x)
    %3:vr = PseudoVLE64_V_M1 undef $noreg, %1, 2, 6, 0 :: (load (s128) from %ir.y)
    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %2, killed %3, 2, 6, 0
    PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x)
    PseudoRET

...
---
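# the vmv.v.i splat of the reduction identity uses VLMAX (AVL of -1) and needs its own
# vsetvli; the AVL then switches back to 2 for the reduction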
name:            vreduce_add_v2i64
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: vr }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
  - { id: 5, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0 (%ir-block.0):
    liveins: $x10

    ; CHECK-LABEL: name: vreduce_add_v2i64
    ; CHECK: liveins: $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
    ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, [[PseudoVLE64_V_M1_]], [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S]]
    ; CHECK-NEXT: PseudoRET implicit $x10
    %0:gpr = COPY $x10
    %1:vr = PseudoVLE64_V_M1 undef $noreg, %0, 2, 6, 0 :: (load (s128) from %ir.x)
    %2:vr = PseudoVMV_V_I_M1 undef $noreg, 0, -1, 6, 0
    %3:vr = PseudoVREDSUM_VS_M1_E8 undef $noreg, killed %1, killed %2, 2, 6, 1
    %5:gpr = PseudoVMV_X_S killed %3, 6
    $x10 = COPY %5
    PseudoRET implicit $x10

...
---
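# the explicit vsetvli already defines the vl/vtype needed by the vadd, so no extra
# vsetvli should be inserted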
name:            vsetvli_add
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: vr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: gprnox0 }
  - { id: 4, class: vr }
liveins:
  - { reg: '$v8', virtual-reg: '%0' }
  - { reg: '$v9', virtual-reg: '%1' }
  - { reg: '$x10', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $v8, $v9, $x10

    ; CHECK-LABEL: name: vsetvli_add
    ; CHECK: liveins: $v8, $v9, $x10
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: dead [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
    %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
    %4:vr = PseudoVADD_VV_M1 undef $noreg, %0, %1, killed %3, 6, 0
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
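# the inline asm is treated as potentially clobbering vl/vtype, so the vsetvli must be
# re-emitted after it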
name:            load_add_inlineasm
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: vr }
  - { id: 2, class: gprnox0 }
  - { id: 3, class: vr }
  - { id: 4, class: vr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$v8', virtual-reg: '%1' }
  - { reg: '$x11', virtual-reg: '%2' }
frameInfo:
  maxAlignment:    1
machineFunctionInfo: {}
body:             |
  bb.0.entry:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: load_add_inlineasm
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 undef $noreg, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK-NEXT: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 undef $noreg, %0, %2, 6, 0
    INLINEASM &"", 1 /* sideeffect attdialect */
    %4:vr = PseudoVADD_VV_M1 undef $noreg, killed %3, %1, %2, 6, 0
    $v8 = COPY %4
    PseudoRET implicit $v8

...
---
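# the vid (e64, m2) and vmv.v.i (e8, mf4) use different LMULs but the same AVL and
# SEW/LMUL ratio, so the second vsetvli should be the vl-preserving x0, x0 form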
name:            vmv_v_i_different_lmuls
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x10, $v8, $x11

    ; CHECK-LABEL: name: vmv_v_i_different_lmuls
    ; CHECK: liveins: $x10, $v8, $x11
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 217 /* e64, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: dead [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 undef $noreg, 4, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 198 /* e8, mf4, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
    ; CHECK-NEXT: dead [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 undef $noreg, 0, 4, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoRET
    %0:vrm2 = PseudoVID_V_M2 undef $noreg, 4, 6, 3
    %4:vr = PseudoVMV_V_I_MF4 undef $noreg, 0, 4, 3, 0
    PseudoRET
...
---
# make sure we don't try to perform PRE when the exit state of one of the blocks
# is SEW/LMUL ratio only
name: pre_same_sewlmul_ratio
tracksRegLiveness: true
body:             |
  ; CHECK-LABEL: name: pre_same_sewlmul_ratio
  ; CHECK: bb.0:
  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
  ; CHECK-NEXT:   liveins: $x10
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   %cond:gpr = COPY $x10
  ; CHECK-NEXT:   dead $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   BEQ %cond, $x0, %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.1:
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $noreg, 1, 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2:
  ; CHECK-NEXT:   successors: %bb.4(0x40000000), %bb.3(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   BEQ %cond, $x0, %bb.4
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3:
  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   PseudoCALL $noreg, csr_ilp32_lp64
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.4:
  ; CHECK-NEXT:   $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
  ; CHECK-NEXT:   dead [[PseudoVMV_X_S:%[0-9]+]]:gpr = PseudoVMV_X_S undef $noreg, 5 /* e32 */, implicit $vtype
  ; CHECK-NEXT:   dead [[PseudoVMV_V_I_MF2_1:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
  ; CHECK-NEXT:   PseudoRET
  bb.0:
    liveins: $x10
    %cond:gpr = COPY $x10
    %1:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5, 0
    BEQ %cond, $x0, %bb.2
  bb.1:
    %2:vr = PseudoVMV_V_I_M1 undef $noreg, 1, 2, 6, 0
  bb.2: ; the exit info here should have sew/lmul ratio only
    BEQ %cond, $x0, %bb.4
  bb.3:
    PseudoCALL $noreg, csr_ilp32_lp64
  bb.4: ; this block will have PRE attempted on it
    %4:gpr = PseudoVMV_X_S undef $noreg, 5
    %5:vr = PseudoVMV_V_I_MF2 undef $noreg, 1, 2, 5, 0
    PseudoRET
...
---
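# the COPY to $vl clobbers vl between the two identical vsetivlis, so the second one
# must be kept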
name:            postpass_modify_vl
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x1
    ; CHECK-LABEL: name: postpass_modify_vl
    ; CHECK: liveins: $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: dead [[COPY:%[0-9]+]]:gpr = COPY $vtype
    ; CHECK-NEXT: $vl = COPY $x1
    ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: dead [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 undef $noreg, undef $noreg, undef $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoRET
    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
    %1:gpr = COPY $vtype
    $vl = COPY $x1
    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
    %4:vr = PseudoVADD_VV_M1 undef $noreg, undef $noreg, undef $noreg, 3, 6, 0
    PseudoRET
...
---
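# the first vsetvli only provides vtype for the vmv.x.s and can be coalesced into the
# following vsetivli; the ADDI defining its now-dead AVL should be deleted too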
name: coalesce_dead_avl_addi
tracksRegLiveness: true
body:             |
  bb.0:
    ; CHECK-LABEL: name: coalesce_dead_avl_addi
    ; CHECK: $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: dead %x:gpr = PseudoVMV_X_S $noreg, 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoRET
    %avl:gprnox0 = ADDI $x0, 42
    dead $x0 = PseudoVSETVLI killed %avl, 216, implicit-def $vl, implicit-def $vtype
    %x:gpr = PseudoVMV_X_S $noreg, 6
    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
    $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6, 0
    PseudoRET
...
---
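# same as above, but the dead AVL is defined by a non-volatile load; the vsetvli is
# still coalesced and the load is left behind as dead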
name: coalesce_dead_avl_nonvolatile_load
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x1
    ; CHECK-LABEL: name: coalesce_dead_avl_nonvolatile_load
    ; CHECK: liveins: $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %ptr:gpr = COPY $x1
    ; CHECK-NEXT: dead %avl:gprnox0 = LW %ptr, 0 :: (dereferenceable load (s32))
    ; CHECK-NEXT: $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: dead %x:gpr = PseudoVMV_X_S $noreg, 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoRET
    %ptr:gpr = COPY $x1
    %avl:gprnox0 = LW killed %ptr, 0 :: (dereferenceable load (s32))
    dead $x0 = PseudoVSETVLI killed %avl, 216, implicit-def $vl, implicit-def $vtype
    %x:gpr = PseudoVMV_X_S $noreg, 6
    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
    $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6, 0
    PseudoRET
...
---
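# same as above with a volatile load defining the dead AVL; the volatile load must be kept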
name: coalesce_dead_avl_volatile_load
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $x1
    ; CHECK-LABEL: name: coalesce_dead_avl_volatile_load
    ; CHECK: liveins: $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %ptr:gpr = COPY $x1
    ; CHECK-NEXT: dead %avl:gprnox0 = LW %ptr, 0 :: (volatile dereferenceable load (s32))
    ; CHECK-NEXT: $x0 = PseudoVSETIVLI 3, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: dead %x:gpr = PseudoVMV_X_S $noreg, 6 /* e64 */, implicit $vtype
    ; CHECK-NEXT: $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoRET
    %ptr:gpr = COPY $x1
    %avl:gprnox0 = LW killed %ptr, 0 :: (volatile dereferenceable load (s32))
    dead $x0 = PseudoVSETVLI killed %avl, 216, implicit-def $vl, implicit-def $vtype
    %x:gpr = PseudoVMV_X_S $noreg, 6
    dead $x0 = PseudoVSETIVLI 3, 216, implicit-def $vl, implicit-def $vtype
    $v0 = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 3, 6, 0
    PseudoRET
...
---
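# when the first vsetvli is coalesced away, the live interval of its AVL (%avl1) must be
# shrunk so the now-unused ADDI can be deleted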
name: coalesce_shrink_removed_vsetvlis_uses
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x10, $v8
    ; CHECK-LABEL: name: coalesce_shrink_removed_vsetvlis_uses
    ; CHECK: liveins: $x10, $v8
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: %avl2:gprnox0 = ADDI $x0, 2
    ; CHECK-NEXT: dead $x0 = PseudoVSETVLI %avl2, 209 /* e32, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
    ; CHECK-NEXT: %x:gpr = COPY $x10
    ; CHECK-NEXT: renamable $v8 = PseudoVMV_S_X undef renamable $v8, %x, 1, 5 /* e32 */, implicit $vl, implicit $vtype
    ; CHECK-NEXT: PseudoRET implicit $v8
    %avl1:gprnox0 = ADDI $x0, 1
    dead $x0 = PseudoVSETVLI %avl1:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
    %avl2:gprnox0 = ADDI $x0, 2
    dead $x0 = PseudoVSETVLI %avl2:gprnox0, 209, implicit-def dead $vl, implicit-def dead $vtype
    %x:gpr = COPY $x10
    renamable $v8 = PseudoVMV_S_X undef renamable $v8, killed renamable %x, 1, 5
    PseudoRET implicit $v8