llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,RV32
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,RV64

# Special handling for i1-element vectors with non-zero index
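# As the checks below show, the i1 source is first promoted to an i8 vector by
# selecting between splats of 1 and 0, slid down by an element count derived
# from vlenb, extracted at index 0, and then compared (ne) against a zero splat
# to recover the i1 result.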
---
name:            extract_subvector_nxv2i1_nxv4i1
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_nxv2i1_nxv4i1
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
    ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
    ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
    ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C2]](s32)
    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s32)
    ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
    ; RV32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C4]](s32)
    ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
    ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_nxv2i1_nxv4i1
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
    ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SELECT [[DEF]](<vscale x 4 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
    ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
    ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
    ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
    ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s64)
    ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
    ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
    ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
    %1:_(<vscale x 2 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s1>), 2
    $v8 = COPY %1(<vscale x 2 x s1>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv2i1_nxv8i1
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_nxv2i1_nxv8i1
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
    ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
    ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
    ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C2]](s32)
    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s32)
    ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
    ; RV32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C4]](s32)
    ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
    ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_nxv2i1_nxv8i1
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
    ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SELECT [[DEF]](<vscale x 8 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
    ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
    ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
    ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
    ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s64)
    ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
    ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
    ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
    %1:_(<vscale x 2 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s1>), 2
    $v8 = COPY %1(<vscale x 2 x s1>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv4i1_nxv64i1
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_nxv4i1_nxv64i1
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; RV32-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C1]](s32)
    ; RV32-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[SELECT]](<vscale x 64 x s8>), 16
    ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C2]](s32)
    ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR2]]
    ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_nxv4i1_nxv64i1
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; RV64-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT1]](s64)
    ; RV64-NEXT: [[SELECT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SELECT [[DEF]](<vscale x 64 x s1>), [[SPLAT_VECTOR1]], [[SPLAT_VECTOR]]
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[SELECT]](<vscale x 64 x s8>), 16
    ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
    ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
    ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR2]]
    ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    %1:_(<vscale x 4 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 64 x s1>), 16
    $v8 = COPY %1(<vscale x 4 x s1>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv32i1_nxv64i1
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_nxv32i1_nxv64i1
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
    ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C1]](s32)
    ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 4 x s1>), [[C1]], 3
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
    ; RV32-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
    ; RV32-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_nxv32i1_nxv64i1
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
    ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C1]](s64)
    ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[C1]], 3
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
    ; RV64-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
    ; RV64-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    %1:_(<vscale x 32 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 64 x s1>), 32
    $v8 = COPY %1(<vscale x 32 x s1>)
    PseudoRET implicit $v8
...

# i1-element vectors with zero index
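# With a zero index the extract is kept as-is; the checks show only the
# nxv32i1-from-nxv64i1 case being bitcast to i8 vectors, extracted, and bitcast
# back to i1.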
---
name:            extract_subvector_nxv1i1_nxv4i1_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv1i1_nxv4i1_zero
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 4 x s1>), 0
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s1>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
    %1:_(<vscale x 1 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s1>), 0
    $v8 = COPY %1(<vscale x 1 x s1>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv4i1_nxv8i1_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv4i1_nxv8i1_zero
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s1>), 0
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s1>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
    %1:_(<vscale x 4 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s1>), 0
    $v8 = COPY %1(<vscale x 4 x s1>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv32i1_nxv64i1_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv32i1_nxv64i1_zero
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[DEF]](<vscale x 64 x s1>)
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[BITCAST]](<vscale x 8 x s8>), 0
    ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
    ; CHECK-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
    %1:_(<vscale x 32 x s1>) = G_EXTRACT_SUBVECTOR %0(<vscale x 64 x s1>), 0
    $v8 = COPY %1(<vscale x 32 x s1>)
    PseudoRET implicit $v8
...

# Non-i1 element vectors with zero index
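# The checks show the zero-index extract passing through the legalizer
# unchanged.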
---
name:            extract_subvector_nxv1i8_nxv2i8_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv1i8_nxv2i8_zero
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 2 x s8>), 0
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
    %1:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR %0(<vscale x 2 x s8>), 0
    $v8 = COPY %1(<vscale x 1 x s8>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv2i16_nxv4i16_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv2i16_nxv4i16_zero
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 4 x s16>), 0
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 2 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
    %1:_(<vscale x 2 x s16>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s16>), 0
    $v8 = COPY %1(<vscale x 2 x s16>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv4i32_nxv8i32_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv4i32_nxv8i32_zero
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s32>), 0
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
    %1:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s32>), 0
    $v8 = COPY %1(<vscale x 4 x s32>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv4i64_nxv8i64_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv4i64_nxv8i64_zero
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s64>), 0
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
    %1:_(<vscale x 4 x s64>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s64>), 0
    $v8 = COPY %1(<vscale x 4 x s64>)
    PseudoRET implicit $v8
...

# Non-i1 element vectors with non-zero index
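# As the checks show, extracts whose index falls on a subregister boundary
# (nxv4i32 from nxv8i32 at 4, nxv2i64 from nxv8i64 at 2) are kept as-is; the
# remaining cases are lowered to a G_VSLIDEDOWN_VL by a vlenb-derived amount
# followed by an extract at index 0.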
---
name:            extract_subvector_nxv1i8_nxv2i8
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_nxv1i8_nxv2i8
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s8>), 0
    ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_nxv1i8_nxv2i8
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
    ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s8>), 0
    ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
    %1:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR %0(<vscale x 2 x s8>), 1
    $v8 = COPY %1(<vscale x 1 x s8>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv1i16_nxv4i16
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_nxv1i16_nxv4i16
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s16>), 0
    ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s16>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_nxv1i16_nxv4i16
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
    ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s16>), 0
    ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s16>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
    %1:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR %0(<vscale x 4 x s16>), 2
    $v8 = COPY %1(<vscale x 1 x s16>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv4i32_nxv8i32
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv4i32_nxv8i32
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s32>), 4
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
    %1:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s32>), 4
    $v8 = COPY %1(<vscale x 4 x s32>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_nxv2i64_nxv8i64
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; CHECK-LABEL: name: extract_subvector_nxv2i64_nxv8i64
    ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
    ; CHECK-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 8 x s64>), 2
    ; CHECK-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 2 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
    %1:_(<vscale x 2 x s64>) = G_EXTRACT_SUBVECTOR %0(<vscale x 8 x s64>), 2
    $v8 = COPY %1(<vscale x 2 x s64>)
    PseudoRET implicit $v8
...
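# The two subregidx tests below extract nxv1i32 from nxv16i32 (indices 1 and
# 3); the checks show a containing nxv2i32 subvector being extracted first,
# then slid down to reach the remaining element offset.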
---
name:            extract_subvector_subregidx_zero
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_subregidx_zero
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 0
    ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
    ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
    ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_subregidx_zero
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 0
    ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
    ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
    ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
    ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
    %1:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 16 x s32>), 1
    $v8 = COPY %1(<vscale x 1 x s32>)
    PseudoRET implicit $v8
...
---
name:            extract_subvector_subregidx
legalized:       false
tracksRegLiveness: true
body:             |
  bb.0.entry:
    ; RV32-LABEL: name: extract_subvector_subregidx
    ; RV32: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 2
    ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
    ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
    ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV32-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
    ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
    ; RV32-NEXT: PseudoRET implicit $v8
    ;
    ; RV64-LABEL: name: extract_subvector_subregidx
    ; RV64: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]](<vscale x 16 x s32>), 2
    ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
    ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
    ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
    ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
    ; RV64-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
    ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
    ; RV64-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
    %1:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR %0(<vscale x 16 x s32>), 3
    $v8 = COPY %1(<vscale x 1 x s32>)
    PseudoRET implicit $v8
...