# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
# RUN: -o - | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
# RUN: -o - | FileCheck -check-prefix=RV64I %s
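# Check that register bank selection for scalable-vector G_LOAD assigns the
# pointer operand to the GPR bank (gprb) and the loaded value to the vector
# register bank (vrb) on both RV32 and RV64.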
--- |
define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
%va = load <vscale x 1 x i8>, ptr %pa, align 1
ret <vscale x 1 x i8> %va
}
define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
%va = load <vscale x 2 x i8>, ptr %pa, align 2
ret <vscale x 2 x i8> %va
}
define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
%va = load <vscale x 4 x i8>, ptr %pa, align 4
ret <vscale x 4 x i8> %va
}
define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
%va = load <vscale x 8 x i8>, ptr %pa, align 8
ret <vscale x 8 x i8> %va
}
define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
%va = load <vscale x 16 x i8>, ptr %pa, align 16
ret <vscale x 16 x i8> %va
}
define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
%va = load <vscale x 32 x i8>, ptr %pa, align 32
ret <vscale x 32 x i8> %va
}
define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
%va = load <vscale x 64 x i8>, ptr %pa, align 64
ret <vscale x 64 x i8> %va
}
define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
%va = load <vscale x 1 x i16>, ptr %pa, align 2
ret <vscale x 1 x i16> %va
}
define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
%va = load <vscale x 2 x i16>, ptr %pa, align 4
ret <vscale x 2 x i16> %va
}
define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
%va = load <vscale x 4 x i16>, ptr %pa, align 8
ret <vscale x 4 x i16> %va
}
define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
%va = load <vscale x 8 x i16>, ptr %pa, align 16
ret <vscale x 8 x i16> %va
}
define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
%va = load <vscale x 16 x i16>, ptr %pa, align 32
ret <vscale x 16 x i16> %va
}
define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
%va = load <vscale x 32 x i16>, ptr %pa, align 64
ret <vscale x 32 x i16> %va
}
define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
%va = load <vscale x 1 x i32>, ptr %pa, align 4
ret <vscale x 1 x i32> %va
}
define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
%va = load <vscale x 2 x i32>, ptr %pa, align 8
ret <vscale x 2 x i32> %va
}
define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
%va = load <vscale x 4 x i32>, ptr %pa, align 16
ret <vscale x 4 x i32> %va
}
define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
%va = load <vscale x 8 x i32>, ptr %pa, align 32
ret <vscale x 8 x i32> %va
}
define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
%va = load <vscale x 16 x i32>, ptr %pa, align 64
ret <vscale x 16 x i32> %va
}
define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
%va = load <vscale x 1 x i64>, ptr %pa, align 8
ret <vscale x 1 x i64> %va
}
define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
%va = load <vscale x 2 x i64>, ptr %pa, align 16
ret <vscale x 2 x i64> %va
}
define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
%va = load <vscale x 4 x i64>, ptr %pa, align 32
ret <vscale x 4 x i64> %va
}
define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
%va = load <vscale x 8 x i64>, ptr %pa, align 64
ret <vscale x 8 x i64> %va
}
define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
%va = load <vscale x 16 x i8>, ptr %pa, align 1
ret <vscale x 16 x i8> %va
}
define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
%va = load <vscale x 16 x i8>, ptr %pa, align 2
ret <vscale x 16 x i8> %va
}
define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
%va = load <vscale x 16 x i8>, ptr %pa, align 16
ret <vscale x 16 x i8> %va
}
define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
%va = load <vscale x 16 x i8>, ptr %pa, align 64
ret <vscale x 16 x i8> %va
}
define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
%va = load <vscale x 4 x i16>, ptr %pa, align 1
ret <vscale x 4 x i16> %va
}
define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
%va = load <vscale x 4 x i16>, ptr %pa, align 2
ret <vscale x 4 x i16> %va
}
define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
%va = load <vscale x 4 x i16>, ptr %pa, align 4
ret <vscale x 4 x i16> %va
}
define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
%va = load <vscale x 4 x i16>, ptr %pa, align 8
ret <vscale x 4 x i16> %va
}
define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
%va = load <vscale x 4 x i16>, ptr %pa, align 16
ret <vscale x 4 x i16> %va
}
define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
%va = load <vscale x 2 x i32>, ptr %pa, align 2
ret <vscale x 2 x i32> %va
}
define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
%va = load <vscale x 2 x i32>, ptr %pa, align 4
ret <vscale x 2 x i32> %va
}
define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
%va = load <vscale x 2 x i32>, ptr %pa, align 8
ret <vscale x 2 x i32> %va
}
define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
%va = load <vscale x 2 x i32>, ptr %pa, align 16
ret <vscale x 2 x i32> %va
}
define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
%va = load <vscale x 2 x i32>, ptr %pa, align 256
ret <vscale x 2 x i32> %va
}
define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
%va = load <vscale x 2 x i64>, ptr %pa, align 4
ret <vscale x 2 x i64> %va
}
define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
%va = load <vscale x 2 x i64>, ptr %pa, align 8
ret <vscale x 2 x i64> %va
}
define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
%va = load <vscale x 2 x i64>, ptr %pa, align 16
ret <vscale x 2 x i64> %va
}
define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
%va = load <vscale x 2 x i64>, ptr %pa, align 32
ret <vscale x 2 x i64> %va
}
define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
%va = load <vscale x 1 x ptr>, ptr %pa, align 4
ret <vscale x 1 x ptr> %va
}
define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
%va = load <vscale x 2 x ptr>, ptr %pa, align 8
ret <vscale x 2 x ptr> %va
}
define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
%va = load <vscale x 8 x ptr>, ptr %pa, align 32
ret <vscale x 8 x ptr> %va
}
; Assumed contents for attribute group #0, matching -mattr=+v on the RUN lines.
attributes #0 = { "target-features"="+v" }
...
---
name: vload_nx1i8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx1i8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx1i8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s8>)
PseudoRET implicit $v8
...
---
name: vload_nx2i8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s8>)
PseudoRET implicit $v8
...
---
name: vload_nx4i8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx4i8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s8>)
PseudoRET implicit $v8
...
---
name: vload_nx8i8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx8i8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx8i8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 8 x s8>)
PseudoRET implicit $v8
...
---
name: vload_nx16i8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx16i8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx16i8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: vload_nx32i8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx32i8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: vload_nx32i8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 32 x s8>)
PseudoRET implicit $v8m4
...
---
name: vload_nx64i8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx64i8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: vload_nx64i8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 64 x s8>)
PseudoRET implicit $v8m8
...
---
name: vload_nx1i16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx1i16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx1i16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx2i16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx4i16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx4i16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx8i16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx8i16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx8i16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
...
---
name: vload_nx16i16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx16i16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: vload_nx16i16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
...
---
name: vload_nx32i16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx32i16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: vload_nx32i16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 32 x s16>)
PseudoRET implicit $v8m8
...
---
name: vload_nx1i32
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx1i32
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx1i32
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s32>)
PseudoRET implicit $v8
...
---
name: vload_nx2i32
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i32
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i32
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: vload_nx4i32
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i32
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx4i32
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
...
---
name: vload_nx8i32
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx8i32
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: vload_nx8i32
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
...
---
name: vload_nx16i32
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx16i32
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: vload_nx16i32
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
...
---
name: vload_nx1i64
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx1i64
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx1i64
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s64>)
PseudoRET implicit $v8
...
---
name: vload_nx2i64
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i64
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx2i64
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: vload_nx4i64
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i64
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: vload_nx4i64
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
...
---
name: vload_nx8i64
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx8i64
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: vload_nx8i64
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
...
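# The vload_nx*_align* cases below vary the load alignment. Loads whose
# alignment is smaller than the element size have already been legalized into
# <vscale x N x s8> loads followed by a G_BITCAST; regbankselect should keep
# both instructions on the vrb bank.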
---
name: vload_nx16i8_align1
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx16i8_align1
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx16i8_align1
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: vload_nx16i8_align2
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx16i8_align2
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx16i8_align2
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: vload_nx16i8_align16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx16i8_align16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx16i8_align16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: vload_nx16i8_align64
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx16i8_align64
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx16i8_align64
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: vload_nx4i16_align1
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i16_align1
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx4i16_align1
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
%1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx4i16_align2
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i16_align2
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx4i16_align2
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx4i16_align4
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i16_align4
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx4i16_align4
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx4i16_align8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i16_align8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx4i16_align8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx4i16_align16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx4i16_align16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx4i16_align16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: vload_nx2i32_align2
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i32_align2
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i32_align2
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
%1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: vload_nx2i32_align4
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i32_align4
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i32_align4
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: vload_nx2i32_align8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i32_align8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i32_align8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: vload_nx2i32_align16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i32_align16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i32_align16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: vload_nx2i32_align256
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i32_align256
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2i32_align256
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: vload_nx2i64_align4
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i64_align4
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx2i64_align4
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
%1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: vload_nx2i64_align8
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i64_align8
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx2i64_align8
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: vload_nx2i64_align16
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i64_align16
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx2i64_align16
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: vload_nx2i64_align32
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2i64_align32
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: vload_nx2i64_align32
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: vload_nx1ptr
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx1ptr
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx1ptr
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x p0>)
PseudoRET implicit $v8
...
---
name: vload_nx2ptr
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx2ptr
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: vload_nx2ptr
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x p0>)
PseudoRET implicit $v8
...
---
name: vload_nx8ptr
legalized: true
tracksRegLiveness: true
body: |
bb.1 (%ir-block.0):
liveins: $x10
; RV32I-LABEL: name: vload_nx8ptr
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: vload_nx8ptr
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x p0>)
PseudoRET implicit $v8m4
...