# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
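# This file tests legalization of G_STORE for scalable vector types (i8, i16,
# i32, and i64 elements, LMUL 1 through 8) on RV32 and RV64 with the V
# extension. Stores whose alignment is at least the element size are legal
# as-is; under-aligned stores are legalized by bitcasting the value to an
# equivalently sized <vscale x N x s8> vector, as the align-variant tests at
# the end of the file check.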
--- |
define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
store <vscale x 1 x i8> %b, ptr %pa, align 1
ret void
}
define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
store <vscale x 2 x i8> %b, ptr %pa, align 2
ret void
}
define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
store <vscale x 4 x i8> %b, ptr %pa, align 4
ret void
}
define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
store <vscale x 8 x i8> %b, ptr %pa, align 8
ret void
}
define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
store <vscale x 16 x i8> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
store <vscale x 32 x i8> %b, ptr %pa, align 32
ret void
}
define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
store <vscale x 64 x i8> %b, ptr %pa, align 64
ret void
}
define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
store <vscale x 1 x i16> %b, ptr %pa, align 2
ret void
}
define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
store <vscale x 2 x i16> %b, ptr %pa, align 4
ret void
}
define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
store <vscale x 4 x i16> %b, ptr %pa, align 8
ret void
}
define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
store <vscale x 8 x i16> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
store <vscale x 16 x i16> %b, ptr %pa, align 32
ret void
}
define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
store <vscale x 32 x i16> %b, ptr %pa, align 64
ret void
}
define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
store <vscale x 1 x i32> %b, ptr %pa, align 4
ret void
}
define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
store <vscale x 2 x i32> %b, ptr %pa, align 8
ret void
}
define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
store <vscale x 4 x i32> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
store <vscale x 8 x i32> %b, ptr %pa, align 32
ret void
}
define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
store <vscale x 16 x i32> %b, ptr %pa, align 64
ret void
}
define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
store <vscale x 1 x i64> %b, ptr %pa, align 8
ret void
}
define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
store <vscale x 2 x i64> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
store <vscale x 4 x i64> %b, ptr %pa, align 32
ret void
}
define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
store <vscale x 8 x i64> %b, ptr %pa, align 64
ret void
}
define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
store <vscale x 16 x i8> %b, ptr %pa, align 1
ret void
}
define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
store <vscale x 16 x i8> %b, ptr %pa, align 2
ret void
}
define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
store <vscale x 16 x i8> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
store <vscale x 16 x i8> %b, ptr %pa, align 64
ret void
}
define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
store <vscale x 4 x i16> %b, ptr %pa, align 1
ret void
}
define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
store <vscale x 4 x i16> %b, ptr %pa, align 2
ret void
}
define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
store <vscale x 4 x i16> %b, ptr %pa, align 4
ret void
}
define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
store <vscale x 4 x i16> %b, ptr %pa, align 8
ret void
}
define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
store <vscale x 4 x i16> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
store <vscale x 2 x i32> %b, ptr %pa, align 2
ret void
}
define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
store <vscale x 2 x i32> %b, ptr %pa, align 4
ret void
}
define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
store <vscale x 2 x i32> %b, ptr %pa, align 8
ret void
}
define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
store <vscale x 2 x i32> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
store <vscale x 2 x i32> %b, ptr %pa, align 256
ret void
}
define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
store <vscale x 2 x i64> %b, ptr %pa, align 4
ret void
}
define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
store <vscale x 2 x i64> %b, ptr %pa, align 8
ret void
}
define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
store <vscale x 2 x i64> %b, ptr %pa, align 16
ret void
}
define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
store <vscale x 2 x i64> %b, ptr %pa, align 32
ret void
}
attributes #0 = { "target-features"="+v" }
...
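# Each function below stores at the natural alignment of its vector type, so
# the legalizer is expected to leave these G_STOREs unchanged.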
---
name: vstore_nx1i8
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx1i8
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s8>) = COPY $v8
G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx2i8
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i8
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s8>) = COPY $v8
G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx4i8
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx4i8
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s8>) = COPY $v8
G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx8i8
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx8i8
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s8>) = COPY $v8
G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx16i8
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx16i8
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = COPY $v8m2
G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx32i8
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m4
; CHECK-LABEL: name: vstore_nx32i8
; CHECK: liveins: $x10, $v8m4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s8>) = COPY $v8m4
G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx64i8
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m8
; CHECK-LABEL: name: vstore_nx64i8
; CHECK: liveins: $x10, $v8m8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 64 x s8>) = COPY $v8m8
G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx1i16
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx1i16
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s16>) = COPY $v8
G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx2i16
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i16
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s16>) = COPY $v8
G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx4i16
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx4i16
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = COPY $v8
G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx8i16
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx8i16
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s16>) = COPY $v8m2
G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx16i16
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m4
; CHECK-LABEL: name: vstore_nx16i16
; CHECK: liveins: $x10, $v8m4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s16>) = COPY $v8m4
G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx32i16
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m8
; CHECK-LABEL: name: vstore_nx32i16
; CHECK: liveins: $x10, $v8m8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 32 x s16>) = COPY $v8m8
G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx1i32
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx1i32
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s32>) = COPY $v8
G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx2i32
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i32
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = COPY $v8
G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx4i32
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx4i32
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s32>) = COPY $v8m2
G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx8i32
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m4
; CHECK-LABEL: name: vstore_nx8i32
; CHECK: liveins: $x10, $v8m4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s32>) = COPY $v8m4
G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx16i32
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m8
; CHECK-LABEL: name: vstore_nx16i32
; CHECK: liveins: $x10, $v8m8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s32>) = COPY $v8m8
G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx1i64
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx1i64
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 1 x s64>) = COPY $v8
G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx2i64
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx2i64
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = COPY $v8m2
G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx4i64
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m4
; CHECK-LABEL: name: vstore_nx4i64
; CHECK: liveins: $x10, $v8m4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s64>) = COPY $v8m4
G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx8i64
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m8
; CHECK-LABEL: name: vstore_nx8i64
; CHECK: liveins: $x10, $v8m8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 8 x s64>) = COPY $v8m8
G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
PseudoRET
...
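# The remaining tests use explicit alignments. For i8 elements every alignment
# is at least the element size, so no bitcast is expected even at align 1; the
# alignment is simply carried on the memory operand.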
---
name: vstore_nx16i8_align1
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx16i8_align1
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = COPY $v8m2
G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
PseudoRET
...
---
name: vstore_nx16i8_align2
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx16i8_align2
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = COPY $v8m2
G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
PseudoRET
...
---
name: vstore_nx16i8_align16
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx16i8_align16
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = COPY $v8m2
G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx16i8_align64
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx16i8_align64
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 16 x s8>) = COPY $v8m2
G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
PseudoRET
...
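# For i16 elements, align 1 is below the element size, so the store is
# expected to be bitcast to <vscale x 8 x s8> first; align 2 and above are
# legal as-is.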
---
name: vstore_nx4i16_align1
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx4i16_align1
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = COPY $v8
G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
PseudoRET
...
---
name: vstore_nx4i16_align2
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx4i16_align2
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = COPY $v8
G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
PseudoRET
...
---
name: vstore_nx4i16_align4
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx4i16_align4
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = COPY $v8
G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
PseudoRET
...
---
name: vstore_nx4i16_align8
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx4i16_align8
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = COPY $v8
G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx4i16_align16
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx4i16_align16
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 4 x s16>) = COPY $v8
G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
PseudoRET
...
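# For i32 elements, align 2 is below the element size and is expected to be
# bitcast to <vscale x 8 x s8>; align 4 and above are legal, and
# over-alignment (align 16, align 256) is preserved on the memory operand.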
---
name: vstore_nx2i32_align2
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i32_align2
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = COPY $v8
G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
PseudoRET
...
---
name: vstore_nx2i32_align4
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i32_align4
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = COPY $v8
G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
PseudoRET
...
---
name: vstore_nx2i32_align8
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i32_align8
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = COPY $v8
G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx2i32_align16
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i32_align16
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = COPY $v8
G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
PseudoRET
...
---
name: vstore_nx2i32_align256
body: |
bb.1 (%ir-block.0):
liveins: $v8, $x10
; CHECK-LABEL: name: vstore_nx2i32_align256
; CHECK: liveins: $v8, $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s32>) = COPY $v8
G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
PseudoRET
...
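# For i64 elements, align 4 is below the element size and is expected to be
# bitcast to <vscale x 16 x s8>; align 8 and above are legal as-is.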
---
name: vstore_nx2i64_align4
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx2i64_align4
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = COPY $v8m2
G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
PseudoRET
...
---
name: vstore_nx2i64_align8
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx2i64_align8
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = COPY $v8m2
G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
PseudoRET
...
---
name: vstore_nx2i64_align16
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx2i64_align16
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = COPY $v8m2
G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
PseudoRET
...
---
name: vstore_nx2i64_align32
body: |
bb.1 (%ir-block.0):
liveins: $x10, $v8m2
; CHECK-LABEL: name: vstore_nx2i64_align32
; CHECK: liveins: $x10, $v8m2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
; CHECK-NEXT: PseudoRET
%0:_(p0) = COPY $x10
%1:_(<vscale x 2 x s64>) = COPY $v8m2
G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
PseudoRET
...