; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -O0 -mattr=+sve -global-isel -global-isel-abort=1 -aarch64-enable-gisel-sve=1 \
; RUN: -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s
;; vscale x 128-bit
; A single vscale x 128-bit integer argument fits in one SVE register: lowered
; to a plain COPY from $z0.
define void @formal_argument_nxv16i8(<vscale x 16 x i8> %0) {
; CHECK-LABEL: name: formal_argument_nxv16i8
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv8i16 also occupies exactly one Z register: single COPY from $z0.
define void @formal_argument_nxv8i16(<vscale x 8 x i16> %0) {
; CHECK-LABEL: name: formal_argument_nxv8i16
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv4i32: one Z register, single COPY from $z0.
define void @formal_argument_nxv4i32(<vscale x 4 x i32> %0) {
; CHECK-LABEL: name: formal_argument_nxv4i32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv2i64: one Z register, single COPY from $z0.
define void @formal_argument_nxv2i64(<vscale x 2 x i64> %0) {
; CHECK-LABEL: name: formal_argument_nxv2i64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: RET_ReallyLR
ret void
}
; Floating-point element types use the same lowering as integers of the same
; width: nxv4f32 becomes a COPY of $z0 with generic <vscale x 4 x s32> type.
define void @formal_argument_nxv4f32(<vscale x 4 x float> %0) {
; CHECK-LABEL: name: formal_argument_nxv4f32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv2f64: one Z register, generic type is the width-equivalent s64 vector.
define void @formal_argument_nxv2f64(<vscale x 2 x double> %0) {
; CHECK-LABEL: name: formal_argument_nxv2f64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: RET_ReallyLR
ret void
}
; Pointer-element vector that fits in one register keeps the pointer (p0)
; element type on the COPY.
define void @formal_argument_nxv2p0(<vscale x 2 x ptr> %0) {
; CHECK-LABEL: name: formal_argument_nxv2p0
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $z0
; CHECK-NEXT: RET_ReallyLR
ret void
}
;; vscale x 256-bit
; A vscale x 256-bit argument is split across two Z registers ($z0, $z1) and
; reassembled in the IRTranslator with G_CONCAT_VECTORS.
define void @formal_argument_nxv32i8(<vscale x 32 x i8> %0) {
; CHECK-LABEL: name: formal_argument_nxv32i8
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv16i16: two-register split ($z0, $z1) concatenated back into one value.
define void @formal_argument_nxv16i16(<vscale x 16 x i16> %0) {
; CHECK-LABEL: name: formal_argument_nxv16i16
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv8i32: two-register split ($z0, $z1) concatenated back into one value.
define void @formal_argument_nxv8i32(<vscale x 8 x i32> %0) {
; CHECK-LABEL: name: formal_argument_nxv8i32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv4i64: two-register split ($z0, $z1) concatenated back into one value.
define void @formal_argument_nxv4i64(<vscale x 4 x i64> %0) {
; CHECK-LABEL: name: formal_argument_nxv4i64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv8f32: same two-register split as nxv8i32 (FP maps to width-equivalent s32).
define void @formal_argument_nxv8f32(<vscale x 8 x float> %0) {
; CHECK-LABEL: name: formal_argument_nxv8f32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv4f64: same two-register split as nxv4i64 (FP maps to width-equivalent s64).
define void @formal_argument_nxv4f64(<vscale x 4 x double> %0) {
; CHECK-LABEL: name: formal_argument_nxv4f64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; Split pointer vector: the per-register parts are copied as s64, but the
; G_CONCAT_VECTORS result keeps the p0 element type of the IR argument.
define void @formal_argument_nxv4p0(<vscale x 4 x ptr> %0) {
; CHECK-LABEL: name: formal_argument_nxv4p0
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
;; vscale x 512-bit
; A vscale x 512-bit argument is split across four Z registers ($z0-$z3) and
; reassembled with a single four-operand G_CONCAT_VECTORS.
define void @formal_argument_nxv64i8(<vscale x 64 x i8> %0) {
; CHECK-LABEL: name: formal_argument_nxv64i8
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z3
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 64 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>), [[COPY2]](<vscale x 16 x s8>), [[COPY3]](<vscale x 16 x s8>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv32i16: four-register split ($z0-$z3) concatenated back into one value.
define void @formal_argument_nxv32i16(<vscale x 32 x i16> %0) {
; CHECK-LABEL: name: formal_argument_nxv32i16
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z3
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>), [[COPY2]](<vscale x 8 x s16>), [[COPY3]](<vscale x 8 x s16>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv16i32: four-register split ($z0-$z3) concatenated back into one value.
define void @formal_argument_nxv16i32(<vscale x 16 x i32> %0) {
; CHECK-LABEL: name: formal_argument_nxv16i32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv8i64: four-register split ($z0-$z3) concatenated back into one value.
define void @formal_argument_nxv8i64(<vscale x 8 x i64> %0) {
; CHECK-LABEL: name: formal_argument_nxv8i64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv16f32: same four-register split as nxv16i32 (FP maps to s32 parts).
define void @formal_argument_nxv16f32(<vscale x 16 x float> %0) {
; CHECK-LABEL: name: formal_argument_nxv16f32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv8f64: same four-register split as nxv8i64 (FP maps to s64 parts).
define void @formal_argument_nxv8f64(<vscale x 8 x double> %0) {
; CHECK-LABEL: name: formal_argument_nxv8f64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; Four-register pointer-vector split: parts are copied as s64, concat result
; keeps the p0 element type.
define void @formal_argument_nxv8p0(<vscale x 8 x ptr> %0) {
; CHECK-LABEL: name: formal_argument_nxv8p0
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
;; vscale x 1024-bit
; A vscale x 1024-bit argument uses all eight SVE argument registers
; ($z0-$z7), reassembled with an eight-operand G_CONCAT_VECTORS.
define void @formal_argument_nxv128i8(<vscale x 128 x i8> %0) {
; CHECK-LABEL: name: formal_argument_nxv128i8
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z3
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z4
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z5
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z6
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z7
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 128 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>), [[COPY2]](<vscale x 16 x s8>), [[COPY3]](<vscale x 16 x s8>), [[COPY4]](<vscale x 16 x s8>), [[COPY5]](<vscale x 16 x s8>), [[COPY6]](<vscale x 16 x s8>), [[COPY7]](<vscale x 16 x s8>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv64i16: eight-register split ($z0-$z7) concatenated back into one value.
define void @formal_argument_nxv64i16(<vscale x 64 x i16> %0) {
; CHECK-LABEL: name: formal_argument_nxv64i16
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z3
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z4
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z5
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z6
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z7
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 64 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>), [[COPY2]](<vscale x 8 x s16>), [[COPY3]](<vscale x 8 x s16>), [[COPY4]](<vscale x 8 x s16>), [[COPY5]](<vscale x 8 x s16>), [[COPY6]](<vscale x 8 x s16>), [[COPY7]](<vscale x 8 x s16>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv32i32: eight-register split ($z0-$z7) concatenated back into one value.
define void @formal_argument_nxv32i32(<vscale x 32 x i32> %0) {
; CHECK-LABEL: name: formal_argument_nxv32i32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z4
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z5
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z6
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z7
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>), [[COPY4]](<vscale x 4 x s32>), [[COPY5]](<vscale x 4 x s32>), [[COPY6]](<vscale x 4 x s32>), [[COPY7]](<vscale x 4 x s32>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv16i64: eight-register split ($z0-$z7) concatenated back into one value.
define void @formal_argument_nxv16i64(<vscale x 16 x i64> %0) {
; CHECK-LABEL: name: formal_argument_nxv16i64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv32f32: same eight-register split as nxv32i32 (FP maps to s32 parts).
define void @formal_argument_nxv32f32(<vscale x 32 x float> %0) {
; CHECK-LABEL: name: formal_argument_nxv32f32
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z4
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z5
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z6
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z7
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>), [[COPY4]](<vscale x 4 x s32>), [[COPY5]](<vscale x 4 x s32>), [[COPY6]](<vscale x 4 x s32>), [[COPY7]](<vscale x 4 x s32>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; nxv16f64: same eight-register split as nxv16i64 (FP maps to s64 parts).
define void @formal_argument_nxv16f64(<vscale x 16 x double> %0) {
; CHECK-LABEL: name: formal_argument_nxv16f64
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}
; Eight-register pointer-vector split: parts are copied as s64, concat result
; keeps the p0 element type.
define void @formal_argument_nxv16p0(<vscale x 16 x ptr> %0) {
; CHECK-LABEL: name: formal_argument_nxv16p0
; CHECK: bb.1 (%ir-block.1):
; CHECK-NEXT: liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
; CHECK-NEXT: RET_ReallyLR
ret void
}