# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
# RUN: -o - | FileCheck %s
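# These tests cover register bank selection for G_SPLAT_VECTOR of a zero splat
# value across scalable vector types: the scalar operand is assigned to gprb
# (integer G_CONSTANT) or fprb (G_FCONSTANT, and s64 values on RV32), and the
# G_SPLAT_VECTOR result is assigned to vrb.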
---
name: splat_zero_nxv1i8
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv1i8
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %3(s32)
$v8 = COPY %0(<vscale x 1 x s8>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv2i8
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv2i8
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %3(s32)
$v8 = COPY %0(<vscale x 2 x s8>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv4i8
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv4i8
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %3(s32)
$v8 = COPY %0(<vscale x 4 x s8>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv8i8
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv8i8
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %3(s32)
$v8 = COPY %0(<vscale x 8 x s8>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv16i8
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv16i8
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %3(s32)
$v8m2 = COPY %0(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: splat_zero_nxv32i8
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv32i8
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR %3(s32)
$v8m4 = COPY %0(<vscale x 32 x s8>)
PseudoRET implicit $v8m4
...
---
name: splat_zero_nxv64i8
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv64i8
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR %3(s32)
$v8m8 = COPY %0(<vscale x 64 x s8>)
PseudoRET implicit $v8m8
...
---
name: splat_zero_nxv1i16
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv1i16
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %3(s32)
$v8 = COPY %0(<vscale x 1 x s16>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv2i16
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv2i16
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %3(s32)
$v8 = COPY %0(<vscale x 2 x s16>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv4i16
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv4i16
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %3(s32)
$v8 = COPY %0(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv8i16
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv8i16
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %3(s32)
$v8m2 = COPY %0(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
...
---
name: splat_zero_nxv16i16
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv16i16
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %3(s32)
$v8m4 = COPY %0(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
...
---
name: splat_zero_nxv32i16
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv32i16
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%3:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR %3(s32)
$v8m8 = COPY %0(<vscale x 32 x s16>)
PseudoRET implicit $v8m8
...
---
name: splat_zero_nxv1i32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv1i32
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %0(<vscale x 1 x s32>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv2i32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv2i32
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %0(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv4i32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv4i32
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%1:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m2 = COPY %0(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
...
---
name: splat_zero_nxv8i32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv8i32
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%1:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m4 = COPY %0(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
...
---
name: splat_zero_nxv16i32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv16i32
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%1:_(s32) = G_CONSTANT i32 0
%0:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m8 = COPY %0(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
...
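# On RV32 the s64 splat value is assembled from two s32 G_CONSTANTs with
# G_MERGE_VALUES; the checks below expect the merged s64 on the FPR bank.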
---
name: splat_zero_nxv1i64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv1i64
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_CONSTANT i32 0
%1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
%0:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8 = COPY %0(<vscale x 1 x s64>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv2i64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv2i64
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_CONSTANT i32 0
%1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
%0:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m2 = COPY %0(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: splat_zero_nxv4i64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv4i64
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_CONSTANT i32 0
%1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
%0:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m4 = COPY %0(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
...
---
name: splat_zero_nxv8i64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv8i64
; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_CONSTANT i32 0
%1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
%0:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m8 = COPY %0(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
...
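# Floating-point splats: the G_FCONSTANT operand is assigned to the FPR bank.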
---
name: splat_zero_nxv1f32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv1f32
; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s32) = G_FCONSTANT float 0.000000e+00
%0:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %0(<vscale x 1 x s32>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv2f32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv2f32
; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s32) = G_FCONSTANT float 0.000000e+00
%0:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8 = COPY %0(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv4f32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv4f32
; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%1:_(s32) = G_FCONSTANT float 0.000000e+00
%0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m2 = COPY %0(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
...
---
name: splat_zero_nxv8f32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv8f32
; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%1:_(s32) = G_FCONSTANT float 0.000000e+00
%0:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m4 = COPY %0(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
...
---
name: splat_zero_nxv16f32
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv16f32
; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%1:_(s32) = G_FCONSTANT float 0.000000e+00
%0:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
$v8m8 = COPY %0(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
...
---
name: splat_zero_nxv1f64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv1f64
; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8
%1:_(s64) = G_FCONSTANT double 0.000000e+00
%0:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8 = COPY %0(<vscale x 1 x s64>)
PseudoRET implicit $v8
...
---
name: splat_zero_nxv2f64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv2f64
; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m2
%1:_(s64) = G_FCONSTANT double 0.000000e+00
%0:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m2 = COPY %0(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: splat_zero_nxv4f64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv4f64
; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m4
%1:_(s64) = G_FCONSTANT double 0.000000e+00
%0:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m4 = COPY %0(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
...
---
name: splat_zero_nxv8f64
legalized: true
regBankSelected: false
body: |
bb.1:
; CHECK-LABEL: name: splat_zero_nxv8f64
; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; CHECK-NEXT: PseudoRET implicit $v8m8
%1:_(s64) = G_FCONSTANT double 0.000000e+00
%0:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
$v8m8 = COPY %0(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
...