# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
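# Exercises GlobalISel instruction selection of G_SUB on scalable vector
# types: each element width (s8..s64) and LMUL should select the matching
# PseudoVSUB_VV_* pseudo with the expected SEW and ta/ma policy operands on
# both riscv32 and riscv64 with +v.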
---
name: test_nxv1i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv1i8
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF8_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv1i8
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_MF8_:%[0-9]+]]:vr = PseudoVSUB_VV_MF8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF8_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 1 x s8>) = COPY $v8
%1:vrb(<vscale x 1 x s8>) = COPY $v9
%2:vrb(<vscale x 1 x s8>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 1 x s8>)
PseudoRET implicit $v8
...
---
name: test_nxv2i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv2i8
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv2i8
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 2 x s8>) = COPY $v8
%1:vrb(<vscale x 2 x s8>) = COPY $v9
%2:vrb(<vscale x 2 x s8>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 2 x s8>)
PseudoRET implicit $v8
...
---
name: test_nxv4i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv4i8
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv4i8
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 4 x s8>) = COPY $v8
%1:vrb(<vscale x 4 x s8>) = COPY $v9
%2:vrb(<vscale x 4 x s8>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 4 x s8>)
PseudoRET implicit $v8
...
---
name: test_nxv8i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv8i8
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv8i8
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 8 x s8>) = COPY $v8
%1:vrb(<vscale x 8 x s8>) = COPY $v9
%2:vrb(<vscale x 8 x s8>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 8 x s8>)
PseudoRET implicit $v8
...
---
name: test_nxv16i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m2, $v10m2
; RV32I-LABEL: name: test_nxv16i8
; RV32I: liveins: $v8m2, $v10m2
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: test_nxv16i8
; RV64I: liveins: $v8m2, $v10m2
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:vrb(<vscale x 16 x s8>) = COPY $v8m2
%1:vrb(<vscale x 16 x s8>) = COPY $v10m2
%2:vrb(<vscale x 16 x s8>) = G_SUB %0, %1
$v8m2 = COPY %2(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
...
---
name: test_nxv32i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m4, $v12m4
; RV32I-LABEL: name: test_nxv32i8
; RV32I: liveins: $v8m4, $v12m4
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: test_nxv32i8
; RV64I: liveins: $v8m4, $v12m4
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:vrb(<vscale x 32 x s8>) = COPY $v8m4
%1:vrb(<vscale x 32 x s8>) = COPY $v12m4
%2:vrb(<vscale x 32 x s8>) = G_SUB %0, %1
$v8m4 = COPY %2(<vscale x 32 x s8>)
PseudoRET implicit $v8m4
...
---
name: test_nxv64i8
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m8, $v16m8
; RV32I-LABEL: name: test_nxv64i8
; RV32I: liveins: $v8m8, $v16m8
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: test_nxv64i8
; RV64I: liveins: $v8m8, $v16m8
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 3 /* e8 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:vrb(<vscale x 64 x s8>) = COPY $v8m8
%1:vrb(<vscale x 64 x s8>) = COPY $v16m8
%2:vrb(<vscale x 64 x s8>) = G_SUB %0, %1
$v8m8 = COPY %2(<vscale x 64 x s8>)
PseudoRET implicit $v8m8
...
---
name: test_nxv1i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv1i16
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv1i16
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_MF4_:%[0-9]+]]:vr = PseudoVSUB_VV_MF4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF4_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 1 x s16>) = COPY $v8
%1:vrb(<vscale x 1 x s16>) = COPY $v9
%2:vrb(<vscale x 1 x s16>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 1 x s16>)
PseudoRET implicit $v8
...
---
name: test_nxv2i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv2i16
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv2i16
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 2 x s16>) = COPY $v8
%1:vrb(<vscale x 2 x s16>) = COPY $v9
%2:vrb(<vscale x 2 x s16>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 2 x s16>)
PseudoRET implicit $v8
...
---
name: test_nxv4i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv4i16
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv4i16
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 4 x s16>) = COPY $v8
%1:vrb(<vscale x 4 x s16>) = COPY $v9
%2:vrb(<vscale x 4 x s16>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 4 x s16>)
PseudoRET implicit $v8
...
---
name: test_nxv8i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m2, $v10m2
; RV32I-LABEL: name: test_nxv8i16
; RV32I: liveins: $v8m2, $v10m2
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: test_nxv8i16
; RV64I: liveins: $v8m2, $v10m2
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:vrb(<vscale x 8 x s16>) = COPY $v8m2
%1:vrb(<vscale x 8 x s16>) = COPY $v10m2
%2:vrb(<vscale x 8 x s16>) = G_SUB %0, %1
$v8m2 = COPY %2(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
...
---
name: test_nxv16i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m4, $v12m4
; RV32I-LABEL: name: test_nxv16i16
; RV32I: liveins: $v8m4, $v12m4
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: test_nxv16i16
; RV64I: liveins: $v8m4, $v12m4
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:vrb(<vscale x 16 x s16>) = COPY $v8m4
%1:vrb(<vscale x 16 x s16>) = COPY $v12m4
%2:vrb(<vscale x 16 x s16>) = G_SUB %0, %1
$v8m4 = COPY %2(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
...
---
name: test_nxv32i16
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m8, $v16m8
; RV32I-LABEL: name: test_nxv32i16
; RV32I: liveins: $v8m8, $v16m8
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: test_nxv32i16
; RV64I: liveins: $v8m8, $v16m8
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 4 /* e16 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:vrb(<vscale x 32 x s16>) = COPY $v8m8
%1:vrb(<vscale x 32 x s16>) = COPY $v16m8
%2:vrb(<vscale x 32 x s16>) = G_SUB %0, %1
$v8m8 = COPY %2(<vscale x 32 x s16>)
PseudoRET implicit $v8m8
...
---
name: test_nxv1i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv1i32
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv1i32
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_MF2_:%[0-9]+]]:vr = PseudoVSUB_VV_MF2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_MF2_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 1 x s32>) = COPY $v8
%1:vrb(<vscale x 1 x s32>) = COPY $v9
%2:vrb(<vscale x 1 x s32>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 1 x s32>)
PseudoRET implicit $v8
...
---
name: test_nxv2i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv2i32
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv2i32
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 2 x s32>) = COPY $v8
%1:vrb(<vscale x 2 x s32>) = COPY $v9
%2:vrb(<vscale x 2 x s32>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 2 x s32>)
PseudoRET implicit $v8
...
---
name: test_nxv4i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m2, $v10m2
; RV32I-LABEL: name: test_nxv4i32
; RV32I: liveins: $v8m2, $v10m2
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: test_nxv4i32
; RV64I: liveins: $v8m2, $v10m2
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:vrb(<vscale x 4 x s32>) = COPY $v8m2
%1:vrb(<vscale x 4 x s32>) = COPY $v10m2
%2:vrb(<vscale x 4 x s32>) = G_SUB %0, %1
$v8m2 = COPY %2(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
...
---
name: test_nxv8i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m4, $v12m4
; RV32I-LABEL: name: test_nxv8i32
; RV32I: liveins: $v8m4, $v12m4
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: test_nxv8i32
; RV64I: liveins: $v8m4, $v12m4
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:vrb(<vscale x 8 x s32>) = COPY $v8m4
%1:vrb(<vscale x 8 x s32>) = COPY $v12m4
%2:vrb(<vscale x 8 x s32>) = G_SUB %0, %1
$v8m4 = COPY %2(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
...
---
name: test_nxv16i32
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m8, $v16m8
; RV32I-LABEL: name: test_nxv16i32
; RV32I: liveins: $v8m8, $v16m8
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: test_nxv16i32
; RV64I: liveins: $v8m8, $v16m8
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 5 /* e32 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:vrb(<vscale x 16 x s32>) = COPY $v8m8
%1:vrb(<vscale x 16 x s32>) = COPY $v16m8
%2:vrb(<vscale x 16 x s32>) = G_SUB %0, %1
$v8m8 = COPY %2(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
...
---
name: test_nxv1i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8, $v9
; RV32I-LABEL: name: test_nxv1i64
; RV32I: liveins: $v8, $v9
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV32I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV32I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: test_nxv1i64
; RV64I: liveins: $v8, $v9
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
; RV64I-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV64I-NEXT: $v8 = COPY [[PseudoVSUB_VV_M1_]]
; RV64I-NEXT: PseudoRET implicit $v8
%0:vrb(<vscale x 1 x s64>) = COPY $v8
%1:vrb(<vscale x 1 x s64>) = COPY $v9
%2:vrb(<vscale x 1 x s64>) = G_SUB %0, %1
$v8 = COPY %2(<vscale x 1 x s64>)
PseudoRET implicit $v8
...
---
name: test_nxv2i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m2, $v10m2
; RV32I-LABEL: name: test_nxv2i64
; RV32I: liveins: $v8m2, $v10m2
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: test_nxv2i64
; RV64I: liveins: $v8m2, $v10m2
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm2 = COPY $v8m2
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm2 = COPY $v10m2
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M2_:%[0-9]+]]:vrm2 = PseudoVSUB_VV_M2 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVSUB_VV_M2_]]
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:vrb(<vscale x 2 x s64>) = COPY $v8m2
%1:vrb(<vscale x 2 x s64>) = COPY $v10m2
%2:vrb(<vscale x 2 x s64>) = G_SUB %0, %1
$v8m2 = COPY %2(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
...
---
name: test_nxv4i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m4, $v12m4
; RV32I-LABEL: name: test_nxv4i64
; RV32I: liveins: $v8m4, $v12m4
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: test_nxv4i64
; RV64I: liveins: $v8m4, $v12m4
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm4 = COPY $v8m4
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm4 = COPY $v12m4
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M4_:%[0-9]+]]:vrm4 = PseudoVSUB_VV_M4 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVSUB_VV_M4_]]
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:vrb(<vscale x 4 x s64>) = COPY $v8m4
%1:vrb(<vscale x 4 x s64>) = COPY $v12m4
%2:vrb(<vscale x 4 x s64>) = G_SUB %0, %1
$v8m4 = COPY %2(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
...
---
name: test_nxv8i64
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $v8m8, $v16m8
; RV32I-LABEL: name: test_nxv8i64
; RV32I: liveins: $v8m8, $v16m8
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV32I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: test_nxv8i64
; RV64I: liveins: $v8m8, $v16m8
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrm8 = COPY $v16m8
; RV64I-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVSUB_VV_M8_:%[0-9]+]]:vrm8 = PseudoVSUB_VV_M8 [[DEF]], [[COPY]], [[COPY1]], -1, 6 /* e64 */, 3 /* ta, ma */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVSUB_VV_M8_]]
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:vrb(<vscale x 8 x s64>) = COPY $v8m8
%1:vrb(<vscale x 8 x s64>) = COPY $v16m8
%2:vrb(<vscale x 8 x s64>) = G_SUB %0, %1
$v8m8 = COPY %2(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
...