# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=prologepilog %s -o - 2>&1 | FileCheck %s
--- |
; IR stub only: it supplies the symbol and signature for the MIR function
; below. The pass under test (prologepilog) runs on the MIR body, not on
; this IR, so `ret void` here is intentionally trivial.
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64"
define void @zvlsseg_spill(ptr %base, i64 %vl) {
ret void
}
...
---
# Tests that spilling/reloading a 7-register vector segment tuple through a
# scalable-vector stack slot is expanded by prolog/epilog insertion into
# seven whole-register stores (VS1R_V) and seven whole-register loads
# (VL1RE8_V), with the slot address advanced by VLENB between elements
# (PseudoReadVLENB + ADD), since the element stride is only known at runtime.
# The prologue/epilogue must likewise reserve/release VLENB*8 bytes of
# scalable stack (SLLI by 3) on top of the fixed 16-byte frame, and emit the
# CFI escape describing the scalable CFA offset.
name: zvlsseg_spill
tracksRegLiveness: true
stack:
- { id: 0, offset: 0, size: 64, alignment: 8, stack-id: scalable-vector }
body: |
bb.0:
liveins: $x10, $x11
; CHECK-LABEL: name: zvlsseg_spill
; CHECK: liveins: $x10, $x11
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x2 = frame-setup ADDI $x2, -16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: $x12 = frame-setup PseudoReadVLENB
; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 3
; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12
; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22
; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, renamable $x10, $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $x11 = ADDI $x2, 16
; CHECK-NEXT: $x12 = PseudoReadVLENB
; CHECK-NEXT: VS1R_V $v0, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: VS1R_V $v1, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: VS1R_V $v2, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: VS1R_V $v3, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: VS1R_V $v4, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: VS1R_V $v5, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
; CHECK-NEXT: VS1R_V $v6, killed $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store unknown-size into %stack.0, align 8)
; CHECK-NEXT: $x11 = ADDI $x2, 16
; CHECK-NEXT: $x12 = PseudoReadVLENB
; CHECK-NEXT: $v7 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: $v8 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: $v9 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: $v10 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: $v11 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: $v12 = VL1RE8_V $x11 :: (load unknown-size from %stack.0, align 8)
; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
; CHECK-NEXT: $v13 = VL1RE8_V killed $x11 :: (load unknown-size from %stack.0, align 8)
; CHECK-NEXT: VS1R_V killed $v8, killed renamable $x10
; CHECK-NEXT: $x10 = frame-destroy PseudoReadVLENB
; CHECK-NEXT: $x10 = frame-destroy SLLI killed $x10, 3
; CHECK-NEXT: $x2 = frame-destroy ADD $x2, killed $x10
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
; CHECK-NEXT: PseudoRET
; Input MIR: load a seg7 tuple (v0-v6), spill it to the scalable slot,
; reload it into a different tuple (v7-v13), then store one middle element
; (v8) to memory so the reload is not dead-code-eliminated.
%0:gpr = COPY $x10
%1:gprnox0 = COPY $x11
$v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 undef $v0_v1_v2_v3_v4_v5_v6, %0, %1, 6, 0
PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0 :: (store unknown-size into %stack.0, align 8)
renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0 :: (load unknown-size from %stack.0, align 8)
VS1R_V killed $v8, %0:gpr
PseudoRET
...