; llvm/test/CodeGen/AArch64/aarch64-fastcc-stackup.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-none-linux -O0 -tailcallopt < %s | FileCheck %s

; Plain (non-fastcc) void callee. Calling it from @baz forces the result of
; the preceding fastcc call to stay live across a call, so the caller must
; have a valid stack frame (and correct sp) at that point.
define void @bar() {
; CHECK-LABEL: bar:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  ret void
}

; fastcc callee with 9 i64 arguments: AAPCS64 passes the first 8 in x0-x7,
; so the 9th goes on the stack. With -tailcallopt, fastcc callees pop their
; own stack-argument area on return — hence the "add sp, sp, #16" epilogue
; even though the function body is just a return of %0.
define fastcc i64 @foo(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8) {
; CHECK-LABEL: foo:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add sp, sp, #16
; CHECK-NEXT:    ret
  ret i64 %0
}

; https://github.com/llvm/llvm-project/issues/60972
; Regression test (issue 60972, linked above): after "bl foo" the fastcc
; callee has already popped its 16 bytes of stack arguments, so @baz must
; re-decrement sp ("sub sp, sp, #16") before it can spill x0 and call @bar;
; the epilogue then releases the whole 32-byte frame with a single
; "add sp, sp, #32". Without that readjustment the spill slot offsets and
; the final sp restore would be wrong.
define fastcc i64 @baz() {
; CHECK-LABEL: baz:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub sp, sp, #32
; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov x8, sp
; CHECK-NEXT:    mov x7, xzr
; CHECK-NEXT:    str xzr, [x8]
; CHECK-NEXT:    mov x0, x7
; CHECK-NEXT:    mov x1, x7
; CHECK-NEXT:    mov x2, x7
; CHECK-NEXT:    mov x3, x7
; CHECK-NEXT:    mov x4, x7
; CHECK-NEXT:    mov x5, x7
; CHECK-NEXT:    mov x6, x7
; CHECK-NEXT:    bl foo
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    str x0, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT:    bl bar
; CHECK-NEXT:    ldr x0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT:    add sp, sp, #32
; CHECK-NEXT:    ret
entry:
  ; "tail call" is only a hint; this cannot actually be tail-called because
  ; the result is used after the call to @bar, so it lowers to a normal call
  ; followed by the sp readjustment checked above.
  %0 = tail call fastcc i64 @foo(i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
  call void @bar()
  ret i64 %0
}