llvm/test/Transforms/PhaseOrdering/varargs.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn-- -S -passes='lto<O2>' < %s | FileCheck %s
target triple = "amdgcn-amd-amdhsa"

; REQUIRES: amdgpu-registered-target

; We use the ExpandVariadics pass to lower the variadic function so that it
; can be inlined into @foo and the call folds to a constant.
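;
; For reference, the IR in this file corresponds roughly to the C sketch below.
; This is illustrative only: it is not part of the test input, and the exact
; source the IR came from is an assumption.
;
;   #include <stdarg.h>
;
;   static int vararg(int first, ...) { /* 'first' is unused; @foo passes poison */
;     va_list va;
;     va_start(va, first);
;     int a = va_arg(va, int);
;     int b = va_arg(va, int);
;     int c = va_arg(va, int);
;     va_end(va);
;     return a + b + c;
;   }
;
;   int foo(void) { return vararg(0, 1, 2, 3); } /* expected to fold to 6 */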

define i32 @foo() {
; CHECK-LABEL: define i32 @foo(
; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    ret i32 6
;
entry:
  %call = tail call i32 (i32, ...) @vararg(i32 poison, i32 noundef 1, i32 noundef 2, i32 noundef 3)
  ret i32 %call
}

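; The body below is the va_arg sequence already lowered to explicit pointer
; arithmetic: each variadic i32 is loaded from the current va_list pointer,
; which is then advanced by 4 bytes. The fixed argument %first is never read,
; so @foo passing poison for it is fine.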
define internal i32 @vararg(i32 %first, ...) {
entry:
  %vlist.alloca = alloca ptr, align 8, addrspace(5)
  %vlist = addrspacecast ptr addrspace(5) %vlist.alloca to ptr
  call void @llvm.va_start.p0(ptr %vlist)
  %vlist.promoted = load ptr, ptr %vlist, align 8
  %argp.a = getelementptr inbounds i8, ptr %vlist.promoted, i64 4
  store ptr %argp.a, ptr %vlist, align 8
  %a = load i32, ptr %vlist.promoted, align 4
  %argp.b = getelementptr inbounds i8, ptr %vlist.promoted, i64 8
  store ptr %argp.b, ptr %vlist, align 8
  %b = load i32, ptr %argp.a, align 4
  %sum = add nsw i32 %b, %a
  %argp.c = getelementptr inbounds i8, ptr %vlist.promoted, i64 12
  store ptr %argp.c, ptr %vlist, align 8
  %c = load i32, ptr %argp.b, align 4
  %ret = add nsw i32 %c, %sum
  call void @llvm.va_end.p0(ptr %vlist)
  ret i32 %ret
}

declare void @llvm.va_start.p0(ptr)

declare void @llvm.va_end.p0(ptr)