; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -mtriple=nvptx64-- --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s
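;
; Checks the expand-variadics lowering: each variadic function gains a
; trailing ptr parameter in place of "...", va_start/va_end become plain
; loads and stores through that pointer, and each variadic call site
; materializes a per-callee argument buffer (%foo.vararg and friends) with
; lifetime markers and element-wise stores.
;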
%struct.S1 = type { i32, i8, i64 }
%struct.S2 = type { i64, i64 }

@__const.bar.s1 = private unnamed_addr constant %struct.S1 { i32 1, i8 1, i64 1 }, align 8
@__const.qux.s = private unnamed_addr constant %struct.S2 { i64 1, i64 1 }, align 8
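;
; A C sketch of @variadics1, reconstructed from the IR (the original source
; is an assumption; needs <stdarg.h>):
;
;   int variadics1(int first, ...) {
;     va_list va;
;     va_start(va, first);
;     int r = first;
;     r += va_arg(va, int);
;     r += va_arg(va, int);
;     r += va_arg(va, int);
;     r = (int)(r + va_arg(va, long));    // i64 slot, realigned to 8 bytes
;     r = (int)(r + va_arg(va, double));
;     r = (int)(r + va_arg(va, double));
;     va_end(va);
;     return r;
;   }
;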
define dso_local i32 @variadics1(i32 noundef %first, ...) {
; CHECK-LABEL: define dso_local i32 @variadics1(
; CHECK-SAME: i32 noundef [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i64 4
; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[FIRST]], [[TMP0]]
; CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i64 4
; CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGP_CUR1]], align 4
; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[TMP1]]
; CHECK-NEXT: [[ARGP_CUR4:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR4]], i64 4
; CHECK-NEXT: store ptr [[ARGP_NEXT5]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGP_CUR4]], align 4
; CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[ADD3]], [[TMP2]]
; CHECK-NEXT: [[ARGP_CUR7:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR7]], i32 7
; CHECK-NEXT: [[ARGP_CUR7_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP3]], i64 -8)
; CHECK-NEXT: [[ARGP_NEXT8:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR7_ALIGNED]], i64 8
; CHECK-NEXT: store ptr [[ARGP_NEXT8]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr [[ARGP_CUR7_ALIGNED]], align 8
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[ADD6]] to i64
; CHECK-NEXT: [[ADD9:%.*]] = add nsw i64 [[CONV]], [[TMP4]]
; CHECK-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32
; CHECK-NEXT: [[ARGP_CUR11:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR11]], i32 7
; CHECK-NEXT: [[ARGP_CUR11_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP5]], i64 -8)
; CHECK-NEXT: [[ARGP_NEXT12:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR11_ALIGNED]], i64 8
; CHECK-NEXT: store ptr [[ARGP_NEXT12]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = load double, ptr [[ARGP_CUR11_ALIGNED]], align 8
; CHECK-NEXT: [[CONV13:%.*]] = sitofp i32 [[CONV10]] to double
; CHECK-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], [[TMP6]]
; CHECK-NEXT: [[CONV15:%.*]] = fptosi double [[ADD14]] to i32
; CHECK-NEXT: [[ARGP_CUR16:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR16]], i32 7
; CHECK-NEXT: [[ARGP_CUR16_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP7]], i64 -8)
; CHECK-NEXT: [[ARGP_NEXT17:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR16_ALIGNED]], i64 8
; CHECK-NEXT: store ptr [[ARGP_NEXT17]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = load double, ptr [[ARGP_CUR16_ALIGNED]], align 8
; CHECK-NEXT: [[CONV18:%.*]] = sitofp i32 [[CONV15]] to double
; CHECK-NEXT: [[ADD19:%.*]] = fadd double [[CONV18]], [[TMP8]]
; CHECK-NEXT: [[CONV20:%.*]] = fptosi double [[ADD19]] to i32
; CHECK-NEXT: ret i32 [[CONV20]]
;
entry:
%vlist = alloca ptr, align 8
call void @llvm.va_start.p0(ptr %vlist)
%argp.cur = load ptr, ptr %vlist, align 8
%argp.next = getelementptr inbounds i8, ptr %argp.cur, i64 4
store ptr %argp.next, ptr %vlist, align 8
%0 = load i32, ptr %argp.cur, align 4
%add = add nsw i32 %first, %0
%argp.cur1 = load ptr, ptr %vlist, align 8
%argp.next2 = getelementptr inbounds i8, ptr %argp.cur1, i64 4
store ptr %argp.next2, ptr %vlist, align 8
%1 = load i32, ptr %argp.cur1, align 4
%add3 = add nsw i32 %add, %1
%argp.cur4 = load ptr, ptr %vlist, align 8
%argp.next5 = getelementptr inbounds i8, ptr %argp.cur4, i64 4
store ptr %argp.next5, ptr %vlist, align 8
%2 = load i32, ptr %argp.cur4, align 4
%add6 = add nsw i32 %add3, %2
%argp.cur7 = load ptr, ptr %vlist, align 8
%3 = getelementptr inbounds i8, ptr %argp.cur7, i32 7
%argp.cur7.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %3, i64 -8)
%argp.next8 = getelementptr inbounds i8, ptr %argp.cur7.aligned, i64 8
store ptr %argp.next8, ptr %vlist, align 8
%4 = load i64, ptr %argp.cur7.aligned, align 8
%conv = sext i32 %add6 to i64
%add9 = add nsw i64 %conv, %4
%conv10 = trunc i64 %add9 to i32
%argp.cur11 = load ptr, ptr %vlist, align 8
%5 = getelementptr inbounds i8, ptr %argp.cur11, i32 7
%argp.cur11.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %5, i64 -8)
%argp.next12 = getelementptr inbounds i8, ptr %argp.cur11.aligned, i64 8
store ptr %argp.next12, ptr %vlist, align 8
%6 = load double, ptr %argp.cur11.aligned, align 8
%conv13 = sitofp i32 %conv10 to double
%add14 = fadd double %conv13, %6
%conv15 = fptosi double %add14 to i32
%argp.cur16 = load ptr, ptr %vlist, align 8
%7 = getelementptr inbounds i8, ptr %argp.cur16, i32 7
%argp.cur16.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %7, i64 -8)
%argp.next17 = getelementptr inbounds i8, ptr %argp.cur16.aligned, i64 8
store ptr %argp.next17, ptr %vlist, align 8
%8 = load double, ptr %argp.cur16.aligned, align 8
%conv18 = sitofp i32 %conv15 to double
%add19 = fadd double %conv18, %8
%conv20 = fptosi double %add19 to i32
call void @llvm.va_end.p0(ptr %vlist)
ret i32 %conv20
}

declare void @llvm.va_start.p0(ptr)
declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
declare void @llvm.va_end.p0(ptr)
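;
; A C sketch of @foo (reconstructed from the IR; the source is an assumption):
;
;   int foo(void) {
;     return variadics1(1, (char)1, (short)1, 1, 1L, 1.0f, 1.0);
;   }
;
; The char, short, and float arguments arrive already widened (sext to i32,
; fpext to double) by the default argument promotions.
;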
define dso_local i32 @foo() {
; CHECK-LABEL: define dso_local i32 @foo() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FOO_VARARG:%.*]], align 8
; CHECK-NEXT: [[CONV:%.*]] = sext i8 1 to i32
; CHECK-NEXT: [[CONV1:%.*]] = sext i16 1 to i32
; CHECK-NEXT: [[CONV2:%.*]] = fpext float 1.000000e+00 to double
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; CHECK-NEXT: store i32 [[CONV1]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
; CHECK-NEXT: store i32 1, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 4
; CHECK-NEXT: store i64 1, ptr [[TMP3]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 5
; CHECK-NEXT: store double [[CONV2]], ptr [[TMP4]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 6
; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP5]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics1(i32 noundef 1, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
%conv = sext i8 1 to i32
%conv1 = sext i16 1 to i32
%conv2 = fpext float 1.000000e+00 to double
%call = call i32 (i32, ...) @variadics1(i32 noundef 1, i32 noundef %conv, i32 noundef %conv1, i32 noundef 1, i64 noundef 1, double noundef %conv2, double noundef 1.000000e+00)
ret i32 %call
}
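;
; A C sketch of @variadics2, reconstructed from the IR; the struct field
; names are assumptions:
;
;   struct S1 { int x; char c; long l; };   // { i32, i8, i64 }, 16 bytes
;   int variadics2(int first, ...) {
;     va_list va;
;     va_start(va, first);
;     struct S1 s1 = va_arg(va, struct S1); // SROA split s1 into pieces
;     va_end(va);
;     return (int)(first + s1.x + s1.c + s1.l);
;   }
;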
define dso_local i32 @variadics2(i32 noundef %first, ...) {
; CHECK-LABEL: define dso_local i32 @variadics2(
; CHECK-SAME: i32 noundef [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[S1_SROA_3:%.*]] = alloca [3 x i8], align 1
; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
; CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -8)
; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[S1_SROA_0_0_COPYLOAD:%.*]] = load i32, ptr [[ARGP_CUR_ALIGNED]], align 8
; CHECK-NEXT: [[S1_SROA_2_0_ARGP_CUR_ALIGNED_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 4
; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr [[S1_SROA_2_0_ARGP_CUR_ALIGNED_SROA_IDX]], align 4
; CHECK-NEXT: [[S1_SROA_3_0_ARGP_CUR_ALIGNED_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 5
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 [[S1_SROA_3_0_ARGP_CUR_ALIGNED_SROA_IDX]], i64 3, i1 false)
; CHECK-NEXT: [[S1_SROA_31_0_ARGP_CUR_ALIGNED_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 8
; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr [[S1_SROA_31_0_ARGP_CUR_ALIGNED_SROA_IDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[FIRST]], [[S1_SROA_0_0_COPYLOAD]]
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[S1_SROA_2_0_COPYLOAD]] to i32
; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[CONV]]
; CHECK-NEXT: [[CONV2:%.*]] = sext i32 [[ADD1]] to i64
; CHECK-NEXT: [[ADD3:%.*]] = add nsw i64 [[CONV2]], [[S1_SROA_31_0_COPYLOAD]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[ADD3]] to i32
; CHECK-NEXT: ret i32 [[CONV4]]
;
entry:
%vlist = alloca ptr, align 8
%s1.sroa.3 = alloca [3 x i8], align 1
call void @llvm.va_start.p0(ptr %vlist)
%argp.cur = load ptr, ptr %vlist, align 8
%0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
%argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
%argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
store ptr %argp.next, ptr %vlist, align 8
%s1.sroa.0.0.copyload = load i32, ptr %argp.cur.aligned, align 8
%s1.sroa.2.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 4
%s1.sroa.2.0.copyload = load i8, ptr %s1.sroa.2.0.argp.cur.aligned.sroa_idx, align 4
%s1.sroa.3.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 5
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 %s1.sroa.3.0.argp.cur.aligned.sroa_idx, i64 3, i1 false)
%s1.sroa.31.0.argp.cur.aligned.sroa_idx = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
%s1.sroa.31.0.copyload = load i64, ptr %s1.sroa.31.0.argp.cur.aligned.sroa_idx, align 8
%add = add nsw i32 %first, %s1.sroa.0.0.copyload
%conv = sext i8 %s1.sroa.2.0.copyload to i32
%add1 = add nsw i32 %add, %conv
%conv2 = sext i32 %add1 to i64
%add3 = add nsw i64 %conv2, %s1.sroa.31.0.copyload
%conv4 = trunc i64 %add3 to i32
call void @llvm.va_end.p0(ptr %vlist)
ret i32 %conv4
}

declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
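;
; A C sketch of @bar (reconstructed from the IR; field names as in @variadics2):
;
;   int bar(void) {
;     struct S1 s1 = {1, 1, 1};
;     return variadics2(1, s1);
;   }
;
; SROA ran first, so the struct is forwarded as separate i32/i8/i64 pieces
; and reassembled into %bar.vararg at the call site.
;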
define dso_local i32 @bar() {
; CHECK-LABEL: define dso_local i32 @bar() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[S1_SROA_3:%.*]] = alloca [3 x i8], align 1
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAR_VARARG:%.*]], align 8
; CHECK-NEXT: [[S1_SROA_0_0_COPYLOAD:%.*]] = load i32, ptr @__const.bar.s1, align 8
; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i32 [[S1_SROA_0_0_COPYLOAD]], ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; CHECK-NEXT: store i8 [[S1_SROA_2_0_COPYLOAD]], ptr [[TMP1]], align 1
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
; CHECK-NEXT: store i64 [[S1_SROA_31_0_COPYLOAD]], ptr [[TMP2]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics2(i32 noundef 1, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
%s1.sroa.3 = alloca [3 x i8], align 1
%s1.sroa.0.0.copyload = load i32, ptr @__const.bar.s1, align 8
%s1.sroa.2.0.copyload = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
call void @llvm.memcpy.p0.p0.i64(ptr align 1 %s1.sroa.3, ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
%s1.sroa.31.0.copyload = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
%call = call i32 (i32, ...) @variadics2(i32 noundef 1, i32 %s1.sroa.0.0.copyload, i8 %s1.sroa.2.0.copyload, i64 %s1.sroa.31.0.copyload)
ret i32 %call
}
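;
; A C sketch of @variadics3, reconstructed from the IR; the vector typedef
; is an assumption:
;
;   typedef int int4 __attribute__((vector_size(16)));
;   int variadics3(int first, ...) {
;     va_list va;
;     va_start(va, first);
;     int4 v = va_arg(va, int4);  // 16-byte slot, realigned to 16
;     va_end(va);
;     return v[0] + v[1] + v[2] + v[3];
;   }
;
; Note that %first is unused; only the vector contributes to the result.
;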
define dso_local i32 @variadics3(i32 noundef %first, ...) {
; CHECK-LABEL: define dso_local i32 @variadics3(
; CHECK-SAME: i32 noundef [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 15
; CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -16)
; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 16
; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[ARGP_CUR_ALIGNED]], align 16
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i64 0
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[TMP1]], i64 1
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP1]], i64 2
; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[TMP4]]
; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP1]], i64 3
; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[ADD1]], [[TMP5]]
; CHECK-NEXT: ret i32 [[ADD2]]
;
entry:
%vlist = alloca ptr, align 8
call void @llvm.va_start.p0(ptr %vlist)
%argp.cur = load ptr, ptr %vlist, align 8
%0 = getelementptr inbounds i8, ptr %argp.cur, i32 15
%argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -16)
%argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 16
store ptr %argp.next, ptr %vlist, align 8
%1 = load <4 x i32>, ptr %argp.cur.aligned, align 16
call void @llvm.va_end.p0(ptr %vlist)
%2 = extractelement <4 x i32> %1, i64 0
%3 = extractelement <4 x i32> %1, i64 1
%add = add nsw i32 %2, %3
%4 = extractelement <4 x i32> %1, i64 2
%add1 = add nsw i32 %add, %4
%5 = extractelement <4 x i32> %1, i64 3
%add2 = add nsw i32 %add1, %5
ret i32 %add2
}
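;
; A C sketch of @baz (reconstructed from the IR; int4 as in @variadics3):
;
;   int baz(void) {
;     return variadics3(1, (int4){1, 1, 1, 1});
;   }
;
; The <4 x i32> forces %baz.vararg up to 16-byte alignment.
;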
define dso_local i32 @baz() {
; CHECK-LABEL: define dso_local i32 @baz() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAZ_VARARG:%.*]], align 16
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAZ_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr [[TMP0]], align 16
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics3(i32 noundef 1, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
%call = call i32 (i32, ...) @variadics3(i32 noundef 1, <4 x i32> noundef <i32 1, i32 1, i32 1, i32 1>)
ret i32 %call
}
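;
; A C sketch of @variadics4, reconstructed from the IR; field names are
; assumptions:
;
;   struct S2 { long x; long y; };
;   int variadics4(struct S2 first, ...) {  // lowered as byval(%struct.S2)
;     va_list va;
;     va_start(va, first);
;     long l = va_arg(va, long);
;     va_end(va);
;     return (int)(first.x + first.y + l);
;   }
;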
define dso_local i32 @variadics4(ptr noundef byval(%struct.S2) align 8 %first, ...) {
; CHECK-LABEL: define dso_local i32 @variadics4(
; CHECK-SAME: ptr noundef byval([[STRUCT_S2:%.*]]) align 8 [[FIRST:%.*]], ptr [[VARARGS:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VLIST:%.*]] = alloca ptr, align 8
; CHECK-NEXT: store ptr [[VARARGS]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
; CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[TMP0]], i64 -8)
; CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i64 8
; CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VLIST]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGP_CUR_ALIGNED]], align 8
; CHECK-NEXT: [[X1:%.*]] = getelementptr inbounds [[STRUCT_S2]], ptr [[FIRST]], i32 0, i32 0
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[X1]], align 8
; CHECK-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_S2]], ptr [[FIRST]], i32 0, i32 1
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[Y]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[ADD2:%.*]] = add nsw i64 [[ADD]], [[TMP1]]
; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[ADD2]] to i32
; CHECK-NEXT: ret i32 [[CONV]]
;
entry:
%vlist = alloca ptr, align 8
call void @llvm.va_start.p0(ptr %vlist)
%argp.cur = load ptr, ptr %vlist, align 8
%0 = getelementptr inbounds i8, ptr %argp.cur, i32 7
%argp.cur.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %0, i64 -8)
%argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i64 8
store ptr %argp.next, ptr %vlist, align 8
%1 = load i64, ptr %argp.cur.aligned, align 8
%x1 = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 0
%2 = load i64, ptr %x1, align 8
%y = getelementptr inbounds %struct.S2, ptr %first, i32 0, i32 1
%3 = load i64, ptr %y, align 8
%add = add nsw i64 %2, %3
%add2 = add nsw i64 %add, %1
%conv = trunc i64 %add2 to i32
call void @llvm.va_end.p0(ptr %vlist)
ret i32 %conv
}
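;
; A C sketch of @qux (reconstructed from the IR; the source is an assumption):
;
;   void qux(void) {
;     struct S2 s = {1, 1};
;     variadics4(s, 1L);
;   }
;
; The byval struct stays a direct argument; only the trailing i64 moves into
; %qux.vararg.
;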
define dso_local void @qux() {
; CHECK-LABEL: define dso_local void @qux() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 8
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[QUX_VARARG:%.*]], align 8
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S]], ptr align 8 @__const.qux.s, i64 16, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[QUX_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i64 1, ptr [[TMP0]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics4(ptr noundef byval([[STRUCT_S2]]) align 8 [[S]], ptr [[VARARG_BUFFER]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
%s = alloca %struct.S2, align 8
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %s, ptr align 8 @__const.qux.s, i64 16, i1 false)
%call = call i32 (ptr, ...) @variadics4(ptr noundef byval(%struct.S2) align 8 %s, i64 noundef 1)
ret void
}