// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu pwr10 \
// RUN: -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu pwr9 \
// RUN: -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple powerpc64le-linux-unknown -target-cpu pwr8 \
// RUN: -emit-llvm -o - %s | FileCheck %s
// CHECK-LABEL: @test1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR1:%.*]], ptr [[PTR1_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR2:%.*]], ptr [[PTR2_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP0]], i64 2
// CHECK-NEXT: [[TMP1:%.*]] = load <512 x i1>, ptr [[ADD_PTR]], align 64
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8
// CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 1
// CHECK-NEXT: store <512 x i1> [[TMP1]], ptr [[ADD_PTR1]], align 64
// CHECK-NEXT: ret void
//
// Copy a __vector_quad between pointer offsets; per the CHECKs above, the type
// lowers to <512 x i1> with 64-byte alignment and whole-element GEP strides.
void test1(__vector_quad *ptr1, __vector_quad *ptr2) {
*(ptr2 + 1) = *(ptr1 + 2);
}
// CHECK-LABEL: @test2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR1_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR2_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR1:%.*]], ptr [[PTR1_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR2:%.*]], ptr [[PTR2_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR1_ADDR]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP0]], i64 2
// CHECK-NEXT: [[TMP1:%.*]] = load <256 x i1>, ptr [[ADD_PTR]], align 32
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PTR2_ADDR]], align 8
// CHECK-NEXT: [[ADD_PTR1:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 1
// CHECK-NEXT: store <256 x i1> [[TMP1]], ptr [[ADD_PTR1]], align 32
// CHECK-NEXT: ret void
//
// Copy a __vector_pair between pointer offsets; per the CHECKs above, the type
// lowers to <256 x i1> with 32-byte alignment and whole-element GEP strides.
void test2(__vector_pair *ptr1, __vector_pair *ptr2) {
*(ptr2 + 1) = *(ptr1 + 2);
}
typedef __vector_quad vq_t;
// CHECK-LABEL: @testVQTypedef(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[INP_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[OUTP_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQIN:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQOUT:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[INP:%.*]], ptr [[INP_ADDR]], align 8
// CHECK-NEXT: store ptr [[OUTP:%.*]], ptr [[OUTP_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[INP_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQIN]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUTP_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP2]], ptr [[VQOUT]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQIN]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load <512 x i1>, ptr [[TMP4]], align 64
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[VQOUT]], align 8
// CHECK-NEXT: store <512 x i1> [[TMP5]], ptr [[TMP6]], align 64
// CHECK-NEXT: ret void
//
// Cast int* to a typedef of __vector_quad (vq_t) and copy through it; CHECKs
// verify the resulting 64-byte-aligned <512 x i1> load/store.
void testVQTypedef(int *inp, int *outp) {
vq_t *vqin = (vq_t *)inp;
vq_t *vqout = (vq_t *)outp;
*vqout = *vqin;
}
// CHECK-LABEL: @testVQArg3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VQ_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VQ:%.*]], ptr [[VQ_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQ_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
// CHECK-NEXT: ret void
//
// Pass __vector_quad* as a parameter and store through a casted int*; CHECKs
// verify the <512 x i1> copy.
void testVQArg3(__vector_quad *vq, int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
*vqp = *vq;
}
// CHECK-LABEL: @testVQArg4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VQ_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VQ:%.*]], ptr [[VQ_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQ_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
// CHECK-NEXT: ret void
//
// Same as testVQArg3 but with a const-qualified __vector_quad* parameter;
// CHECKs verify identical <512 x i1> IR is generated.
void testVQArg4(const __vector_quad *const vq, int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
*vqp = *vq;
}
// CHECK-LABEL: @testVQArg5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VQA_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VQA:%.*]], ptr [[VQA_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQA_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 0
// CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[ARRAYIDX]], align 64
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
// CHECK-NEXT: ret void
//
// Pass __vector_quad as an array parameter (decays to pointer) and index it;
// CHECKs verify the arrayidx GEP over <512 x i1> elements.
void testVQArg5(__vector_quad vqa[], int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
*vqp = vqa[0];
}
// CHECK-LABEL: @testVQArg7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VQ_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VQ:%.*]], ptr [[VQ_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQ_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[TMP4]], align 64
// CHECK-NEXT: ret void
//
// Pass a const typedef pointer (const vq_t*) as parameter; CHECKs verify the
// same <512 x i1> copy as the non-typedef variants.
void testVQArg7(const vq_t *vq, int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
*vqp = *vq;
}
// CHECK-LABEL: @testVQRet2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 2
// CHECK-NEXT: ret ptr [[ADD_PTR]]
//
// Return a __vector_quad* computed by pointer arithmetic; CHECKs verify the
// GEP advances by 2 whole <512 x i1> elements.
__vector_quad *testVQRet2(int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
return vqp + 2;
}
// CHECK-LABEL: @testVQRet3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 2
// CHECK-NEXT: ret ptr [[ADD_PTR]]
//
// Same as testVQRet2 but returning const __vector_quad*; CHECKs verify
// identical pointer-arithmetic IR.
const __vector_quad *testVQRet3(int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
return vqp + 2;
}
// CHECK-LABEL: @testVQRet5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <512 x i1>, ptr [[TMP2]], i64 2
// CHECK-NEXT: ret ptr [[ADD_PTR]]
//
// Same as testVQRet2 but returning the const typedef (const vq_t*); CHECKs
// verify identical pointer-arithmetic IR.
const vq_t *testVQRet5(int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
return vqp + 2;
}
// CHECK-LABEL: @testVQSizeofAlignof(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQ:%.*]] = alloca <512 x i1>, align 64
// CHECK-NEXT: [[SIZET:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ALIGNT:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[SIZEV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ALIGNV:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <512 x i1>, ptr [[TMP2]], align 64
// CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[VQ]], align 64
// CHECK-NEXT: store i32 64, ptr [[SIZET]], align 4
// CHECK-NEXT: store i32 64, ptr [[ALIGNT]], align 4
// CHECK-NEXT: store i32 64, ptr [[SIZEV]], align 4
// CHECK-NEXT: store i32 64, ptr [[ALIGNV]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[SIZET]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ALIGNT]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP4]], [[TMP5]]
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SIZEV]], align 4
// CHECK-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[TMP6]]
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ALIGNV]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add i32 [[ADD1]], [[TMP7]]
// CHECK-NEXT: ret i32 [[ADD2]]
//
// sizeof/__alignof__ on the __vector_quad type and on a value of that type;
// CHECKs verify all four fold to the constant 64 (bytes).
int testVQSizeofAlignof(int *ptr) {
__vector_quad *vqp = (__vector_quad *)ptr;
__vector_quad vq = *vqp;
unsigned sizet = sizeof(__vector_quad);
unsigned alignt = __alignof__(__vector_quad);
unsigned sizev = sizeof(vq);
unsigned alignv = __alignof__(vq);
return sizet + alignt + sizev + alignv;
}
typedef __vector_pair vp_t;
// CHECK-LABEL: @testVPTypedef(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[INP_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[OUTP_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPIN:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPOUT:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[INP:%.*]], ptr [[INP_ADDR]], align 8
// CHECK-NEXT: store ptr [[OUTP:%.*]], ptr [[OUTP_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[INP_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPIN]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OUTP_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP2]], ptr [[VPOUT]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPIN]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load <256 x i1>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[VPOUT]], align 8
// CHECK-NEXT: store <256 x i1> [[TMP5]], ptr [[TMP6]], align 32
// CHECK-NEXT: ret void
//
// Cast int* to a typedef of __vector_pair (vp_t) and copy through it; CHECKs
// verify the resulting 32-byte-aligned <256 x i1> load/store.
void testVPTypedef(int *inp, int *outp) {
vp_t *vpin = (vp_t *)inp;
vp_t *vpout = (vp_t *)outp;
*vpout = *vpin;
}
// CHECK-LABEL: @testVPArg3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VP_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VP:%.*]], ptr [[VP_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VP_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
// CHECK-NEXT: ret void
//
// Pass __vector_pair* as a parameter and store through a casted int*; CHECKs
// verify the <256 x i1> copy.
void testVPArg3(__vector_pair *vp, int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
*vpp = *vp;
}
// CHECK-LABEL: @testVPArg4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VP_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VP:%.*]], ptr [[VP_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VP_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
// CHECK-NEXT: ret void
//
// Same as testVPArg3 but with a const-qualified __vector_pair* parameter;
// CHECKs verify identical <256 x i1> IR is generated.
void testVPArg4(const __vector_pair *const vp, int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
*vpp = *vp;
}
// CHECK-LABEL: @testVPArg5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VPA_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VPA:%.*]], ptr [[VPA_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPA_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 0
// CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[ARRAYIDX]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
// CHECK-NEXT: ret void
//
// Pass __vector_pair as an array parameter (decays to pointer) and index it;
// CHECKs verify the arrayidx GEP over <256 x i1> elements.
void testVPArg5(__vector_pair vpa[], int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
*vpp = vpa[0];
}
// CHECK-LABEL: @testVPArg7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VP_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[VP:%.*]], ptr [[VP_ADDR]], align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VP_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[TMP4]], align 32
// CHECK-NEXT: ret void
//
// Pass a const typedef pointer (const vp_t*) as parameter; CHECKs verify the
// same <256 x i1> copy as the non-typedef variants.
void testVPArg7(const vp_t *vp, int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
*vpp = *vp;
}
// CHECK-LABEL: @testVPRet2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 2
// CHECK-NEXT: ret ptr [[ADD_PTR]]
//
// Return a __vector_pair* computed by pointer arithmetic; CHECKs verify the
// GEP advances by 2 whole <256 x i1> elements.
__vector_pair *testVPRet2(int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
return vpp + 2;
}
// CHECK-LABEL: @testVPRet3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 2
// CHECK-NEXT: ret ptr [[ADD_PTR]]
//
// Same as testVPRet2 but returning const __vector_pair*; CHECKs verify
// identical pointer-arithmetic IR.
const __vector_pair *testVPRet3(int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
return vpp + 2;
}
// CHECK-LABEL: @testVPRet5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds <256 x i1>, ptr [[TMP2]], i64 2
// CHECK-NEXT: ret ptr [[ADD_PTR]]
//
// Same as testVPRet2 but returning the const typedef (const vp_t*); CHECKs
// verify identical pointer-arithmetic IR.
const vp_t *testVPRet5(int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
return vpp + 2;
}
// CHECK-LABEL: @testVPSizeofAlignof(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VP:%.*]] = alloca <256 x i1>, align 32
// CHECK-NEXT: [[SIZET:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ALIGNT:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[SIZEV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ALIGNV:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load <256 x i1>, ptr [[TMP2]], align 32
// CHECK-NEXT: store <256 x i1> [[TMP3]], ptr [[VP]], align 32
// CHECK-NEXT: store i32 32, ptr [[SIZET]], align 4
// CHECK-NEXT: store i32 32, ptr [[ALIGNT]], align 4
// CHECK-NEXT: store i32 32, ptr [[SIZEV]], align 4
// CHECK-NEXT: store i32 32, ptr [[ALIGNV]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[SIZET]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ALIGNT]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP4]], [[TMP5]]
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SIZEV]], align 4
// CHECK-NEXT: [[ADD1:%.*]] = add i32 [[ADD]], [[TMP6]]
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ALIGNV]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add i32 [[ADD1]], [[TMP7]]
// CHECK-NEXT: ret i32 [[ADD2]]
//
// sizeof/__alignof__ on the __vector_pair type and on a value of that type;
// CHECKs verify all four fold to the constant 32 (bytes).
int testVPSizeofAlignof(int *ptr) {
__vector_pair *vpp = (__vector_pair *)ptr;
__vector_pair vp = *vpp;
unsigned sizet = sizeof(__vector_pair);
unsigned alignt = __alignof__(__vector_pair);
unsigned sizev = sizeof(vp);
unsigned alignv = __alignof__(vp);
return sizet + alignt + sizev + alignv;
}