// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -target-cpu pwr10 \
// RUN:   -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -target-cpu pwr10 \
// RUN:   -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK-BE
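
// Check IR generation for the PowerPC MMA types __vector_quad and
// __vector_pair used as local variables and through qualified pointers,
// on both little-endian (CHECK) and big-endian (CHECK-BE) targets.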
// CHECK-LABEL: @testVQLocal(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VC_ADDR:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VQ1:%.*]] = alloca <512 x i1>, align 64
// CHECK-NEXT: [[VQ2:%.*]] = alloca <512 x i1>, align 64
// CHECK-NEXT: [[VQ3:%.*]] = alloca <512 x i1>, align 64
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store <16 x i8> [[VC:%.*]], ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <512 x i1>, ptr [[TMP1]], align 64
// CHECK-NEXT: store <512 x i1> [[TMP2]], ptr [[VQ1]], align 64
// CHECK-NEXT: [[TMP3:%.*]] = call <512 x i1> @llvm.ppc.mma.xxsetaccz()
// CHECK-NEXT: store <512 x i1> [[TMP3]], ptr [[VQ2]], align 64
// CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP6:%.*]] = call <512 x i1> @llvm.ppc.mma.xvi4ger8(<16 x i8> [[TMP4]], <16 x i8> [[TMP5]])
// CHECK-NEXT: store <512 x i1> [[TMP6]], ptr [[VQ3]], align 64
// CHECK-NEXT: [[TMP7:%.*]] = load <512 x i1>, ptr [[VQ3]], align 64
// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-NEXT: store <512 x i1> [[TMP7]], ptr [[TMP8]], align 64
// CHECK-NEXT: ret void
//
// CHECK-BE-LABEL: @testVQLocal(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-BE-NEXT: [[VC_ADDR:%.*]] = alloca <16 x i8>, align 16
// CHECK-BE-NEXT: [[VQP:%.*]] = alloca ptr, align 8
// CHECK-BE-NEXT: [[VQ1:%.*]] = alloca <512 x i1>, align 64
// CHECK-BE-NEXT: [[VQ2:%.*]] = alloca <512 x i1>, align 64
// CHECK-BE-NEXT: [[VQ3:%.*]] = alloca <512 x i1>, align 64
// CHECK-BE-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-BE-NEXT: store <16 x i8> [[VC:%.*]], ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-BE-NEXT: store ptr [[TMP0]], ptr [[VQP]], align 8
// CHECK-BE-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-BE-NEXT: [[TMP2:%.*]] = load <512 x i1>, ptr [[TMP1]], align 64
// CHECK-BE-NEXT: store <512 x i1> [[TMP2]], ptr [[VQ1]], align 64
// CHECK-BE-NEXT: [[TMP3:%.*]] = call <512 x i1> @llvm.ppc.mma.xxsetaccz()
// CHECK-BE-NEXT: store <512 x i1> [[TMP3]], ptr [[VQ2]], align 64
// CHECK-BE-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP6:%.*]] = call <512 x i1> @llvm.ppc.mma.xvi4ger8(<16 x i8> [[TMP4]], <16 x i8> [[TMP5]])
// CHECK-BE-NEXT: store <512 x i1> [[TMP6]], ptr [[VQ3]], align 64
// CHECK-BE-NEXT: [[TMP7:%.*]] = load <512 x i1>, ptr [[VQ3]], align 64
// CHECK-BE-NEXT: [[TMP8:%.*]] = load ptr, ptr [[VQP]], align 8
// CHECK-BE-NEXT: store <512 x i1> [[TMP7]], ptr [[TMP8]], align 64
// CHECK-BE-NEXT: ret void
//
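// Copy a __vector_quad from a pointer into a local, materialize accumulators
// with __builtin_mma_xxsetaccz and __builtin_mma_xvi4ger8, and store one back
// through the pointer.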
void testVQLocal(int *ptr, vector unsigned char vc) {
__vector_quad *vqp = (__vector_quad *)ptr;
__vector_quad vq1 = *vqp;
__vector_quad vq2;
__builtin_mma_xxsetaccz(&vq2);
__vector_quad vq3;
__builtin_mma_xvi4ger8(&vq3, vc, vc);
*vqp = vq3;
}
// CHECK-LABEL: @testVPLocal(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VC_ADDR:%.*]] = alloca <16 x i8>, align 16
// CHECK-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[VP1:%.*]] = alloca <256 x i1>, align 32
// CHECK-NEXT: [[VP2:%.*]] = alloca <256 x i1>, align 32
// CHECK-NEXT: [[VP3:%.*]] = alloca <256 x i1>, align 32
// CHECK-NEXT: [[VQ:%.*]] = alloca <512 x i1>, align 64
// CHECK-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store <16 x i8> [[VC:%.*]], ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <256 x i1>, ptr [[TMP1]], align 32
// CHECK-NEXT: store <256 x i1> [[TMP2]], ptr [[VP1]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP5:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP3]], <16 x i8> [[TMP4]])
// CHECK-NEXT: store <256 x i1> [[TMP5]], ptr [[VP2]], align 64
// CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP7]], <16 x i8> [[TMP6]])
// CHECK-NEXT: store <256 x i1> [[TMP8]], ptr [[VP2]], align 64
// CHECK-NEXT: [[TMP9:%.*]] = load <256 x i1>, ptr [[VP3]], align 32
// CHECK-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-NEXT: [[TMP11:%.*]] = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> [[TMP9]], <16 x i8> [[TMP10]])
// CHECK-NEXT: store <512 x i1> [[TMP11]], ptr [[VQ]], align 64
// CHECK-NEXT: [[TMP12:%.*]] = load <256 x i1>, ptr [[VP3]], align 32
// CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-NEXT: store <256 x i1> [[TMP12]], ptr [[TMP13]], align 32
// CHECK-NEXT: ret void
//
// CHECK-BE-LABEL: @testVPLocal(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-BE-NEXT: [[VC_ADDR:%.*]] = alloca <16 x i8>, align 16
// CHECK-BE-NEXT: [[VPP:%.*]] = alloca ptr, align 8
// CHECK-BE-NEXT: [[VP1:%.*]] = alloca <256 x i1>, align 32
// CHECK-BE-NEXT: [[VP2:%.*]] = alloca <256 x i1>, align 32
// CHECK-BE-NEXT: [[VP3:%.*]] = alloca <256 x i1>, align 32
// CHECK-BE-NEXT: [[VQ:%.*]] = alloca <512 x i1>, align 64
// CHECK-BE-NEXT: store ptr [[PTR:%.*]], ptr [[PTR_ADDR]], align 8
// CHECK-BE-NEXT: store <16 x i8> [[VC:%.*]], ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-BE-NEXT: store ptr [[TMP0]], ptr [[VPP]], align 8
// CHECK-BE-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-BE-NEXT: [[TMP2:%.*]] = load <256 x i1>, ptr [[TMP1]], align 32
// CHECK-BE-NEXT: store <256 x i1> [[TMP2]], ptr [[VP1]], align 32
// CHECK-BE-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP5:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP3]], <16 x i8> [[TMP4]])
// CHECK-BE-NEXT: store <256 x i1> [[TMP5]], ptr [[VP2]], align 64
// CHECK-BE-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP7:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP8:%.*]] = call <256 x i1> @llvm.ppc.vsx.assemble.pair(<16 x i8> [[TMP6]], <16 x i8> [[TMP7]])
// CHECK-BE-NEXT: store <256 x i1> [[TMP8]], ptr [[VP2]], align 64
// CHECK-BE-NEXT: [[TMP9:%.*]] = load <256 x i1>, ptr [[VP3]], align 32
// CHECK-BE-NEXT: [[TMP10:%.*]] = load <16 x i8>, ptr [[VC_ADDR]], align 16
// CHECK-BE-NEXT: [[TMP11:%.*]] = call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> [[TMP9]], <16 x i8> [[TMP10]])
// CHECK-BE-NEXT: store <512 x i1> [[TMP11]], ptr [[VQ]], align 64
// CHECK-BE-NEXT: [[TMP12:%.*]] = load <256 x i1>, ptr [[VP3]], align 32
// CHECK-BE-NEXT: [[TMP13:%.*]] = load ptr, ptr [[VPP]], align 8
// CHECK-BE-NEXT: store <256 x i1> [[TMP12]], ptr [[TMP13]], align 32
// CHECK-BE-NEXT: ret void
//
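// Copy a __vector_pair from a pointer into a local, form pairs with
// __builtin_vsx_assemble_pair and __builtin_vsx_build_pair (build_pair
// reverses its vector operands on little endian, hence the differing operand
// order between CHECK and CHECK-BE), use a pair in __builtin_mma_xvf64ger,
// and store a pair back through the pointer.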
void testVPLocal(int *ptr, vector unsigned char vc) {
__vector_pair *vpp = (__vector_pair *)ptr;
__vector_pair vp1 = *vpp;
__vector_pair vp2;
__builtin_vsx_assemble_pair(&vp2, vc, vc);
__builtin_vsx_build_pair(&vp2, vc, vc);
__vector_pair vp3;
__vector_quad vq;
__builtin_mma_xvf64ger(&vq, vp3, vc);
*vpp = vp3;
}
// CHECK-LABEL: @testRestrictQualifiedPointer2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ACC_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[ARR:%.*]] = alloca [4 x <4 x float>], align 16
// CHECK-NEXT: store ptr [[ACC:%.*]], ptr [[ACC_ADDR]], align 8
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ACC_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ACC_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <512 x i1>, ptr [[TMP1]], align 64
// CHECK-NEXT: [[TMP3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 0
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 0
// CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP5]], align 16
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 1
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 1
// CHECK-NEXT: store <16 x i8> [[TMP6]], ptr [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 2
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 2
// CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[TMP9]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 3
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 3
// CHECK-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP11]], align 16
// CHECK-NEXT: ret void
//
// CHECK-BE-LABEL: @testRestrictQualifiedPointer2(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[ACC_ADDR:%.*]] = alloca ptr, align 8
// CHECK-BE-NEXT: [[ARR:%.*]] = alloca [4 x <4 x float>], align 16
// CHECK-BE-NEXT: store ptr [[ACC:%.*]], ptr [[ACC_ADDR]], align 8
// CHECK-BE-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[ARR]], i64 0, i64 0
// CHECK-BE-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ACC_ADDR]], align 8
// CHECK-BE-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ACC_ADDR]], align 8
// CHECK-BE-NEXT: [[TMP2:%.*]] = load <512 x i1>, ptr [[TMP1]], align 64
// CHECK-BE-NEXT: [[TMP3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> [[TMP2]])
// CHECK-BE-NEXT: [[TMP4:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 0
// CHECK-BE-NEXT: [[TMP5:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 0
// CHECK-BE-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP5]], align 16
// CHECK-BE-NEXT: [[TMP6:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 1
// CHECK-BE-NEXT: [[TMP7:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 1
// CHECK-BE-NEXT: store <16 x i8> [[TMP6]], ptr [[TMP7]], align 16
// CHECK-BE-NEXT: [[TMP8:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 2
// CHECK-BE-NEXT: [[TMP9:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 2
// CHECK-BE-NEXT: store <16 x i8> [[TMP8]], ptr [[TMP9]], align 16
// CHECK-BE-NEXT: [[TMP10:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 3
// CHECK-BE-NEXT: [[TMP11:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 3
// CHECK-BE-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP11]], align 16
// CHECK-BE-NEXT: ret void
//
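// Disassemble an accumulator reached through a restrict-qualified
// __vector_quad pointer into four 16-byte vectors stored to a local array.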
void testRestrictQualifiedPointer2(__vector_quad *__restrict acc) {
vector float arr[4];
__builtin_mma_disassemble_acc(arr, acc);
}
// CHECK-LABEL: @testVolatileQualifiedPointer2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ACC_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[ARR:%.*]] = alloca [4 x <4 x float>], align 16
// CHECK-NEXT: store volatile ptr [[ACC:%.*]], ptr [[ACC_ADDR]], align 8
// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr [[ACC_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile ptr, ptr [[ACC_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load <512 x i1>, ptr [[TMP1]], align 64
// CHECK-NEXT: [[TMP3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 0
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 0
// CHECK-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP5]], align 16
// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 1
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 1
// CHECK-NEXT: store <16 x i8> [[TMP6]], ptr [[TMP7]], align 16
// CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 2
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 2
// CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[TMP9]], align 16
// CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 3
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 3
// CHECK-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP11]], align 16
// CHECK-NEXT: ret void
//
// CHECK-BE-LABEL: @testVolatileQualifiedPointer2(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[ACC_ADDR:%.*]] = alloca ptr, align 8
// CHECK-BE-NEXT: [[ARR:%.*]] = alloca [4 x <4 x float>], align 16
// CHECK-BE-NEXT: store volatile ptr [[ACC:%.*]], ptr [[ACC_ADDR]], align 8
// CHECK-BE-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x <4 x float>], ptr [[ARR]], i64 0, i64 0
// CHECK-BE-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr [[ACC_ADDR]], align 8
// CHECK-BE-NEXT: [[TMP1:%.*]] = load volatile ptr, ptr [[ACC_ADDR]], align 8
// CHECK-BE-NEXT: [[TMP2:%.*]] = load <512 x i1>, ptr [[TMP1]], align 64
// CHECK-BE-NEXT: [[TMP3:%.*]] = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> [[TMP2]])
// CHECK-BE-NEXT: [[TMP4:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 0
// CHECK-BE-NEXT: [[TMP5:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 0
// CHECK-BE-NEXT: store <16 x i8> [[TMP4]], ptr [[TMP5]], align 16
// CHECK-BE-NEXT: [[TMP6:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 1
// CHECK-BE-NEXT: [[TMP7:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 1
// CHECK-BE-NEXT: store <16 x i8> [[TMP6]], ptr [[TMP7]], align 16
// CHECK-BE-NEXT: [[TMP8:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 2
// CHECK-BE-NEXT: [[TMP9:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 2
// CHECK-BE-NEXT: store <16 x i8> [[TMP8]], ptr [[TMP9]], align 16
// CHECK-BE-NEXT: [[TMP10:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[TMP3]], 3
// CHECK-BE-NEXT: [[TMP11:%.*]] = getelementptr inbounds <16 x i8>, ptr [[ARRAYDECAY]], i32 3
// CHECK-BE-NEXT: store <16 x i8> [[TMP10]], ptr [[TMP11]], align 16
// CHECK-BE-NEXT: ret void
//
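// Same as above, but through a volatile-qualified pointer; the pointer
// accesses are emitted as volatile loads and stores.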
void testVolatileQualifiedPointer2(__vector_quad *__volatile acc) {
vector float arr[4];
__builtin_mma_disassemble_acc(arr, acc);
}