; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -p sroa -S %s | FileCheck %s
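
; Exercises SROA's handling of !tbaa.struct metadata: when loads, stores,
; memcpys and memsets carrying !tbaa.struct are split or shortened, the
; rewritten accesses should pick up the per-field !tbaa access tags where the
; new access lines up with a field described by the metadata.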
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
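
; An i64 load/store pair transfers both fields of the { float, float } alloca
; and carries !tbaa.struct !0. The CHECK lines expect the two split i32 stores
; to %res to each carry the per-field float tag (TBAA0).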
define void @load_store_transfer_split_struct_tbaa_2_float(ptr dereferenceable(24) %res, float %a, float %b) {
; CHECK-LABEL: define void @load_store_transfer_split_struct_tbaa_2_float(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], float [[A:%.*]], float [[B:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[A]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[B]] to i32
; CHECK-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT: [[RES_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[RES]], i64 4
; CHECK-NEXT: store i32 [[TMP1]], ptr [[RES_SROA_IDX]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[RES]], align 8
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca { float, float }, align 4
store float %a, ptr %tmp, align 4
%tmp.4 = getelementptr inbounds i8, ptr %tmp, i64 4
store float %b, ptr %tmp.4, align 4
%l1 = load i64, ptr %tmp, !tbaa.struct !0
store i64 %l1, ptr %res, !tbaa.struct !0
%p = load ptr, ptr %res, align 8
ret void
}
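
; Same metadata on a memcpy out of the alloca: the copy is rewritten to two
; float stores, each expected to carry the field's float tag.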
define void @memcpy_transfer(ptr dereferenceable(24) %res, float %a, float %b) {
; CHECK-LABEL: define void @memcpy_transfer(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], float [[A:%.*]], float [[B:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L_PTR:%.*]] = load ptr, ptr [[RES]], align 8
; CHECK-NEXT: store float [[A]], ptr [[L_PTR]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[TMP_SROA_2_0_L_PTR_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[L_PTR]], i64 4
; CHECK-NEXT: store float [[B]], ptr [[TMP_SROA_2_0_L_PTR_SROA_IDX]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca { float, float }, align 4
store float %a, ptr %tmp, align 4
%__im_.i.i = getelementptr inbounds i8, ptr %tmp, i64 4
store float %b, ptr %__im_.i.i, align 4
%l.ptr = load ptr, ptr %res, align 8
call void @llvm.memcpy.p0.p0.i64(ptr %l.ptr, ptr %tmp, i64 8, i1 false), !tbaa.struct !0
ret void
}
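
; The memcpy copies only 6 bytes, so the trailing 2-byte piece does not line
; up with a field of !tbaa.struct !0; the CHECK lines expect the truncated
; i16 store to carry no !tbaa.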
define void @memcpy_transfer_tbaa_field_and_size_do_not_align(ptr dereferenceable(24) %res, float %a, float %b) {
; CHECK-LABEL: define void @memcpy_transfer_tbaa_field_and_size_do_not_align(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], float [[A:%.*]], float [[B:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[L_PTR:%.*]] = load ptr, ptr [[RES]], align 8
; CHECK-NEXT: store float [[A]], ptr [[L_PTR]], align 1, !tbaa [[TBAA0]]
; CHECK-NEXT: [[TMP_SROA_2_0_L_PTR_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[L_PTR]], i64 4
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[B]] to i32
; CHECK-NEXT: [[TMP_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0]] to i16
; CHECK-NEXT: store i16 [[TMP_SROA_2_0_EXTRACT_TRUNC]], ptr [[TMP_SROA_2_0_L_PTR_SROA_IDX]], align 1
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca { float, float }, align 4
store float %a, ptr %tmp, align 4
%__im_.i.i = getelementptr inbounds i8, ptr %tmp, i64 4
store float %b, ptr %__im_.i.i, align 4
%l.ptr = load ptr, ptr %res, align 8
call void @llvm.memcpy.p0.p0.i64(ptr %l.ptr, ptr %tmp, i64 6, i1 false), !tbaa.struct !0
ret void
}
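
; With i31 members the alloca is not split; the i62 load/store pair is
; expected to keep the original !tbaa.struct metadata unchanged.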
define void @load_store_transfer_split_struct_tbaa_2_i31(ptr dereferenceable(24) %res, i31 %a, i31 %b) {
; CHECK-LABEL: define void @load_store_transfer_split_struct_tbaa_2_i31(
; CHECK-SAME: ptr dereferenceable(24) [[RES:%.*]], i31 [[A:%.*]], i31 [[B:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca { i31, i31 }, align 4
; CHECK-NEXT: store i31 [[A]], ptr [[TMP]], align 4
; CHECK-NEXT: [[TMP_4_TMP_4_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
; CHECK-NEXT: store i31 [[B]], ptr [[TMP_4_TMP_4_SROA_IDX]], align 4
; CHECK-NEXT: [[TMP_0_L1:%.*]] = load i62, ptr [[TMP]], align 4, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
; CHECK-NEXT: store i62 [[TMP_0_L1]], ptr [[RES]], align 4, !tbaa.struct [[TBAA_STRUCT4]]
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca { i31, i31 }, align 4
store i31 %a, ptr %tmp, align 4
%tmp.4 = getelementptr inbounds i8, ptr %tmp, i64 4
store i31 %b, ptr %tmp.4, align 4
%l1 = load i62, ptr %tmp, !tbaa.struct !0
store i62 %l1, ptr %res, !tbaa.struct !0
ret void
}
declare <2 x float> @foo(ptr)
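
; memcpy 12 bytes out of an alloca holding a <2 x float> followed by a float.
; !tbaa.struct !7 describes a v2f32 field at offset 0 and a float field at
; offset 8; the rewritten vector store should get the v2f32 tag (TBAA5) and
; the scalar store the float tag (TBAA0).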
define void @store_vector_part_first(ptr %y2, float %f) {
; CHECK-LABEL: define void @store_vector_part_first(
; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
; CHECK-NEXT: [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
; CHECK-NEXT: store <2 x float> [[V_1]], ptr [[Y2]], align 8, !tbaa [[TBAA5:![0-9]+]]
; CHECK-NEXT: [[X7_SROA_2_0_Y2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[Y2]], i64 8
; CHECK-NEXT: store float [[F]], ptr [[X7_SROA_2_0_Y2_SROA_IDX]], align 8, !tbaa [[TBAA0]]
; CHECK-NEXT: ret void
;
%x7 = alloca { float, float, float, float }
%v.1 = call <2 x float> @foo(ptr %y2)
store <2 x float> %v.1, ptr %x7
%gep = getelementptr i8, ptr %x7, i64 8
store float %f, ptr %gep
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %y2, ptr align 8 %x7, i64 12, i1 false), !tbaa.struct !7
ret void
}
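
; As above, but with the float field first (!tbaa.struct !8).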
define void @store_vector_part_second(ptr %y2, float %f) {
; CHECK-LABEL: define void @store_vector_part_second(
; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
; CHECK-NEXT: [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
; CHECK-NEXT: store float [[F]], ptr [[Y2]], align 8, !tbaa [[TBAA0]]
; CHECK-NEXT: [[X7_SROA_2_0_Y2_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[Y2]], i64 4
; CHECK-NEXT: store <2 x float> [[V_1]], ptr [[X7_SROA_2_0_Y2_SROA_IDX]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT: ret void
;
%x7 = alloca { float, float, float, float }
%v.1 = call <2 x float> @foo(ptr %y2)
store float %f, ptr %x7
%gep = getelementptr i8, ptr %x7, i64 4
store <2 x float> %v.1, ptr %gep
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %y2, ptr align 8 %x7, i64 12, i1 false), !tbaa.struct !8
ret void
}
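
; The whole 8-byte copy is covered by the first field of !tbaa.struct !9, so
; the single rewritten <2 x float> store is expected to carry the v2f32 tag.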
define void @store_vector_single(ptr %y2, float %f) {
; CHECK-LABEL: define void @store_vector_single(
; CHECK-SAME: ptr [[Y2:%.*]], float [[F:%.*]]) {
; CHECK-NEXT: [[V_1:%.*]] = call <2 x float> @foo(ptr [[Y2]])
; CHECK-NEXT: store <2 x float> [[V_1]], ptr [[Y2]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT: ret void
;
%x7 = alloca { float, float }
%v.1 = call <2 x float> @foo(ptr %y2)
store <2 x float> %v.1, ptr %x7
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %y2, ptr align 4 %x7, i64 8, i1 false), !tbaa.struct !9
ret void
}
declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) nounwind
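
; Splitting a memset annotated with !tbaa.struct !12: the piece rewritten to
; an i16 store of 0x2A2A falls inside a described field and keeps the float
; tag, while the remaining 6-byte memset gets no !tbaa.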
define void @memset(ptr %dst, ptr align 8 %src) {
; CHECK-LABEL: define void @memset(
; CHECK-SAME: ptr [[DST:%.*]], ptr align 8 [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [7 x i8], align 1
; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca i16, align 2
; CHECK-NEXT: [[A_SROA_4:%.*]] = alloca [10 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 7, i1 false)
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 7
; CHECK-NEXT: [[A_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i16 [[A_SROA_3_0_COPYLOAD]], ptr [[A_SROA_3]], align 2
; CHECK-NEXT: [[A_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 1 [[A_SROA_4_0_SRC_SROA_IDX]], i32 10, i1 false)
; CHECK-NEXT: store i16 1, ptr [[A_SROA_3]], align 2
; CHECK-NEXT: [[A_SROA_0_1_A_1_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[A_SROA_0]], i64 1
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[A_SROA_0_1_A_1_SROA_IDX2]], i8 42, i32 6, i1 false)
; CHECK-NEXT: store i16 10794, ptr [[A_SROA_3]], align 2, !tbaa [[TBAA0]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 7, i1 true)
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 7
; CHECK-NEXT: [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A_SROA_3]], align 2
; CHECK-NEXT: store volatile i16 [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1]], ptr [[A_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_0_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 10, i1 true)
; CHECK-NEXT: ret void
;
entry:
%a = alloca [19 x i8]
call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 19, i1 false)
%a.1 = getelementptr i8, ptr %a, i64 1
%a.7 = getelementptr i8, ptr %a, i64 7
store i16 1, ptr %a.7
call void @llvm.memset.p0.i32(ptr %a.1, i8 42, i32 8, i1 false), !tbaa.struct !12
call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 19, i1 true)
ret void
}
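
; Similar memset split on a larger alloca: both rewritten pieces line up with
; fields of !tbaa.struct !15 and are expected to keep the v2f32 tag.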
define void @memset2(ptr %dst, ptr align 8 %src) {
; CHECK-LABEL: define void @memset2(
; CHECK-SAME: ptr [[DST:%.*]], ptr align 8 [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [209 x i8], align 1
; CHECK-NEXT: [[A_SROA_3:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_SROA_4:%.*]] = alloca [90 x i8], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 209, i1 false)
; CHECK-NEXT: [[A_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 209
; CHECK-NEXT: [[A_SROA_3_0_COPYLOAD:%.*]] = load i8, ptr [[A_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i8 [[A_SROA_3_0_COPYLOAD]], ptr [[A_SROA_3]], align 1
; CHECK-NEXT: [[A_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 210
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 2 [[A_SROA_4_0_SRC_SROA_IDX]], i32 90, i1 false)
; CHECK-NEXT: store i8 1, ptr [[A_SROA_3]], align 1
; CHECK-NEXT: [[A_SROA_0_202_A_202_SROA_IDX2:%.*]] = getelementptr inbounds i8, ptr [[A_SROA_0]], i64 202
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 1 [[A_SROA_0_202_A_202_SROA_IDX2]], i8 42, i32 7, i1 false), !tbaa [[TBAA5]]
; CHECK-NEXT: store i8 42, ptr [[A_SROA_3]], align 1, !tbaa [[TBAA5]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 209, i1 true)
; CHECK-NEXT: [[A_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 209
; CHECK-NEXT: [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1:%.*]] = load volatile i8, ptr [[A_SROA_3]], align 1
; CHECK-NEXT: store volatile i8 [[A_SROA_3_0_A_SROA_3_0_COPYLOAD1]], ptr [[A_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 210
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_0_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 90, i1 true)
; CHECK-NEXT: ret void
;
entry:
%a = alloca [300 x i8]
call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 300, i1 false)
%a.202 = getelementptr [300 x i8], ptr %a, i64 0, i64 202
%a.209 = getelementptr [300 x i8], ptr %a, i64 0, i64 209
store i8 1, ptr %a.209
call void @llvm.memset.p0.i32(ptr %a.202, i8 42, i32 8, i1 false), !tbaa.struct !15
call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %a, i32 300, i1 true)
ret void
}
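
; An i32 store annotated with !tbaa.struct !10 is narrowed to the <2 x i8>
; slice of the alloca; the CHECK lines expect the narrowed store to carry no
; !tbaa metadata.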
define void @slice_store_v2i8_1(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define void @slice_store_v2i8_1(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca [6 x i8], align 1
; CHECK-NEXT: [[A_SROA_2_SROA_0:%.*]] = alloca <2 x i8>, align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_0]], ptr align 8 [[SRC]], i32 6, i1 false)
; CHECK-NEXT: [[A_SROA_2_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 6
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_COPYLOAD:%.*]] = load <2 x i8>, ptr [[A_SROA_2_0_SRC_SROA_IDX]], align 2
; CHECK-NEXT: store <2 x i8> [[A_SROA_2_SROA_0_0_COPYLOAD]], ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT: store <2 x i8> bitcast (<1 x i16> <i16 123> to <2 x i8>), ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_A_SROA_2_6_V_4:%.*]] = load <2 x i8>, ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT: store <2 x i8> [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_A_SROA_2_6_V_4]], ptr [[DST_2]], align 2
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 1 [[A_SROA_0]], i32 6, i1 true)
; CHECK-NEXT: [[A_SROA_2_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 6
; CHECK-NEXT: [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_COPYLOAD1:%.*]] = load volatile <2 x i8>, ptr [[A_SROA_2_SROA_0]], align 4
; CHECK-NEXT: store volatile <2 x i8> [[A_SROA_2_SROA_0_0_A_SROA_2_SROA_0_0_COPYLOAD1]], ptr [[A_SROA_2_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: ret void
;
entry:
%a = alloca [20 x i8]
call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr align 8 %src, i32 8, i1 false)
%a.6 = getelementptr inbounds i8, ptr %a, i64 6
store i32 123, ptr %a.6, !tbaa.struct !10
%v.4 = load <2 x i8>, ptr %a.6
store <2 x i8> %v.4, ptr %dst.2
call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr align 8 %a, i32 8, i1 true)
ret void
}
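
; Variant where the annotated store starts at offset 0 while the rewritten
; slices start at offset 1; the partial stores are expected to carry no !tbaa.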
define void @slice_store_v2i8_2(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define void @slice_store_v2i8_2(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0_SROA_1:%.*]] = alloca <2 x i8>, align 2
; CHECK-NEXT: [[A_SROA_0_SROA_4:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_SROA_4:%.*]] = alloca [5 x i8], align 1
; CHECK-NEXT: [[A_SROA_0_SROA_1_1_COPYLOAD:%.*]] = load <2 x i8>, ptr [[SRC]], align 8
; CHECK-NEXT: store <2 x i8> [[A_SROA_0_SROA_1_1_COPYLOAD]], ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT: [[A_SROA_0_SROA_4_1_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT: [[A_SROA_0_SROA_4_1_COPYLOAD:%.*]] = load i8, ptr [[A_SROA_0_SROA_4_1_SRC_SROA_IDX]], align 2
; CHECK-NEXT: store i8 [[A_SROA_0_SROA_4_1_COPYLOAD]], ptr [[A_SROA_0_SROA_4]], align 1
; CHECK-NEXT: [[A_SROA_4_1_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4]], ptr align 1 [[A_SROA_4_1_SRC_SROA_IDX]], i32 5, i1 false)
; CHECK-NEXT: store <2 x i8> zeroinitializer, ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT: store i8 0, ptr [[A_SROA_0_SROA_4]], align 1
; CHECK-NEXT: [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_A_SROA_0_1_V_4:%.*]] = load <2 x i8>, ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT: store <2 x i8> [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_A_SROA_0_1_V_4]], ptr [[DST_2]], align 2
; CHECK-NEXT: [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_COPYLOAD3:%.*]] = load volatile <2 x i8>, ptr [[A_SROA_0_SROA_1]], align 2
; CHECK-NEXT: store volatile <2 x i8> [[A_SROA_0_SROA_1_0_A_SROA_0_SROA_1_1_COPYLOAD3]], ptr [[DST]], align 1
; CHECK-NEXT: [[A_SROA_0_SROA_4_1_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
; CHECK-NEXT: [[A_SROA_0_SROA_4_0_A_SROA_0_SROA_4_1_COPYLOAD4:%.*]] = load volatile i8, ptr [[A_SROA_0_SROA_4]], align 1
; CHECK-NEXT: store volatile i8 [[A_SROA_0_SROA_4_0_A_SROA_0_SROA_4_1_COPYLOAD4]], ptr [[A_SROA_0_SROA_4_1_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A_SROA_4_1_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 3
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[A_SROA_4_1_DST_SROA_IDX]], ptr align 1 [[A_SROA_4]], i32 5, i1 true)
; CHECK-NEXT: ret void
;
entry:
%a = alloca [20 x i8]
%a.1 = getelementptr inbounds i8, ptr %a, i64 1
call void @llvm.memcpy.p0.p0.i32(ptr %a.1, ptr align 8 %src, i32 8, i1 false)
store i32 123, ptr %a, !tbaa.struct !11
%v.4 = load <2 x i8>, ptr %a.1
store <2 x i8> %v.4, ptr %dst.2
call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr align 8 %a.1, i32 8, i1 true)
ret void
}
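
; A double load from the copied-in alloca carries !tbaa.struct !13 (a single
; 8-byte v2f32 field); the rewritten load is expected to carry the
; corresponding access tag (TBAA5).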
define double @tbaa_struct_load(ptr %src, ptr %dst) {
; CHECK-LABEL: define double @tbaa_struct_load(
; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT: [[TMP_SROA_0:%.*]] = alloca double, align 8
; CHECK-NEXT: [[TMP_SROA_3:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[TMP_SROA_0_0_COPYLOAD:%.*]] = load double, ptr [[SRC]], align 8
; CHECK-NEXT: store double [[TMP_SROA_0_0_COPYLOAD]], ptr [[TMP_SROA_0]], align 8
; CHECK-NEXT: [[TMP_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
; CHECK-NEXT: [[TMP_SROA_3_0_COPYLOAD:%.*]] = load i64, ptr [[TMP_SROA_3_0_SRC_SROA_IDX]], align 8
; CHECK-NEXT: store i64 [[TMP_SROA_3_0_COPYLOAD]], ptr [[TMP_SROA_3]], align 8
; CHECK-NEXT: [[TMP_SROA_0_0_TMP_SROA_0_0_LG:%.*]] = load double, ptr [[TMP_SROA_0]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT: [[TMP_SROA_0_0_TMP_SROA_0_0_COPYLOAD1:%.*]] = load volatile double, ptr [[TMP_SROA_0]], align 8
; CHECK-NEXT: store volatile double [[TMP_SROA_0_0_TMP_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 8
; CHECK-NEXT: [[TMP_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
; CHECK-NEXT: [[TMP_SROA_3_0_TMP_SROA_3_0_COPYLOAD2:%.*]] = load volatile i64, ptr [[TMP_SROA_3]], align 8
; CHECK-NEXT: store volatile i64 [[TMP_SROA_3_0_TMP_SROA_3_0_COPYLOAD2]], ptr [[TMP_SROA_3_0_DST_SROA_IDX]], align 8
; CHECK-NEXT: ret double [[TMP_SROA_0_0_TMP_SROA_0_0_LG]]
;
%tmp = alloca [16 x i8], align 8
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 16, i1 false)
%lg = load double, ptr %tmp, align 8, !tbaa.struct !13
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 16, i1 true)
ret double %lg
}
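
; The i64 store is shortened to the 4 bytes that are actually used; the new
; i32 store is expected to pick up the access tag of the !tbaa.struct field
; covering those bytes.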
define i32 @shorten_integer_store_single_field(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define i32 @shorten_integer_store_single_field(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 123, ptr [[A_SROA_0]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD]], ptr [[DST]], align 1
; CHECK-NEXT: ret i32 [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
%a = alloca [8 x i8], align 2
store i64 123, ptr %a, align 2, !tbaa.struct !14
%l = load i32, ptr %a
call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
ret i32 %l
}
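
; Same shortening with the other !tbaa.struct layout.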
define i32 @shorten_integer_store_multiple_fields(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define i32 @shorten_integer_store_multiple_fields(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 123, ptr [[A_SROA_0]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_COPYLOAD:%.*]] = load volatile i32, ptr [[A_SROA_0]], align 4
; CHECK-NEXT: store volatile i32 [[A_SROA_0_0_A_SROA_0_0_COPYLOAD]], ptr [[DST]], align 1
; CHECK-NEXT: ret i32 [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
%a = alloca [8 x i8], align 2
store i64 123, ptr %a, align 2, !tbaa.struct !0
%l = load i32, ptr %a
call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
ret i32 %l
}
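
; Vector variant of the shortening tests: the <2 x i32> store is rewritten
; against the new <2 x i32> alloca unchanged, and the CHECK lines expect no
; !tbaa on it.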
define <2 x i16> @shorten_vector_store_multiple_fields(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define <2 x i16> @shorten_vector_store_multiple_fields(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca <2 x i32>, align 8
; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load <2 x i16>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 8 [[A_SROA_0]], i32 4, i1 true)
; CHECK-NEXT: ret <2 x i16> [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
%a = alloca [8 x i8], align 2
store <2 x i32> <i32 1, i32 2>, ptr %a, align 2, !tbaa.struct !0
%l = load <2 x i16>, ptr %a
call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
ret <2 x i16> %l
}
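
; As above, with a single-field !tbaa.struct and an 8-byte-aligned alloca.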
define <2 x i16> @shorten_vector_store_single_fields(ptr %dst, ptr %dst.2, ptr %src) {
; CHECK-LABEL: define <2 x i16> @shorten_vector_store_single_fields(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[DST_2:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_SROA_0:%.*]] = alloca <2 x i32>, align 8
; CHECK-NEXT: store <2 x i32> <i32 1, i32 2>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT: [[A_SROA_0_0_A_SROA_0_0_L:%.*]] = load <2 x i16>, ptr [[A_SROA_0]], align 8
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[DST]], ptr align 8 [[A_SROA_0]], i32 4, i1 true)
; CHECK-NEXT: ret <2 x i16> [[A_SROA_0_0_A_SROA_0_0_L]]
;
entry:
%a = alloca [8 x i8], align 8
store <2 x i32> <i32 1, i32 2>, ptr %a, align 8, !tbaa.struct !14
%l = load <2 x i16>, ptr %a
call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %a, i32 4, i1 true)
ret <2 x i16> %l
}
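
; A { i16, float, i8 } load annotated with !tbaa.struct !16 is split into
; per-field scalar loads; each is expected to carry the matching field tag
; from !16 (TBAA5 for all of them).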
define i32 @split_load_with_tbaa_struct(i32 %x, ptr %src, ptr %dst) {
; CHECK-LABEL: define i32 @split_load_with_tbaa_struct(
; CHECK-SAME: i32 [[X:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A3_SROA_0:%.*]] = alloca i16, align 8
; CHECK-NEXT: [[A3_SROA_3:%.*]] = alloca i16, align 2
; CHECK-NEXT: [[A3_SROA_33:%.*]] = alloca float, align 4
; CHECK-NEXT: [[A3_SROA_4:%.*]] = alloca i8, align 8
; CHECK-NEXT: [[A3_SROA_5:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A3_SROA_0_0_COPYLOAD:%.*]] = load i16, ptr [[SRC]], align 1
; CHECK-NEXT: store i16 [[A3_SROA_0_0_COPYLOAD]], ptr [[A3_SROA_0]], align 8
; CHECK-NEXT: [[A3_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT: [[A3_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A3_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i16 [[A3_SROA_3_0_COPYLOAD]], ptr [[A3_SROA_3]], align 2
; CHECK-NEXT: [[A3_SROA_33_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 4
; CHECK-NEXT: [[A3_SROA_33_0_COPYLOAD:%.*]] = load float, ptr [[A3_SROA_33_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store float [[A3_SROA_33_0_COPYLOAD]], ptr [[A3_SROA_33]], align 4
; CHECK-NEXT: [[A3_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
; CHECK-NEXT: [[A3_SROA_4_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_4_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i8 [[A3_SROA_4_0_COPYLOAD]], ptr [[A3_SROA_4]], align 8
; CHECK-NEXT: [[A3_SROA_5_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
; CHECK-NEXT: [[A3_SROA_5_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_5_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i8 [[A3_SROA_5_0_COPYLOAD]], ptr [[A3_SROA_5]], align 1
; CHECK-NEXT: [[A3_SROA_0_0_A3_SROA_0_0_LOAD4_FCA_0_LOAD:%.*]] = load i16, ptr [[A3_SROA_0]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT: [[LOAD4_FCA_0_INSERT:%.*]] = insertvalue { i16, float, i8 } poison, i16 [[A3_SROA_0_0_A3_SROA_0_0_LOAD4_FCA_0_LOAD]], 0
; CHECK-NEXT: [[A3_SROA_33_0_A3_SROA_33_4_LOAD4_FCA_1_LOAD:%.*]] = load float, ptr [[A3_SROA_33]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT: [[LOAD4_FCA_1_INSERT:%.*]] = insertvalue { i16, float, i8 } [[LOAD4_FCA_0_INSERT]], float [[A3_SROA_33_0_A3_SROA_33_4_LOAD4_FCA_1_LOAD]], 1
; CHECK-NEXT: [[A3_SROA_4_0_A3_SROA_4_8_LOAD4_FCA_2_LOAD:%.*]] = load i8, ptr [[A3_SROA_4]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT: [[LOAD4_FCA_2_INSERT:%.*]] = insertvalue { i16, float, i8 } [[LOAD4_FCA_1_INSERT]], i8 [[A3_SROA_4_0_A3_SROA_4_8_LOAD4_FCA_2_LOAD]], 2
; CHECK-NEXT: [[UNWRAP2:%.*]] = extractvalue { i16, float, i8 } [[LOAD4_FCA_2_INSERT]], 1
; CHECK-NEXT: [[VALCAST2:%.*]] = bitcast float [[UNWRAP2]] to i32
; CHECK-NEXT: [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A3_SROA_0]], align 8
; CHECK-NEXT: store volatile i16 [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 1
; CHECK-NEXT: [[A3_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
; CHECK-NEXT: [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2:%.*]] = load volatile i16, ptr [[A3_SROA_3]], align 2
; CHECK-NEXT: store volatile i16 [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2]], ptr [[A3_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A3_SROA_33_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 4
; CHECK-NEXT: [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4:%.*]] = load volatile float, ptr [[A3_SROA_33]], align 4
; CHECK-NEXT: store volatile float [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4]], ptr [[A3_SROA_33_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A3_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
; CHECK-NEXT: [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5:%.*]] = load volatile i8, ptr [[A3_SROA_4]], align 8
; CHECK-NEXT: store volatile i8 [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5]], ptr [[A3_SROA_4_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A3_SROA_5_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
; CHECK-NEXT: [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6:%.*]] = load volatile i8, ptr [[A3_SROA_5]], align 1
; CHECK-NEXT: store volatile i8 [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6]], ptr [[A3_SROA_5_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: ret i32 [[VALCAST2]]
;
entry:
%a3 = alloca { float, float, float }
call void @llvm.memcpy.p0.p0.i64(ptr %a3, ptr %src, i64 10, i1 false)
%load4 = load { i16, float, i8 }, ptr %a3, !tbaa.struct !16
%unwrap2 = extractvalue { i16, float, i8 } %load4, 1
%valcast2 = bitcast float %unwrap2 to i32
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a3, i64 10, i1 true)
ret i32 %valcast2
}
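
; The store counterpart: the aggregate store is split per field, and each
; scalar store is expected to carry the matching field tag from !16.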
define i32 @split_store_with_tbaa_struct(i32 %x, ptr %src, ptr %dst) {
; CHECK-LABEL: define i32 @split_store_with_tbaa_struct(
; CHECK-SAME: i32 [[X:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A3_SROA_0:%.*]] = alloca i16, align 8
; CHECK-NEXT: [[A3_SROA_3:%.*]] = alloca i16, align 2
; CHECK-NEXT: [[A3_SROA_33:%.*]] = alloca float, align 4
; CHECK-NEXT: [[A3_SROA_4:%.*]] = alloca i8, align 8
; CHECK-NEXT: [[A3_SROA_5:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A3_SROA_0_0_COPYLOAD:%.*]] = load i16, ptr [[SRC]], align 1
; CHECK-NEXT: store i16 [[A3_SROA_0_0_COPYLOAD]], ptr [[A3_SROA_0]], align 8
; CHECK-NEXT: [[A3_SROA_3_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT: [[A3_SROA_3_0_COPYLOAD:%.*]] = load i16, ptr [[A3_SROA_3_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i16 [[A3_SROA_3_0_COPYLOAD]], ptr [[A3_SROA_3]], align 2
; CHECK-NEXT: [[A3_SROA_33_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 4
; CHECK-NEXT: [[A3_SROA_33_0_COPYLOAD:%.*]] = load float, ptr [[A3_SROA_33_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store float [[A3_SROA_33_0_COPYLOAD]], ptr [[A3_SROA_33]], align 4
; CHECK-NEXT: [[A3_SROA_4_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 8
; CHECK-NEXT: [[A3_SROA_4_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_4_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i8 [[A3_SROA_4_0_COPYLOAD]], ptr [[A3_SROA_4]], align 8
; CHECK-NEXT: [[A3_SROA_5_0_SRC_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 9
; CHECK-NEXT: [[A3_SROA_5_0_COPYLOAD:%.*]] = load i8, ptr [[A3_SROA_5_0_SRC_SROA_IDX]], align 1
; CHECK-NEXT: store i8 [[A3_SROA_5_0_COPYLOAD]], ptr [[A3_SROA_5]], align 1
; CHECK-NEXT: [[I_1:%.*]] = insertvalue { i16, float, i8 } poison, i16 10, 0
; CHECK-NEXT: [[I_2:%.*]] = insertvalue { i16, float, i8 } [[I_1]], float 3.000000e+00, 1
; CHECK-NEXT: [[I_3:%.*]] = insertvalue { i16, float, i8 } [[I_2]], i8 99, 2
; CHECK-NEXT: [[I_3_FCA_0_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 0
; CHECK-NEXT: store i16 [[I_3_FCA_0_EXTRACT]], ptr [[A3_SROA_0]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT: [[I_3_FCA_1_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 1
; CHECK-NEXT: store float [[I_3_FCA_1_EXTRACT]], ptr [[A3_SROA_33]], align 4, !tbaa [[TBAA5]]
; CHECK-NEXT: [[I_3_FCA_2_EXTRACT:%.*]] = extractvalue { i16, float, i8 } [[I_3]], 2
; CHECK-NEXT: store i8 [[I_3_FCA_2_EXTRACT]], ptr [[A3_SROA_4]], align 8, !tbaa [[TBAA5]]
; CHECK-NEXT: [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1:%.*]] = load volatile i16, ptr [[A3_SROA_0]], align 8
; CHECK-NEXT: store volatile i16 [[A3_SROA_0_0_A3_SROA_0_0_COPYLOAD1]], ptr [[DST]], align 1
; CHECK-NEXT: [[A3_SROA_3_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
; CHECK-NEXT: [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2:%.*]] = load volatile i16, ptr [[A3_SROA_3]], align 2
; CHECK-NEXT: store volatile i16 [[A3_SROA_3_0_A3_SROA_3_0_COPYLOAD2]], ptr [[A3_SROA_3_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A3_SROA_33_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 4
; CHECK-NEXT: [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4:%.*]] = load volatile float, ptr [[A3_SROA_33]], align 4
; CHECK-NEXT: store volatile float [[A3_SROA_33_0_A3_SROA_33_0_COPYLOAD4]], ptr [[A3_SROA_33_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A3_SROA_4_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 8
; CHECK-NEXT: [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5:%.*]] = load volatile i8, ptr [[A3_SROA_4]], align 8
; CHECK-NEXT: store volatile i8 [[A3_SROA_4_0_A3_SROA_4_0_COPYLOAD5]], ptr [[A3_SROA_4_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: [[A3_SROA_5_0_DST_SROA_IDX:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 9
; CHECK-NEXT: [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6:%.*]] = load volatile i8, ptr [[A3_SROA_5]], align 1
; CHECK-NEXT: store volatile i8 [[A3_SROA_5_0_A3_SROA_5_0_COPYLOAD6]], ptr [[A3_SROA_5_0_DST_SROA_IDX]], align 1
; CHECK-NEXT: ret i32 0
;
entry:
%a3 = alloca { float, float, float }
call void @llvm.memcpy.p0.p0.i64(ptr %a3, ptr %src, i64 10, i1 false)
%i.1 = insertvalue { i16, float, i8 } poison, i16 10, 0
%i.2 = insertvalue { i16, float, i8 } %i.1, float 3.0, 1
%i.3 = insertvalue { i16, float, i8 } %i.2, i8 99, 2
store { i16, float, i8 } %i.3, ptr %a3, !tbaa.struct !16
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %a3, i64 10, i1 true)
ret i32 0
}
; Function Attrs: mustprogress nocallback nofree nounwind willreturn memory(argmem: readwrite)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) #2

attributes #2 = { mustprogress nocallback nofree nounwind willreturn memory(argmem: readwrite) }

!0 = !{i64 0, i64 4, !1, i64 4, i64 4, !1}
!1 = !{!2, !2, i64 0}
!2 = !{!"float", !3, i64 0}
!3 = !{!"omnipotent char", !4, i64 0}
!4 = !{!"Simple C++ TBAA"}
!5 = !{!"v2f32", !3, i64 0}
!6 = !{!5, !5, i64 0}
!7 = !{i64 0, i64 8, !6, i64 8, i64 4, !1}
!8 = !{i64 0, i64 4, !1, i64 4, i64 8, !6}
!9 = !{i64 0, i64 8, !6, i64 8, i64 4, !1}
!10 = !{i64 0, i64 2, !1, i64 2, i64 2, !1}
!11 = !{i64 0, i64 1, !1, i64 1, i64 3, !1}
!12 = !{i64 0, i64 2, !1, i64 2, i64 6, !1}
!13 = !{i64 0, i64 8, !6}
!14 = !{i64 0, i64 4, !6}
!15 = !{i64 0, i64 7, !6, i64 7, i64 1, !6}
!16 = !{i64 0, i64 2, !6, i64 4, i64 4, !6, i64 8, i64 1, !6}
;.
; CHECK: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
; CHECK: [[META1]] = !{!"float", [[META2:![0-9]+]], i64 0}
; CHECK: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]], i64 0}
; CHECK: [[META3]] = !{!"Simple C++ TBAA"}
; CHECK: [[TBAA_STRUCT4]] = !{i64 0, i64 4, [[TBAA0]], i64 4, i64 4, [[TBAA0]]}
; CHECK: [[TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0}
; CHECK: [[META6]] = !{!"v2f32", [[META2]], i64 0}
;.