; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes=instcombine -mtriple=arm -S | FileCheck %s
; InstCombine can increase the alignment arguments of NEON load/store
; intrinsics when the underlying pointer's alignment is provably higher
; (here, from the globals' declared alignments and from parameter
; attributes). Check that it does so, and that it never lowers an
; already-higher alignment.
@x = common global [8 x i32] zeroinitializer, align 32
@y = common global [8 x i32] zeroinitializer, align 16
; vld4/vst4 issued with align 1 through globals of known alignment:
; @x is declared align 32 and @y align 16, so instcombine should raise
; the intrinsics' alignment arguments from 1 to 32 (load) and 16 (store).
define void @test() {
; CHECK-LABEL: define void @test() {
; CHECK-NEXT: [[TMP1:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32.p0(ptr nonnull @x, i32 32)
; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[TMP1]], 1
; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[TMP1]], 2
; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } [[TMP1]], 3
; CHECK-NEXT: call void @llvm.arm.neon.vst4.p0.v2i32(ptr nonnull @y, <2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> [[TMP4]], <2 x i32> [[TMP5]], i32 16)
; CHECK-NEXT: ret void
;
  ; Both intrinsic calls claim only align 1; the CHECK lines above expect
  ; instcombine to rewrite these to the globals' actual alignments.
%tmp1 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32.p0(ptr @x, i32 1)
%tmp2 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %tmp1, 0
%tmp3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %tmp1, 1
%tmp4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %tmp1, 2
%tmp5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %tmp1, 3
call void @llvm.arm.neon.vst4.p0.v2i32(ptr @y, <2 x i32> %tmp2, <2 x i32> %tmp3, <2 x i32> %tmp4, <2 x i32> %tmp5, i32 1)
ret void
}
; vld1x2 intrinsics carry alignment as a pointer attribute rather than an
; i32 argument. With no align attribute on the call operand, instcombine
; should attach the parameter's known `align 16` to the intrinsic's pointer.
define { <4 x i16>, <4 x i16> } @test_vld1x2_no_align(ptr align 16 %a) {
; CHECK-LABEL: define { <4 x i16>, <4 x i16> } @test_vld1x2_no_align(
; CHECK-SAME: ptr align 16 [[A:%.*]]) {
; CHECK-NEXT: [[TMP:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld1x2.v4i16.p0(ptr align 16 [[A]])
; CHECK-NEXT: ret { <4 x i16>, <4 x i16> } [[TMP]]
;
%tmp = call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld1x2.v4i16.p0(ptr %a)
ret { <4 x i16>, <4 x i16> } %tmp
}
; The call claims only `align 8`, but the parameter is known `align 16`;
; instcombine should raise the call-site attribute to 16.
define { <4 x i16>, <4 x i16> } @test_vld1x2_lower_align(ptr align 16 %a) {
; CHECK-LABEL: define { <4 x i16>, <4 x i16> } @test_vld1x2_lower_align(
; CHECK-SAME: ptr align 16 [[A:%.*]]) {
; CHECK-NEXT: [[TMP:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld1x2.v4i16.p0(ptr align 16 [[A]])
; CHECK-NEXT: ret { <4 x i16>, <4 x i16> } [[TMP]]
;
%tmp = call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld1x2.v4i16.p0(ptr align 8 %a)
ret { <4 x i16>, <4 x i16> } %tmp
}
; The call already asserts `align 16`, stronger than the parameter's
; `align 8`; instcombine must keep the higher alignment, not lower it.
define { <4 x i16>, <4 x i16> } @test_vld1x2_higher_align(ptr align 8 %a) {
; CHECK-LABEL: define { <4 x i16>, <4 x i16> } @test_vld1x2_higher_align(
; CHECK-SAME: ptr align 8 [[A:%.*]]) {
; CHECK-NEXT: [[TMP:%.*]] = call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld1x2.v4i16.p0(ptr align 16 [[A]])
; CHECK-NEXT: ret { <4 x i16>, <4 x i16> } [[TMP]]
;
%tmp = call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld1x2.v4i16.p0(ptr align 16 %a)
ret { <4 x i16>, <4 x i16> } %tmp
}
; Store-side counterpart of @test_vld1x2_no_align: the vst1x2 call has no
; align attribute on its pointer operand, and instcombine should attach
; the parameter's known `align 16`.
define void @test_vst1x2_no_align(ptr align 16 %a, <4 x i16> %b0, <4 x i16> %b1) {
; CHECK-LABEL: define void @test_vst1x2_no_align(
; CHECK-SAME: ptr align 16 [[A:%.*]], <4 x i16> [[B0:%.*]], <4 x i16> [[B1:%.*]]) {
; CHECK-NEXT: call void @llvm.arm.neon.vst1x2.p0.v4i16(ptr align 16 [[A]], <4 x i16> [[B0]], <4 x i16> [[B1]])
; CHECK-NEXT: ret void
;
call void @llvm.arm.neon.vst1x2.p0.v4i16(ptr %a, <4 x i16> %b0, <4 x i16> %b1)
ret void
}