; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s
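;
; Check lowering of the SME2 multi-vector saturating rounding shift-right
; narrow intrinsics. Each test takes a leading %unused argument, which
; occupies z0 and leaves the real operands in z1 upwards, so the register
; allocator must copy them into an aligned consecutive tuple ({ z2, z3 } for
; the two-vector forms, { z4 - z7 } for the four-vector forms) before the
; narrowing instruction; those copies are part of the expected output.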
;
; S/UQRSHR x2
;
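; The two-vector forms shift each 32-bit element of the two sources right by
; the immediate with rounding, saturate to the 16-bit destination range, and
; pack the results into a single .h vector.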
define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_s16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_s16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z3.d, z2.d
; CHECK-NEXT: mov z2.d, z1.d
; CHECK-NEXT: sqrshr z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
ret <vscale x 8 x i16> %res
}
define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_u16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z3.d, z2.d
; CHECK-NEXT: mov z2.d, z1.d
; CHECK-NEXT: uqrshr z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
ret <vscale x 8 x i16> %res
}
;
; S/UQRSHR x4
;
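; The four-vector forms narrow to quarter width (.s sources to a .b result,
; or .d sources to a .h result); the operand tuple must start at a register
; number that is a multiple of four, hence the copies into z4 - z7.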
define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshr z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshr z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
ret <vscale x 8 x i16> %res
}
define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: uqrshr z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: uqrshr z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
ret <vscale x 8 x i16> %res
}
;
; S/UQRSHRN x4
;
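; The SQRSHRN/UQRSHRN forms additionally interleave the narrowed results, so
; element i of each source vector ends up in adjacent destination elements
; rather than in a contiguous per-vector block.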
define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshrn z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshrn z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
ret <vscale x 8 x i16> %res
}
define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: uqrshrn z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: uqrshrn z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
ret <vscale x 8 x i16> %res
}
;
; SQRSHRU x2
;
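; SQRSHRU shifts and rounds signed source elements but saturates the narrowed
; results to the unsigned range of the destination elements.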
define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x2_u16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z3.d, z2.d
; CHECK-NEXT: mov z2.d, z1.d
; CHECK-NEXT: sqrshru z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
ret <vscale x 8 x i16> %res
}
;
; SQRSHRU x4
;
define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshru z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshru z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
ret <vscale x 8 x i16> %res
}
;
; SQRSHRUN x4
;
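; SQRSHRUN combines the unsigned saturation of SQRSHRU with the interleaved
; result placement of the 'N' forms.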
define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshrun z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: sqrshrun z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
ret <vscale x 8 x i16> %res
}
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)