; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
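; Test lowering of llvm.masked.gather to RVV indexed-unordered loads: RV32
; gathers through EEW=32 pointer vectors (vluxei32) and RV64 through EEW=64
; pointer vectors (vluxei64). The gather mask becomes the v0 mask operand and
; the passthru becomes the merge operand of the masked load.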
declare <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)
define <vscale x 1 x i8> @mgather_nxv1i8(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru) {
; RV32-LABEL: mgather_nxv1i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 1, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru)
ret <vscale x 1 x i8> %v
}
declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
define <vscale x 2 x i8> @mgather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
ret <vscale x 2 x i8> %v
}
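; The gathers below are consumed by sext/zext; the extension is expected to
; remain a separate vsext.vf*/vzext.vf* after the narrow gather rather than
; widening the gather itself.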
define <vscale x 2 x i16> @mgather_nxv2i8_sextload_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; RV32-NEXT: vsext.vf2 v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; RV64-NEXT: vsext.vf2 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
%ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i16>
ret <vscale x 2 x i16> %ev
}
define <vscale x 2 x i16> @mgather_nxv2i8_zextload_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; RV32-NEXT: vzext.vf2 v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; RV64-NEXT: vzext.vf2 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
%ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i16>
ret <vscale x 2 x i16> %ev
}
define <vscale x 2 x i32> @mgather_nxv2i8_sextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vsext.vf4 v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vsext.vf4 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
%ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i32>
ret <vscale x 2 x i32> %ev
}
define <vscale x 2 x i32> @mgather_nxv2i8_zextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vzext.vf4 v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vzext.vf4 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
%ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i32>
ret <vscale x 2 x i32> %ev
}
define <vscale x 2 x i64> @mgather_nxv2i8_sextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_sextload_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vsext.vf8 v10, v9
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vsext.vf8 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
%ev = sext <vscale x 2 x i8> %v to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ev
}
define <vscale x 2 x i64> @mgather_nxv2i8_zextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru) {
; RV32-LABEL: mgather_nxv2i8_zextload_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vzext.vf8 v10, v9
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vzext.vf8 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %m, <vscale x 2 x i8> %passthru)
%ev = zext <vscale x 2 x i8> %v to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ev
}
declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
define <vscale x 4 x i8> @mgather_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_nxv4i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %m, <vscale x 4 x i8> %passthru)
ret <vscale x 4 x i8> %v
}
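; An all-ones mask should become an unmasked gather; an all-zeroes mask should
; fold the gather away entirely, leaving only a copy of the passthru.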
define <vscale x 4 x i8> @mgather_truemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i8> %passthru)
ret <vscale x 4 x i8> %v
}
define <vscale x 4 x i8> @mgather_falsemask_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i8> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i8:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i8:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i8> %passthru)
ret <vscale x 4 x i8> %v
}
declare <vscale x 8 x i8> @llvm.masked.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
define <vscale x 8 x i8> @mgather_nxv8i8(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru) {
; RV32-LABEL: mgather_nxv8i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.masked.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru)
ret <vscale x 8 x i8> %v
}
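; Gathers of the form gep(base, idxs): the index vector is sign-extended to
; pointer width and, for elements wider than one byte, scaled by the element
; size (vadd.vv v,v,v for a *2 scale, vsll.vi for larger powers of two).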
define <vscale x 8 x i8> @mgather_baseidx_nxv8i8(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v9
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 8 x i8> %idxs
%v = call <vscale x 8 x i8> @llvm.masked.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %m, <vscale x 8 x i8> %passthru)
ret <vscale x 8 x i8> %v
}
declare <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)
define <vscale x 1 x i16> @mgather_nxv1i16(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i16> %passthru) {
; RV32-LABEL: mgather_nxv1i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 2, <vscale x 1 x i1> %m, <vscale x 1 x i16> %passthru)
ret <vscale x 1 x i16> %v
}
declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
define <vscale x 2 x i16> @mgather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
ret <vscale x 2 x i16> %v
}
define <vscale x 2 x i32> @mgather_nxv2i16_sextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_sextload_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vsext.vf2 v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vsext.vf2 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
%ev = sext <vscale x 2 x i16> %v to <vscale x 2 x i32>
ret <vscale x 2 x i32> %ev
}
define <vscale x 2 x i32> @mgather_nxv2i16_zextload_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_zextload_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vzext.vf2 v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vzext.vf2 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
%ev = zext <vscale x 2 x i16> %v to <vscale x 2 x i32>
ret <vscale x 2 x i32> %ev
}
define <vscale x 2 x i64> @mgather_nxv2i16_sextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_sextload_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v9
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vsext.vf4 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
%ev = sext <vscale x 2 x i16> %v to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ev
}
define <vscale x 2 x i64> @mgather_nxv2i16_zextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru) {
; RV32-LABEL: mgather_nxv2i16_zextload_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vzext.vf4 v10, v9
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vzext.vf4 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x i16> %passthru)
%ev = zext <vscale x 2 x i16> %v to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ev
}
declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
define <vscale x 4 x i16> @mgather_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_nxv4i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %m, <vscale x 4 x i16> %passthru)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @mgather_truemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i16> %passthru)
ret <vscale x 4 x i16> %v
}
define <vscale x 4 x i16> @mgather_falsemask_nxv4i16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i16> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i16:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i16:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i16> %passthru)
ret <vscale x 4 x i16> %v
}
declare <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
define <vscale x 8 x i16> @mgather_nxv8i16(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
ret <vscale x 8 x i16> %v
}
define <vscale x 8 x i16> @mgather_baseidx_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i8> %idxs
%v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
ret <vscale x 8 x i16> %v
}
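; An explicit sext of the indices should lower exactly like the implicit index
; promotion performed by the gep above.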
define <vscale x 8 x i16> @mgather_baseidx_sext_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i16> %eidxs
%v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
ret <vscale x 8 x i16> %v
}
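; With zero-extended i8 indices the scaled index fits in 16 bits, so RV32 and
; RV64 share one EEW=16 gather (vluxei16, hence the common CHECK prefix), and
; the *2 scale folds into a single widening vwaddu.vv.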
define <vscale x 8 x i16> @mgather_baseidx_zext_nxv8i8_nxv8i16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vwaddu.vv v12, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vluxei16.v v10, (a0), v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i16> %eidxs
%v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
ret <vscale x 8 x i16> %v
}
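; Natural-width i16 indices: RV32 widens and doubles them in a single
; vwadd.vv, while RV64 still needs a full sign-extension to e64.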
define <vscale x 8 x i16> @mgather_baseidx_nxv8i16(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT: vwadd.vv v12, v8, v8
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <vscale x 8 x i16> %idxs
%v = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x i16> %passthru)
ret <vscale x 8 x i16> %v
}
declare <vscale x 1 x i32> @llvm.masked.gather.nxv1i32.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)
define <vscale x 1 x i32> @mgather_nxv1i32(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i32> %passthru) {
; RV32-LABEL: mgather_nxv1i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.masked.gather.nxv1i32.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 4, <vscale x 1 x i1> %m, <vscale x 1 x i32> %passthru)
ret <vscale x 1 x i32> %v
}
declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
define <vscale x 2 x i32> @mgather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru) {
; RV32-LABEL: mgather_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru)
ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i64> @mgather_nxv2i32_sextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru) {
; RV32-LABEL: mgather_nxv2i32_sextload_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v9
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i32_sextload_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vsext.vf2 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru)
%ev = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ev
}
define <vscale x 2 x i64> @mgather_nxv2i32_zextload_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru) {
; RV32-LABEL: mgather_nxv2i32_zextload_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vzext.vf2 v10, v9
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i32_zextload_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vzext.vf2 v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x i32> %passthru)
%ev = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
ret <vscale x 2 x i64> %ev
}
declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
define <vscale x 4 x i32> @mgather_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_nxv4i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %m, <vscale x 4 x i32> %passthru)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @mgather_truemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i32> %passthru)
ret <vscale x 4 x i32> %v
}
define <vscale x 4 x i32> @mgather_falsemask_nxv4i32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i32> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4i32:
; RV32: # %bb.0:
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4i32:
; RV64: # %bb.0:
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i32> %passthru)
ret <vscale x 4 x i32> %v
}
declare <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
define <vscale x 8 x i32> @mgather_nxv8i32(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @mgather_baseidx_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i8> %idxs
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vsll.vi v8, v10, 2
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vluxei16.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @mgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i16> %idxs
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vsll.vi v8, v16, 2
; CHECK-NEXT: vluxei32.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
define <vscale x 8 x i32> @mgather_baseidx_nxv8i32(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru)
ret <vscale x 8 x i32> %v
}
declare <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)
define <vscale x 1 x i64> @mgather_nxv1i64(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x i64> %passthru) {
; RV32-LABEL: mgather_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v9
; RV64-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 8, <vscale x 1 x i1> %m, <vscale x 1 x i64> %passthru)
ret <vscale x 1 x i64> %v
}
declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
define <vscale x 2 x i64> @mgather_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x i64> %passthru) {
; RV32-LABEL: mgather_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %m, <vscale x 2 x i64> %passthru)
ret <vscale x 2 x i64> %v
}
declare <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)
define <vscale x 4 x i64> @mgather_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x i64> %passthru) {
; RV32-LABEL: mgather_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %m, <vscale x 4 x i64> %passthru)
ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @mgather_truemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i64> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8
; RV64-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x i64> %passthru)
ret <vscale x 4 x i64> %v
}
define <vscale x 4 x i64> @mgather_falsemask_nxv4i64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i64> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i64> %passthru)
ret <vscale x 4 x i64> %v
}
declare <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)
define <vscale x 8 x i64> @mgather_nxv8i64(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
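; For e64 data on RV32 the indices can stay at EEW=32, so the index register
; group (LMUL=4) is only half the size of the data group (LMUL=8).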
define <vscale x 8 x i64> @mgather_baseidx_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i8> %idxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vsll.vi v8, v10, 3
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i16> %idxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vzext.vf2 v12, v8
; CHECK-NEXT: vsll.vi v8, v12, 3
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_nxv8i32_nxv8i64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
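; i64 indices on RV32 are truncated to 32 bits with vnsrl.wi before scaling,
; which is legal because pointers are only 32 bits wide.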
define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(ptr %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %idxs
%v = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru)
ret <vscale x 8 x i64> %v
}
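; nxv16i64 does not fit in a single LMUL=8 register group, so the gather is
; split: the upper half of the mask is extracted with vslidedown.vx and each
; half is gathered separately. On RV64 the extra live m8 operands force one
; operand to be spilled to the stack around the gathers.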
declare <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i64>)
declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64 %idx)
declare <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr>, <vscale x 8 x ptr>, i64 %idx)
define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptrs1, <vscale x 16 x i1> %m, <vscale x 8 x i64> %passthru0, <vscale x 8 x i64> %passthru1, ptr %out) {
; RV32-LABEL: mgather_nxv16i64:
; RV32: # %bb.0:
; RV32-NEXT: vl8re64.v v24, (a0)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a2, a0, 3
; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v7, v0, a2
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vluxei32.v v24, (zero), v12, v0.t
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: vs8r.v v24, (a0)
; RV32-NEXT: vs8r.v v16, (a1)
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv16i64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a3, vlenb
; RV64-NEXT: slli a3, a3, 3
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: addi a3, sp, 16
; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV64-NEXT: vmv8r.v v16, v8
; RV64-NEXT: vl8re64.v v24, (a0)
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: vl8re64.v v8, (a1)
; RV64-NEXT: srli a1, a0, 3
; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v7, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vluxei64.v v24, (zero), v16, v0.t
; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vluxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, a2, a0
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: vs8r.v v24, (a2)
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
%p1 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> %p0, <vscale x 8 x ptr> %ptrs1, i64 8)
%pt0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %passthru0, i64 0)
%pt1 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %pt0, <vscale x 8 x i64> %passthru1, i64 8)
%v = call <vscale x 16 x i64> @llvm.masked.gather.nxv16i64.nxv16p0(<vscale x 16 x ptr> %p1, i32 8, <vscale x 16 x i1> %m, <vscale x 16 x i64> %pt1)
store <vscale x 16 x i64> %v, ptr %out
ret void
}
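; Floating-point gathers follow the same lowering as the integer ones; the
; half-precision tests depend on +zvfh from the RUN lines.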
declare <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x half>)
define <vscale x 1 x half> @mgather_nxv1f16(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x half> %passthru) {
; RV32-LABEL: mgather_nxv1f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%v = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 2, <vscale x 1 x i1> %m, <vscale x 1 x half> %passthru)
ret <vscale x 1 x half> %v
}
declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
define <vscale x 2 x half> @mgather_nxv2f16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x half> %passthru) {
; RV32-LABEL: mgather_nxv2f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %m, <vscale x 2 x half> %passthru)
ret <vscale x 2 x half> %v
}
declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
define <vscale x 4 x half> @mgather_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %m, <vscale x 4 x half> %passthru)
ret <vscale x 4 x half> %v
}
define <vscale x 4 x half> @mgather_truemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x half> %passthru)
ret <vscale x 4 x half> %v
}
define <vscale x 4 x half> @mgather_falsemask_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x half> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x half> %passthru)
ret <vscale x 4 x half> %v
}
declare <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
define <vscale x 8 x half> @mgather_nxv8f16(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
ret <vscale x 8 x half> %v
}
define <vscale x 8 x half> @mgather_baseidx_nxv8i8_nxv8f16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i8> %idxs
%v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
ret <vscale x 8 x half> %v
}
define <vscale x 8 x half> @mgather_baseidx_sext_nxv8i8_nxv8f16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i16> %eidxs
%v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
ret <vscale x 8 x half> %v
}
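; Zero-extended i8 indices fit in 16 bits, so both targets use a single
; vluxei16, with the scaling by 2 folded into a widening unsigned add
; (vwaddu.vv); the output is identical on RV32 and RV64, hence the shared
; CHECK prefix.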
define <vscale x 8 x half> @mgather_baseidx_zext_nxv8i8_nxv8f16(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vwaddu.vv v12, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vluxei16.v v10, (a0), v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i16> %eidxs
%v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
ret <vscale x 8 x half> %v
}
define <vscale x 8 x half> @mgather_baseidx_nxv8f16(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; RV32-NEXT: vwadd.vv v12, v8, v8
; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, ptr %base, <vscale x 8 x i16> %idxs
%v = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %m, <vscale x 8 x half> %passthru)
ret <vscale x 8 x half> %v
}
declare <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x float>)
define <vscale x 1 x float> @mgather_nxv1f32(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x float> %passthru) {
; RV32-LABEL: mgather_nxv1f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%v = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 4, <vscale x 1 x i1> %m, <vscale x 1 x float> %passthru)
ret <vscale x 1 x float> %v
}
declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
define <vscale x 2 x float> @mgather_nxv2f32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x float> %passthru) {
; RV32-LABEL: mgather_nxv2f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %m, <vscale x 2 x float> %passthru)
ret <vscale x 2 x float> %v
}
declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
define <vscale x 4 x float> @mgather_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %m, <vscale x 4 x float> %passthru)
ret <vscale x 4 x float> %v
}
define <vscale x 4 x float> @mgather_truemask_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x float> %passthru)
ret <vscale x 4 x float> %v
}
define <vscale x 4 x float> @mgather_falsemask_nxv4f32(<vscale x 4 x ptr> %ptrs, <vscale x 4 x float> %passthru) {
; RV32-LABEL: mgather_falsemask_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_falsemask_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> %passthru)
ret <vscale x 4 x float> %v
}
declare <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
define <vscale x 8 x float> @mgather_nxv8f32(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
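; For f32 elements the index scaling becomes a left shift by 2 (multiply by
; sizeof(float)); otherwise the extension patterns mirror the f16 cases above.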
define <vscale x 8 x float> @mgather_baseidx_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i8> %idxs
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vsll.vi v8, v10, 2
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vluxei16.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
define <vscale x 8 x float> @mgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i16> %idxs
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vsll.vi v8, v16, 2
; CHECK-NEXT: vluxei32.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
define <vscale x 8 x float> @mgather_baseidx_nxv8f32(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 2
; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru)
ret <vscale x 8 x float> %v
}
declare <vscale x 1 x double> @llvm.masked.gather.nxv1f64.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x double>)
define <vscale x 1 x double> @mgather_nxv1f64(<vscale x 1 x ptr> %ptrs, <vscale x 1 x i1> %m, <vscale x 1 x double> %passthru) {
; RV32-LABEL: mgather_nxv1f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv1f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v9
; RV64-NEXT: ret
%v = call <vscale x 1 x double> @llvm.masked.gather.nxv1f64.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 8, <vscale x 1 x i1> %m, <vscale x 1 x double> %passthru)
ret <vscale x 1 x double> %v
}
declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
define <vscale x 2 x double> @mgather_nxv2f64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %m, <vscale x 2 x double> %passthru) {
; RV32-LABEL: mgather_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%v = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %m, <vscale x 2 x double> %passthru)
ret <vscale x 2 x double> %v
}
declare <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x double>)
define <vscale x 4 x double> @mgather_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %m, <vscale x 4 x double> %passthru) {
; RV32-LABEL: mgather_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> %m, <vscale x 4 x double> %passthru)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @mgather_truemask_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x double> %passthru) {
; RV32-LABEL: mgather_truemask_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_truemask_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8
; RV64-NEXT: ret
%v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> splat (i1 1), <vscale x 4 x double> %passthru)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @mgather_falsemask_nxv4f64(<vscale x 4 x ptr> %ptrs, <vscale x 4 x double> %passthru) {
; CHECK-LABEL: mgather_falsemask_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x double> %passthru)
ret <vscale x 4 x double> %v
}
declare <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x double>)
define <vscale x 8 x double> @mgather_nxv8f64(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i8> %idxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
; CHECK-NEXT: vsll.vi v8, v10, 3
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i16> %idxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vzext.vf2 v12, v8
; CHECK-NEXT: vsll.vi v8, v12, 3
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_nxv8i32_nxv8f64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(ptr %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf2 v24, v8
; RV64-NEXT: vsll.vi v8, v24, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
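; Native i64 indices: RV32 first narrows them to i32 with vnsrl (32-bit
; pointers make the truncation sound), while RV64 uses them directly.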
define <vscale x 8 x double> @mgather_baseidx_nxv8f64(ptr %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %idxs
%v = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru)
ret <vscale x 8 x double> %v
}
declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
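; On RV64 an nxv16i64 index vector would exceed LMUL=8, so the gather is split
; into two nxv8 halves, with the mask for the upper half produced by a
; vslidedown of v0; RV32 still emits a single gather since nxv16i32 fits in m8.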
define <vscale x 16 x i8> @mgather_baseidx_nxv16i8(ptr %base, <vscale x 16 x i8> %idxs, <vscale x 16 x i1> %m, <vscale x 16 x i8> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv16i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv16i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v9
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 16 x i8> %idxs
%v = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %ptrs, i32 2, <vscale x 16 x i1> %m, <vscale x 16 x i8> %passthru)
ret <vscale x 16 x i8> %v
}
declare <vscale x 32 x i8> @llvm.masked.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr>, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
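; For nxv32i8 the split goes further: RV32 needs two nxv16i32 index halves and
; RV64 four nxv8i64 quarters, each gathering into its own slice of the result
; under its own slice of the mask.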
define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8> %idxs, <vscale x 32 x i1> %m, <vscale x 32 x i8> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv32i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 2
; RV32-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v10
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v14, (a0), v16, v0.t
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v16, v0
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v24, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a2, a1, 3
; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a2
; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v9
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v13, (a0), v24, v0.t
; RV64-NEXT: srli a1, a1, 2
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v8, v16, a1
; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v8, a2
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 32 x i8> %idxs
%v = call <vscale x 32 x i8> @llvm.masked.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr> %ptrs, i32 2, <vscale x 32 x i1> %m, <vscale x 32 x i8> %passthru)
ret <vscale x 32 x i8> %v
}
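; i1 indices are zero-extended by materializing a 0/1 byte vector with
; vmv.v.i + vmerge.vim; since the index operand arrives in v0, the real mask
; has to be moved from v8 into v0 before the masked load.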
define <vscale x 1 x i8> @mgather_baseidx_zext_nxv1i1_nxv1i8(ptr %base, <vscale x 1 x i1> %idxs, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv1i1_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vluxei8.v v9, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%eidxs = zext <vscale x 1 x i1> %idxs to <vscale x 1 x i8>
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 1 x i8> %eidxs
%v = call <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0(<vscale x 1 x ptr> %ptrs, i32 1, <vscale x 1 x i1> %m, <vscale x 1 x i8> %passthru)
ret <vscale x 1 x i8> %v
}