; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
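
; These tests cover llvm.masked.expandload.* on fixed-length vectors. The
; intrinsic is currently scalarized: the mask is read into a GPR (vfirst.m
; for single-element vectors, vmv.x.s otherwise), each bit is branched on,
; and active elements are loaded individually and inserted into the result
; with vmv.s.x and vslideup.vi.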
declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
define <1 x i8> @expandload_v1i8(ptr %base, <1 x i8> %src0, <1 x i1> %mask) {
; CHECK-LABEL: expandload_v1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a1, v0
; CHECK-NEXT: bnez a1, .LBB0_2
; CHECK-NEXT: # %bb.1: # %cond.load
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: .LBB0_2: # %else
; CHECK-NEXT: ret
%res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %src0)
ret <1 x i8>%res
}

declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
define <2 x i8> @expandload_v2i8(ptr %base, <2 x i8> %src0, <2 x i1> %mask) {
; CHECK-LABEL: expandload_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB1_3
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a1, a1, 2
; CHECK-NEXT: bnez a1, .LBB1_4
; CHECK-NEXT: .LBB1_2: # %else2
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB1_3: # %cond.load
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a1, a1, 2
; CHECK-NEXT: beqz a1, .LBB1_2
; CHECK-NEXT: .LBB1_4: # %cond.load1
; CHECK-NEXT: lbu a0, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %src0)
ret <2 x i8>%res
}

declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
define <4 x i8> @expandload_v4i8(ptr %base, <4 x i8> %src0, <4 x i1> %mask) {
; CHECK-LABEL: expandload_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB2_5
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: bnez a2, .LBB2_6
; CHECK-NEXT: .LBB2_2: # %else2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: bnez a2, .LBB2_7
; CHECK-NEXT: .LBB2_3: # %else6
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: bnez a1, .LBB2_8
; CHECK-NEXT: .LBB2_4: # %else10
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_5: # %cond.load
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: beqz a2, .LBB2_2
; CHECK-NEXT: .LBB2_6: # %cond.load1
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: beqz a2, .LBB2_3
; CHECK-NEXT: .LBB2_7: # %cond.load5
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: beqz a1, .LBB2_4
; CHECK-NEXT: .LBB2_8: # %cond.load9
; CHECK-NEXT: lbu a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %src0)
ret <4 x i8>%res
}

declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
define <8 x i8> @expandload_v8i8(ptr %base, <8 x i8> %src0, <8 x i1> %mask) {
; CHECK-LABEL: expandload_v8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB3_9
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: bnez a2, .LBB3_10
; CHECK-NEXT: .LBB3_2: # %else2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: bnez a2, .LBB3_11
; CHECK-NEXT: .LBB3_3: # %else6
; CHECK-NEXT: andi a2, a1, 8
; CHECK-NEXT: bnez a2, .LBB3_12
; CHECK-NEXT: .LBB3_4: # %else10
; CHECK-NEXT: andi a2, a1, 16
; CHECK-NEXT: bnez a2, .LBB3_13
; CHECK-NEXT: .LBB3_5: # %else14
; CHECK-NEXT: andi a2, a1, 32
; CHECK-NEXT: bnez a2, .LBB3_14
; CHECK-NEXT: .LBB3_6: # %else18
; CHECK-NEXT: andi a2, a1, 64
; CHECK-NEXT: bnez a2, .LBB3_15
; CHECK-NEXT: .LBB3_7: # %else22
; CHECK-NEXT: andi a1, a1, -128
; CHECK-NEXT: bnez a1, .LBB3_16
; CHECK-NEXT: .LBB3_8: # %else26
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB3_9: # %cond.load
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: beqz a2, .LBB3_2
; CHECK-NEXT: .LBB3_10: # %cond.load1
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: beqz a2, .LBB3_3
; CHECK-NEXT: .LBB3_11: # %cond.load5
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 8
; CHECK-NEXT: beqz a2, .LBB3_4
; CHECK-NEXT: .LBB3_12: # %cond.load9
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 16
; CHECK-NEXT: beqz a2, .LBB3_5
; CHECK-NEXT: .LBB3_13: # %cond.load13
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 5, e8, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 32
; CHECK-NEXT: beqz a2, .LBB3_6
; CHECK-NEXT: .LBB3_14: # %cond.load17
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 5
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a2, a1, 64
; CHECK-NEXT: beqz a2, .LBB3_7
; CHECK-NEXT: .LBB3_15: # %cond.load21
; CHECK-NEXT: lbu a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 7, e8, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 6
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: andi a1, a1, -128
; CHECK-NEXT: beqz a1, .LBB3_8
; CHECK-NEXT: .LBB3_16: # %cond.load25
; CHECK-NEXT: lbu a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 7
; CHECK-NEXT: ret
%res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %src0)
ret <8 x i8>%res
}
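
; 16-bit element tests (pointer align 2).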
declare <1 x i16> @llvm.masked.expandload.v1i16(ptr, <1 x i1>, <1 x i16>)
define <1 x i16> @expandload_v1i16(ptr %base, <1 x i16> %src0, <1 x i1> %mask) {
; CHECK-LABEL: expandload_v1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a1, v0
; CHECK-NEXT: bnez a1, .LBB4_2
; CHECK-NEXT: # %bb.1: # %cond.load
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: .LBB4_2: # %else
; CHECK-NEXT: ret
%res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %src0)
ret <1 x i16>%res
}

declare <2 x i16> @llvm.masked.expandload.v2i16(ptr, <2 x i1>, <2 x i16>)
define <2 x i16> @expandload_v2i16(ptr %base, <2 x i16> %src0, <2 x i1> %mask) {
; CHECK-LABEL: expandload_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB5_3
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a1, a1, 2
; CHECK-NEXT: bnez a1, .LBB5_4
; CHECK-NEXT: .LBB5_2: # %else2
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB5_3: # %cond.load
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a1, a1, 2
; CHECK-NEXT: beqz a1, .LBB5_2
; CHECK-NEXT: .LBB5_4: # %cond.load1
; CHECK-NEXT: lh a0, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %src0)
ret <2 x i16>%res
}

declare <4 x i16> @llvm.masked.expandload.v4i16(ptr, <4 x i1>, <4 x i16>)
define <4 x i16> @expandload_v4i16(ptr %base, <4 x i16> %src0, <4 x i1> %mask) {
; CHECK-LABEL: expandload_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB6_5
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: bnez a2, .LBB6_6
; CHECK-NEXT: .LBB6_2: # %else2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: bnez a2, .LBB6_7
; CHECK-NEXT: .LBB6_3: # %else6
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: bnez a1, .LBB6_8
; CHECK-NEXT: .LBB6_4: # %else10
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB6_5: # %cond.load
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: beqz a2, .LBB6_2
; CHECK-NEXT: .LBB6_6: # %cond.load1
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: beqz a2, .LBB6_3
; CHECK-NEXT: .LBB6_7: # %cond.load5
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 3, e16, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: beqz a1, .LBB6_4
; CHECK-NEXT: .LBB6_8: # %cond.load9
; CHECK-NEXT: lh a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %src0)
ret <4 x i16>%res
}

declare <8 x i16> @llvm.masked.expandload.v8i16(ptr, <8 x i1>, <8 x i16>)
define <8 x i16> @expandload_v8i16(ptr %base, <8 x i16> %src0, <8 x i1> %mask) {
; CHECK-LABEL: expandload_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB7_9
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: bnez a2, .LBB7_10
; CHECK-NEXT: .LBB7_2: # %else2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: bnez a2, .LBB7_11
; CHECK-NEXT: .LBB7_3: # %else6
; CHECK-NEXT: andi a2, a1, 8
; CHECK-NEXT: bnez a2, .LBB7_12
; CHECK-NEXT: .LBB7_4: # %else10
; CHECK-NEXT: andi a2, a1, 16
; CHECK-NEXT: bnez a2, .LBB7_13
; CHECK-NEXT: .LBB7_5: # %else14
; CHECK-NEXT: andi a2, a1, 32
; CHECK-NEXT: bnez a2, .LBB7_14
; CHECK-NEXT: .LBB7_6: # %else18
; CHECK-NEXT: andi a2, a1, 64
; CHECK-NEXT: bnez a2, .LBB7_15
; CHECK-NEXT: .LBB7_7: # %else22
; CHECK-NEXT: andi a1, a1, -128
; CHECK-NEXT: bnez a1, .LBB7_16
; CHECK-NEXT: .LBB7_8: # %else26
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB7_9: # %cond.load
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: beqz a2, .LBB7_2
; CHECK-NEXT: .LBB7_10: # %cond.load1
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vsetivli zero, 2, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: beqz a2, .LBB7_3
; CHECK-NEXT: .LBB7_11: # %cond.load5
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 3, e16, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 8
; CHECK-NEXT: beqz a2, .LBB7_4
; CHECK-NEXT: .LBB7_12: # %cond.load9
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 16
; CHECK-NEXT: beqz a2, .LBB7_5
; CHECK-NEXT: .LBB7_13: # %cond.load13
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 5, e16, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 32
; CHECK-NEXT: beqz a2, .LBB7_6
; CHECK-NEXT: .LBB7_14: # %cond.load17
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 5
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a2, a1, 64
; CHECK-NEXT: beqz a2, .LBB7_7
; CHECK-NEXT: .LBB7_15: # %cond.load21
; CHECK-NEXT: lh a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 6
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: andi a1, a1, -128
; CHECK-NEXT: beqz a1, .LBB7_8
; CHECK-NEXT: .LBB7_16: # %cond.load25
; CHECK-NEXT: lh a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 7
; CHECK-NEXT: ret
%res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %src0)
ret <8 x i16>%res
}
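
; 32-bit element tests (pointer align 4).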
declare <1 x i32> @llvm.masked.expandload.v1i32(ptr, <1 x i1>, <1 x i32>)
define <1 x i32> @expandload_v1i32(ptr %base, <1 x i32> %src0, <1 x i1> %mask) {
; CHECK-LABEL: expandload_v1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a1, v0
; CHECK-NEXT: bnez a1, .LBB8_2
; CHECK-NEXT: # %bb.1: # %cond.load
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: .LBB8_2: # %else
; CHECK-NEXT: ret
%res = call <1 x i32> @llvm.masked.expandload.v1i32(ptr align 4 %base, <1 x i1> %mask, <1 x i32> %src0)
ret <1 x i32>%res
}

declare <2 x i32> @llvm.masked.expandload.v2i32(ptr, <2 x i1>, <2 x i32>)
define <2 x i32> @expandload_v2i32(ptr %base, <2 x i32> %src0, <2 x i1> %mask) {
; CHECK-LABEL: expandload_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB9_3
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a1, a1, 2
; CHECK-NEXT: bnez a1, .LBB9_4
; CHECK-NEXT: .LBB9_2: # %else2
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB9_3: # %cond.load
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a1, a1, 2
; CHECK-NEXT: beqz a1, .LBB9_2
; CHECK-NEXT: .LBB9_4: # %cond.load1
; CHECK-NEXT: lw a0, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <2 x i32> @llvm.masked.expandload.v2i32(ptr align 4 %base, <2 x i1> %mask, <2 x i32> %src0)
ret <2 x i32>%res
}

declare <4 x i32> @llvm.masked.expandload.v4i32(ptr, <4 x i1>, <4 x i32>)
define <4 x i32> @expandload_v4i32(ptr %base, <4 x i32> %src0, <4 x i1> %mask) {
; CHECK-LABEL: expandload_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB10_5
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: bnez a2, .LBB10_6
; CHECK-NEXT: .LBB10_2: # %else2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: bnez a2, .LBB10_7
; CHECK-NEXT: .LBB10_3: # %else6
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: bnez a1, .LBB10_8
; CHECK-NEXT: .LBB10_4: # %else10
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB10_5: # %cond.load
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: beqz a2, .LBB10_2
; CHECK-NEXT: .LBB10_6: # %cond.load1
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: beqz a2, .LBB10_3
; CHECK-NEXT: .LBB10_7: # %cond.load5
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT: vmv.s.x v9, a2
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a1, a1, 8
; CHECK-NEXT: beqz a1, .LBB10_4
; CHECK-NEXT: .LBB10_8: # %cond.load9
; CHECK-NEXT: lw a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%res = call <4 x i32> @llvm.masked.expandload.v4i32(ptr align 4 %base, <4 x i1> %mask, <4 x i32> %src0)
ret <4 x i32>%res
}

declare <8 x i32> @llvm.masked.expandload.v8i32(ptr, <8 x i1>, <8 x i32>)
define <8 x i32> @expandload_v8i32(ptr %base, <8 x i32> %src0, <8 x i1> %mask) {
; CHECK-LABEL: expandload_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a1, v0
; CHECK-NEXT: andi a2, a1, 1
; CHECK-NEXT: bnez a2, .LBB11_9
; CHECK-NEXT: # %bb.1: # %else
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: bnez a2, .LBB11_10
; CHECK-NEXT: .LBB11_2: # %else2
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: bnez a2, .LBB11_11
; CHECK-NEXT: .LBB11_3: # %else6
; CHECK-NEXT: andi a2, a1, 8
; CHECK-NEXT: bnez a2, .LBB11_12
; CHECK-NEXT: .LBB11_4: # %else10
; CHECK-NEXT: andi a2, a1, 16
; CHECK-NEXT: bnez a2, .LBB11_13
; CHECK-NEXT: .LBB11_5: # %else14
; CHECK-NEXT: andi a2, a1, 32
; CHECK-NEXT: bnez a2, .LBB11_14
; CHECK-NEXT: .LBB11_6: # %else18
; CHECK-NEXT: andi a2, a1, 64
; CHECK-NEXT: bnez a2, .LBB11_15
; CHECK-NEXT: .LBB11_7: # %else22
; CHECK-NEXT: andi a1, a1, -128
; CHECK-NEXT: bnez a1, .LBB11_16
; CHECK-NEXT: .LBB11_8: # %else26
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB11_9: # %cond.load
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 2
; CHECK-NEXT: beqz a2, .LBB11_2
; CHECK-NEXT: .LBB11_10: # %cond.load1
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv.s.x v10, a2
; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 4
; CHECK-NEXT: beqz a2, .LBB11_3
; CHECK-NEXT: .LBB11_11: # %cond.load5
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT: vmv.s.x v10, a2
; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 8
; CHECK-NEXT: beqz a2, .LBB11_4
; CHECK-NEXT: .LBB11_12: # %cond.load9
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT: vmv.s.x v10, a2
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 16
; CHECK-NEXT: beqz a2, .LBB11_5
; CHECK-NEXT: .LBB11_13: # %cond.load13
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT: vmv.s.x v10, a2
; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 32
; CHECK-NEXT: beqz a2, .LBB11_6
; CHECK-NEXT: .LBB11_14: # %cond.load17
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT: vmv.s.x v10, a2
; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a2, a1, 64
; CHECK-NEXT: beqz a2, .LBB11_7
; CHECK-NEXT: .LBB11_15: # %cond.load21
; CHECK-NEXT: lw a2, 0(a0)
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT: vmv.s.x v10, a2
; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: addi a0, a0, 4
; CHECK-NEXT: andi a1, a1, -128
; CHECK-NEXT: beqz a1, .LBB11_8
; CHECK-NEXT: .LBB11_16: # %cond.load25
; CHECK-NEXT: lw a0, 0(a0)
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 7
; CHECK-NEXT: ret
%res = call <8 x i32> @llvm.masked.expandload.v8i32(ptr align 4 %base, <8 x i1> %mask, <8 x i32> %src0)
ret <8 x i32>%res
}
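
; 64-bit element tests (pointer align 8). RV32 has no 64-bit scalar load, so
; each element is assembled from two lw's with vslide1down.vx (v1i64 goes
; through a stack slot and vlse64.v); RV64 uses ld and vmv.s.x.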
declare <1 x i64> @llvm.masked.expandload.v1i64(ptr, <1 x i1>, <1 x i64>)
define <1 x i64> @expandload_v1i64(ptr %base, <1 x i64> %src0, <1 x i1> %mask) {
; RV32-LABEL: expandload_v1i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; RV32-NEXT: vfirst.m a1, v0
; RV32-NEXT: bnez a1, .LBB12_2
; RV32-NEXT: # %bb.1: # %cond.load
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: lw a1, 4(a0)
; RV32-NEXT: lw a0, 0(a0)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .LBB12_2: # %else
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; RV64-NEXT: vfirst.m a1, v0
; RV64-NEXT: bnez a1, .LBB12_2
; RV64-NEXT: # %bb.1: # %cond.load
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: .LBB12_2: # %else
; RV64-NEXT: ret
%res = call <1 x i64> @llvm.masked.expandload.v1i64(ptr align 8 %base, <1 x i1> %mask, <1 x i64> %src0)
ret <1 x i64>%res
}

declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)
define <2 x i64> @expandload_v2i64(ptr %base, <2 x i64> %src0, <2 x i1> %mask) {
; RV32-LABEL: expandload_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV32-NEXT: vmv.x.s a1, v0
; RV32-NEXT: andi a2, a1, 1
; RV32-NEXT: bnez a2, .LBB13_3
; RV32-NEXT: # %bb.1: # %else
; RV32-NEXT: andi a1, a1, 2
; RV32-NEXT: bnez a1, .LBB13_4
; RV32-NEXT: .LBB13_2: # %else2
; RV32-NEXT: ret
; RV32-NEXT: .LBB13_3: # %cond.load
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a3
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a1, a1, 2
; RV32-NEXT: beqz a1, .LBB13_2
; RV32-NEXT: .LBB13_4: # %cond.load1
; RV32-NEXT: lw a1, 0(a0)
; RV32-NEXT: lw a0, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v9, v8, a1
; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vslideup.vi v8, v9, 1
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-NEXT: vmv.x.s a1, v0
; RV64-NEXT: andi a2, a1, 1
; RV64-NEXT: bnez a2, .LBB13_3
; RV64-NEXT: # %bb.1: # %else
; RV64-NEXT: andi a1, a1, 2
; RV64-NEXT: bnez a1, .LBB13_4
; RV64-NEXT: .LBB13_2: # %else2
; RV64-NEXT: ret
; RV64-NEXT: .LBB13_3: # %cond.load
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV64-NEXT: vmv.s.x v8, a2
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a1, a1, 2
; RV64-NEXT: beqz a1, .LBB13_2
; RV64-NEXT: .LBB13_4: # %cond.load1
; RV64-NEXT: ld a0, 0(a0)
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vmv.s.x v9, a0
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vslideup.vi v8, v9, 1
; RV64-NEXT: ret
%res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr align 8 %base, <2 x i1> %mask, <2 x i64> %src0)
ret <2 x i64>%res
}

declare <4 x i64> @llvm.masked.expandload.v4i64(ptr, <4 x i1>, <4 x i64>)
define <4 x i64> @expandload_v4i64(ptr %base, <4 x i64> %src0, <4 x i1> %mask) {
; RV32-LABEL: expandload_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV32-NEXT: vmv.x.s a1, v0
; RV32-NEXT: andi a2, a1, 1
; RV32-NEXT: bnez a2, .LBB14_5
; RV32-NEXT: # %bb.1: # %else
; RV32-NEXT: andi a2, a1, 2
; RV32-NEXT: bnez a2, .LBB14_6
; RV32-NEXT: .LBB14_2: # %else2
; RV32-NEXT: andi a2, a1, 4
; RV32-NEXT: bnez a2, .LBB14_7
; RV32-NEXT: .LBB14_3: # %else6
; RV32-NEXT: andi a1, a1, 8
; RV32-NEXT: bnez a1, .LBB14_8
; RV32-NEXT: .LBB14_4: # %else10
; RV32-NEXT: ret
; RV32-NEXT: .LBB14_5: # %cond.load
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a3
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 2
; RV32-NEXT: beqz a2, .LBB14_2
; RV32-NEXT: .LBB14_6: # %cond.load1
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v10, v8, a2
; RV32-NEXT: vslide1down.vx v10, v10, a3
; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; RV32-NEXT: vslideup.vi v8, v10, 1
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 4
; RV32-NEXT: beqz a2, .LBB14_3
; RV32-NEXT: .LBB14_7: # %cond.load5
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v8, a2
; RV32-NEXT: vslide1down.vx v10, v10, a3
; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; RV32-NEXT: vslideup.vi v8, v10, 2
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a1, a1, 8
; RV32-NEXT: beqz a1, .LBB14_4
; RV32-NEXT: .LBB14_8: # %cond.load9
; RV32-NEXT: lw a1, 0(a0)
; RV32-NEXT: lw a0, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v8, a1
; RV32-NEXT: vslide1down.vx v10, v10, a0
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vslideup.vi v8, v10, 3
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-NEXT: vmv.x.s a1, v0
; RV64-NEXT: andi a2, a1, 1
; RV64-NEXT: bnez a2, .LBB14_5
; RV64-NEXT: # %bb.1: # %else
; RV64-NEXT: andi a2, a1, 2
; RV64-NEXT: bnez a2, .LBB14_6
; RV64-NEXT: .LBB14_2: # %else2
; RV64-NEXT: andi a2, a1, 4
; RV64-NEXT: bnez a2, .LBB14_7
; RV64-NEXT: .LBB14_3: # %else6
; RV64-NEXT: andi a1, a1, 8
; RV64-NEXT: bnez a1, .LBB14_8
; RV64-NEXT: .LBB14_4: # %else10
; RV64-NEXT: ret
; RV64-NEXT: .LBB14_5: # %cond.load
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV64-NEXT: vmv.s.x v8, a2
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 2
; RV64-NEXT: beqz a2, .LBB14_2
; RV64-NEXT: .LBB14_6: # %cond.load1
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vmv.s.x v10, a2
; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; RV64-NEXT: vslideup.vi v8, v10, 1
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 4
; RV64-NEXT: beqz a2, .LBB14_3
; RV64-NEXT: .LBB14_7: # %cond.load5
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; RV64-NEXT: vmv.s.x v10, a2
; RV64-NEXT: vslideup.vi v8, v10, 2
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a1, a1, 8
; RV64-NEXT: beqz a1, .LBB14_4
; RV64-NEXT: .LBB14_8: # %cond.load9
; RV64-NEXT: ld a0, 0(a0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vslideup.vi v8, v10, 3
; RV64-NEXT: ret
%res = call <4 x i64> @llvm.masked.expandload.v4i64(ptr align 8 %base, <4 x i1> %mask, <4 x i64> %src0)
ret <4 x i64>%res
}

declare <8 x i64> @llvm.masked.expandload.v8i64(ptr, <8 x i1>, <8 x i64>)
define <8 x i64> @expandload_v8i64(ptr %base, <8 x i64> %src0, <8 x i1> %mask) {
; RV32-LABEL: expandload_v8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV32-NEXT: vmv.x.s a1, v0
; RV32-NEXT: andi a2, a1, 1
; RV32-NEXT: bnez a2, .LBB15_9
; RV32-NEXT: # %bb.1: # %else
; RV32-NEXT: andi a2, a1, 2
; RV32-NEXT: bnez a2, .LBB15_10
; RV32-NEXT: .LBB15_2: # %else2
; RV32-NEXT: andi a2, a1, 4
; RV32-NEXT: bnez a2, .LBB15_11
; RV32-NEXT: .LBB15_3: # %else6
; RV32-NEXT: andi a2, a1, 8
; RV32-NEXT: bnez a2, .LBB15_12
; RV32-NEXT: .LBB15_4: # %else10
; RV32-NEXT: andi a2, a1, 16
; RV32-NEXT: bnez a2, .LBB15_13
; RV32-NEXT: .LBB15_5: # %else14
; RV32-NEXT: andi a2, a1, 32
; RV32-NEXT: bnez a2, .LBB15_14
; RV32-NEXT: .LBB15_6: # %else18
; RV32-NEXT: andi a2, a1, 64
; RV32-NEXT: bnez a2, .LBB15_15
; RV32-NEXT: .LBB15_7: # %else22
; RV32-NEXT: andi a1, a1, -128
; RV32-NEXT: bnez a1, .LBB15_16
; RV32-NEXT: .LBB15_8: # %else26
; RV32-NEXT: ret
; RV32-NEXT: .LBB15_9: # %cond.load
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a3
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 2
; RV32-NEXT: beqz a2, .LBB15_2
; RV32-NEXT: .LBB15_10: # %cond.load1
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a2
; RV32-NEXT: vslide1down.vx v12, v12, a3
; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; RV32-NEXT: vslideup.vi v8, v12, 1
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 4
; RV32-NEXT: beqz a2, .LBB15_3
; RV32-NEXT: .LBB15_11: # %cond.load5
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a2
; RV32-NEXT: vslide1down.vx v12, v12, a3
; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; RV32-NEXT: vslideup.vi v8, v12, 2
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 8
; RV32-NEXT: beqz a2, .LBB15_4
; RV32-NEXT: .LBB15_12: # %cond.load9
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a2
; RV32-NEXT: vslide1down.vx v12, v12, a3
; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, ma
; RV32-NEXT: vslideup.vi v8, v12, 3
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 16
; RV32-NEXT: beqz a2, .LBB15_5
; RV32-NEXT: .LBB15_13: # %cond.load13
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a2
; RV32-NEXT: vslide1down.vx v12, v12, a3
; RV32-NEXT: vsetivli zero, 5, e64, m4, tu, ma
; RV32-NEXT: vslideup.vi v8, v12, 4
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 32
; RV32-NEXT: beqz a2, .LBB15_6
; RV32-NEXT: .LBB15_14: # %cond.load17
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a2
; RV32-NEXT: vslide1down.vx v12, v12, a3
; RV32-NEXT: vsetivli zero, 6, e64, m4, tu, ma
; RV32-NEXT: vslideup.vi v8, v12, 5
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a2, a1, 64
; RV32-NEXT: beqz a2, .LBB15_7
; RV32-NEXT: .LBB15_15: # %cond.load21
; RV32-NEXT: lw a2, 0(a0)
; RV32-NEXT: lw a3, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a2
; RV32-NEXT: vslide1down.vx v12, v12, a3
; RV32-NEXT: vsetivli zero, 7, e64, m4, tu, ma
; RV32-NEXT: vslideup.vi v8, v12, 6
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: andi a1, a1, -128
; RV32-NEXT: beqz a1, .LBB15_8
; RV32-NEXT: .LBB15_16: # %cond.load25
; RV32-NEXT: lw a1, 0(a0)
; RV32-NEXT: lw a0, 4(a0)
; RV32-NEXT: vsetivli zero, 2, e32, m4, ta, ma
; RV32-NEXT: vslide1down.vx v12, v8, a1
; RV32-NEXT: vslide1down.vx v12, v12, a0
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vslideup.vi v8, v12, 7
; RV32-NEXT: ret
;
; RV64-LABEL: expandload_v8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-NEXT: vmv.x.s a1, v0
; RV64-NEXT: andi a2, a1, 1
; RV64-NEXT: bnez a2, .LBB15_9
; RV64-NEXT: # %bb.1: # %else
; RV64-NEXT: andi a2, a1, 2
; RV64-NEXT: bnez a2, .LBB15_10
; RV64-NEXT: .LBB15_2: # %else2
; RV64-NEXT: andi a2, a1, 4
; RV64-NEXT: bnez a2, .LBB15_11
; RV64-NEXT: .LBB15_3: # %else6
; RV64-NEXT: andi a2, a1, 8
; RV64-NEXT: bnez a2, .LBB15_12
; RV64-NEXT: .LBB15_4: # %else10
; RV64-NEXT: andi a2, a1, 16
; RV64-NEXT: bnez a2, .LBB15_13
; RV64-NEXT: .LBB15_5: # %else14
; RV64-NEXT: andi a2, a1, 32
; RV64-NEXT: bnez a2, .LBB15_14
; RV64-NEXT: .LBB15_6: # %else18
; RV64-NEXT: andi a2, a1, 64
; RV64-NEXT: bnez a2, .LBB15_15
; RV64-NEXT: .LBB15_7: # %else22
; RV64-NEXT: andi a1, a1, -128
; RV64-NEXT: bnez a1, .LBB15_16
; RV64-NEXT: .LBB15_8: # %else26
; RV64-NEXT: ret
; RV64-NEXT: .LBB15_9: # %cond.load
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV64-NEXT: vmv.s.x v8, a2
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 2
; RV64-NEXT: beqz a2, .LBB15_2
; RV64-NEXT: .LBB15_10: # %cond.load1
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; RV64-NEXT: vslideup.vi v8, v12, 1
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 4
; RV64-NEXT: beqz a2, .LBB15_3
; RV64-NEXT: .LBB15_11: # %cond.load5
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: vslideup.vi v8, v12, 2
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 8
; RV64-NEXT: beqz a2, .LBB15_4
; RV64-NEXT: .LBB15_12: # %cond.load9
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, tu, ma
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: vslideup.vi v8, v12, 3
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 16
; RV64-NEXT: beqz a2, .LBB15_5
; RV64-NEXT: .LBB15_13: # %cond.load13
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: vslideup.vi v8, v12, 4
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 32
; RV64-NEXT: beqz a2, .LBB15_6
; RV64-NEXT: .LBB15_14: # %cond.load17
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: vslideup.vi v8, v12, 5
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a2, a1, 64
; RV64-NEXT: beqz a2, .LBB15_7
; RV64-NEXT: .LBB15_15: # %cond.load21
; RV64-NEXT: ld a2, 0(a0)
; RV64-NEXT: vsetivli zero, 7, e64, m4, tu, ma
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: vslideup.vi v8, v12, 6
; RV64-NEXT: addi a0, a0, 8
; RV64-NEXT: andi a1, a1, -128
; RV64-NEXT: beqz a1, .LBB15_8
; RV64-NEXT: .LBB15_16: # %cond.load25
; RV64-NEXT: ld a0, 0(a0)
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vmv.s.x v12, a0
; RV64-NEXT: vslideup.vi v8, v12, 7
; RV64-NEXT: ret
%res = call <8 x i64> @llvm.masked.expandload.v8i64(ptr align 8 %base, <8 x i1> %mask, <8 x i64> %src0)
ret <8 x i64>%res
}