; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
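
; Extracting an i1 from a scalable mask at a variable index: the mask produced
; by the compare is expanded to a vector of i8 (vmv.v.i + vmerge.vim), the
; selected lane is slid down to element 0 with vslidedown.vx, and the value is
; read out with vmv.x.s.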
define i1 @extractelt_nxv1i1(ptr %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%a = load <vscale x 1 x i8>, ptr %x
%b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
%c = extractelement <vscale x 1 x i1> %b, i64 %idx
ret i1 %c
}

define i1 @extractelt_nxv2i1(ptr %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
%c = extractelement <vscale x 2 x i1> %b, i64 %idx
ret i1 %c
}

define i1 @extractelt_nxv4i1(ptr %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%a = load <vscale x 4 x i8>, ptr %x
%b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
%c = extractelement <vscale x 4 x i1> %b, i64 %idx
ret i1 %c
}

define i1 @extractelt_nxv8i1(ptr %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl1r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%a = load <vscale x 8 x i8>, ptr %x
%b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
%c = extractelement <vscale x 8 x i1> %b, i64 %idx
ret i1 %c
}

define i1 @extractelt_nxv16i1(ptr %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%a = load <vscale x 16 x i8>, ptr %x
%b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
%c = extractelement <vscale x 16 x i1> %b, i64 %idx
ret i1 %c
}

define i1 @extractelt_nxv32i1(ptr %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl4r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%a = load <vscale x 32 x i8>, ptr %x
%b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
%c = extractelement <vscale x 32 x i1> %b, i64 %idx
ret i1 %c
}

define i1 @extractelt_nxv64i1(ptr %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%a = load <vscale x 64 x i8>, ptr %x
%b = icmp eq <vscale x 64 x i8> %a, zeroinitializer
%c = extractelement <vscale x 64 x i1> %b, i64 %idx
ret i1 %c
}
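
; <vscale x 128 x i8> does not fit in a single LMUL=8 register group, so the
; index is clamped to the last element, both mask halves are expanded to i8
; vectors and spilled to an aligned stack slot, and the requested element is
; loaded back with lbu.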
define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
; RV32-LABEL: extractelt_nxv128i1:
; RV32: # %bb.0:
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a3, a2, 4
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: bltu a1, a3, .LBB7_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a3
; RV32-NEXT: .LBB7_2:
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 80
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: sub sp, sp, a3
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a3, a0, a2
; RV32-NEXT: vl8r.v v16, (a3)
; RV32-NEXT: vl8r.v v24, (a0)
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: add a1, a0, a1
; RV32-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; RV32-NEXT: vmseq.vi v8, v16, 0
; RV32-NEXT: vmseq.vi v0, v24, 0
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vmerge.vim v24, v16, 1, v0
; RV32-NEXT: vs8r.v v24, (a0)
; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vmerge.vim v8, v16, 1, v0
; RV32-NEXT: vs8r.v v8, (a0)
; RV32-NEXT: lbu a0, 0(a1)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: ret
;
; RV64-LABEL: extractelt_nxv128i1:
; RV64: # %bb.0:
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: slli a3, a2, 4
; RV64-NEXT: addi a3, a3, -1
; RV64-NEXT: bltu a1, a3, .LBB7_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a3
; RV64-NEXT: .LBB7_2:
; RV64-NEXT: addi sp, sp, -80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 80
; RV64-NEXT: csrr a3, vlenb
; RV64-NEXT: slli a3, a3, 4
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a3, a0, a2
; RV64-NEXT: vl8r.v v16, (a3)
; RV64-NEXT: vl8r.v v24, (a0)
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: add a1, a0, a1
; RV64-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; RV64-NEXT: vmseq.vi v8, v16, 0
; RV64-NEXT: vmseq.vi v0, v24, 0
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vmerge.vim v24, v16, 1, v0
; RV64-NEXT: vs8r.v v24, (a0)
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vmerge.vim v8, v16, 1, v0
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: lbu a0, 0(a1)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
%a = load <vscale x 128 x i8>, ptr %x
%b = icmp eq <vscale x 128 x i8> %a, zeroinitializer
%c = extractelement <vscale x 128 x i1> %b, i64 %idx
ret i1 %c
}
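
; Extracting element 0 of a mask uses vfirst.m: it returns 0 exactly when
; element 0 of the mask is set, so seqz yields the extracted bit.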
define i1 @extractelt_nxv1i1_idx0(ptr %x) nounwind {
; CHECK-LABEL: extractelt_nxv1i1_idx0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 1 x i8>, ptr %x
%b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
%c = extractelement <vscale x 1 x i1> %b, i64 0
ret i1 %c
}

define i1 @extractelt_nxv2i1_idx0(ptr %x) nounwind {
; CHECK-LABEL: extractelt_nxv2i1_idx0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 2 x i8>, ptr %x
%b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
%c = extractelement <vscale x 2 x i1> %b, i64 0
ret i1 %c
}

define i1 @extractelt_nxv4i1_idx0(ptr %x) nounwind {
; CHECK-LABEL: extractelt_nxv4i1_idx0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 4 x i8>, ptr %x
%b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
%c = extractelement <vscale x 4 x i1> %b, i64 0
ret i1 %c
}

define i1 @extractelt_nxv8i1_idx0(ptr %x) nounwind {
; CHECK-LABEL: extractelt_nxv8i1_idx0:
; CHECK: # %bb.0:
; CHECK-NEXT: vl1r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 8 x i8>, ptr %x
%b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
%c = extractelement <vscale x 8 x i1> %b, i64 0
ret i1 %c
}

define i1 @extractelt_nxv16i1_idx0(ptr %x) nounwind {
; CHECK-LABEL: extractelt_nxv16i1_idx0:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmseq.vi v10, v8, 0
; CHECK-NEXT: vfirst.m a0, v10
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 16 x i8>, ptr %x
%b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
%c = extractelement <vscale x 16 x i1> %b, i64 0
ret i1 %c
}

define i1 @extractelt_nxv32i1_idx0(ptr %x) nounwind {
; CHECK-LABEL: extractelt_nxv32i1_idx0:
; CHECK: # %bb.0:
; CHECK-NEXT: vl4r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vmseq.vi v12, v8, 0
; CHECK-NEXT: vfirst.m a0, v12
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 32 x i8>, ptr %x
%b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
%c = extractelement <vscale x 32 x i1> %b, i64 0
ret i1 %c
}

define i1 @extractelt_nxv64i1_idx0(ptr %x) nounwind {
; CHECK-LABEL: extractelt_nxv64i1_idx0:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vi v16, v8, 0
; CHECK-NEXT: vfirst.m a0, v16
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 64 x i8>, ptr %x
%b = icmp eq <vscale x 64 x i8> %a, zeroinitializer
%c = extractelement <vscale x 64 x i1> %b, i64 0
ret i1 %c
}