; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
; Negative tests to ensure we don't try to generate a vector reduction when
; vector instructions are not available.
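
; Scalarized sum reduction: the <4 x i32> argument is passed indirectly and each
; element is loaded and added as a scalar. On RV64 the final add is an addw,
; which sign-extends the 32-bit sum for the i32 return value.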
define i32 @reduce_sum_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_sum_4xi32:
; RV32: # %bb.0:
; RV32-NEXT: lw a1, 0(a0)
; RV32-NEXT: lw a2, 4(a0)
; RV32-NEXT: lw a3, 8(a0)
; RV32-NEXT: lw a0, 12(a0)
; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: add a0, a3, a0
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_sum_4xi32:
; RV64: # %bb.0:
; RV64-NEXT: lw a1, 0(a0)
; RV64-NEXT: lw a2, 8(a0)
; RV64-NEXT: lw a3, 16(a0)
; RV64-NEXT: lw a0, 24(a0)
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: add a0, a3, a0
; RV64-NEXT: addw a0, a1, a0
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
%e2 = extractelement <4 x i32> %v, i32 2
%e3 = extractelement <4 x i32> %v, i32 3
%add0 = add i32 %e0, %e1
%add1 = add i32 %add0, %e2
%add2 = add i32 %add1, %e3
ret i32 %add2
}
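
; Same pattern with xor. On RV64 the promoted i64 elements are loaded with ld
; and combined with plain xor; xor of sign-extended values is still
; sign-extended, so no trailing sext.w is needed.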
define i32 @reduce_xor_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_xor_4xi32:
; RV32: # %bb.0:
; RV32-NEXT: lw a1, 0(a0)
; RV32-NEXT: lw a2, 4(a0)
; RV32-NEXT: lw a3, 8(a0)
; RV32-NEXT: lw a0, 12(a0)
; RV32-NEXT: xor a1, a1, a2
; RV32-NEXT: xor a0, a3, a0
; RV32-NEXT: xor a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_xor_4xi32:
; RV64: # %bb.0:
; RV64-NEXT: ld a1, 0(a0)
; RV64-NEXT: ld a2, 8(a0)
; RV64-NEXT: ld a3, 16(a0)
; RV64-NEXT: ld a0, 24(a0)
; RV64-NEXT: xor a1, a1, a2
; RV64-NEXT: xor a0, a3, a0
; RV64-NEXT: xor a0, a1, a0
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
%e2 = extractelement <4 x i32> %v, i32 2
%e3 = extractelement <4 x i32> %v, i32 3
%xor0 = xor i32 %e0, %e1
%xor1 = xor i32 %xor0, %e2
%xor2 = xor i32 %xor1, %e3
ret i32 %xor2
}
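
; Likewise for an or reduction: or of sign-extended values stays sign-extended,
; so RV64 can use ld and plain or with no final sign-extension.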
define i32 @reduce_or_4xi32(<4 x i32> %v) {
; RV32-LABEL: reduce_or_4xi32:
; RV32: # %bb.0:
; RV32-NEXT: lw a1, 0(a0)
; RV32-NEXT: lw a2, 4(a0)
; RV32-NEXT: lw a3, 8(a0)
; RV32-NEXT: lw a0, 12(a0)
; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: or a0, a3, a0
; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: reduce_or_4xi32:
; RV64: # %bb.0:
; RV64-NEXT: ld a1, 0(a0)
; RV64-NEXT: ld a2, 8(a0)
; RV64-NEXT: ld a3, 16(a0)
; RV64-NEXT: ld a0, 24(a0)
; RV64-NEXT: or a1, a1, a2
; RV64-NEXT: or a0, a3, a0
; RV64-NEXT: or a0, a1, a0
; RV64-NEXT: ret
%e0 = extractelement <4 x i32> %v, i32 0
%e1 = extractelement <4 x i32> %v, i32 1
%e2 = extractelement <4 x i32> %v, i32 2
%e3 = extractelement <4 x i32> %v, i32 3
%or0 = or i32 %e0, %e1
%or1 = or i32 %or0, %e2
%or2 = or i32 %or1, %e3
ret i32 %or2
}