; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+v -O3 %s -o - | FileCheck %s
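
; These tests check that chains of identical RVV arithmetic instructions are
; reassociated to shorten the dependence chain: in the positive tests the
; CHECK lines show two independent operations feeding the final one, while in
; the *_negative tests a mismatched passthru or mask keeps the chain serial.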
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32)

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32, i32)

declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32)

declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32)
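
; The three vadds form a serial dependence chain %0 + (%0 + (%0 + %1)); since
; the passthru is undef and the VL is the same, the last two adds can be
; reassociated into (%0 + %0) + (%0 + %1), as the CHECK lines show.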
define <vscale x 1 x i8> @simple_vadd_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vadd_vv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vv v9, v8, v9
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
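
; As above, but the value feeding the vadd chain is produced by a vsub; the
; two trailing vadds are still reassociated.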
define <vscale x 1 x i8> @simple_vadd_vsub_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vadd_vsub_vv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsub.vv v9, v8, v9
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
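
; vmul chains are reassociated the same way as vadd chains: the CHECK lines
; show (%0 * %0) * (%0 * %1) in place of the serial chain.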
define <vscale x 1 x i8> @simple_vmul_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vmul_vv:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vv v9, v8, v9
; CHECK-NEXT: vmul.vv v8, v8, v8
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}

; With passthru and masks. Reassociation additionally requires that every
; operation in the chain use the same passthru (and, below, the same mask).

; All three vadds take %0 as the passthru, so the chain is still reassociated.
define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vadd.vv v9, v8, v8
; CHECK-NEXT: vadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
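
; Negative test: the second vadd takes %1 as its passthru while the others
; take %0, so the chain must not be reassociated; the CHECK lines show three
; serial vadds.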
define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru_negative:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vadd.vv v10, v8, v9
; CHECK-NEXT: vadd.vv v9, v8, v10
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
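
; Masked variant: all three vadds use the same mask %m and the same passthru
; %0, so the chain is still reassociated.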
define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
; CHECK-LABEL: vadd_vv_mask:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vadd.vv v9, v8, v8, v0.t
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  ret <vscale x 1 x i8> %c
}
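
; Negative test: the final vadd is masked by %m2 rather than %m, so the chain
; must stay serial.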
define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m, <vscale x 1 x i1> %m2) nounwind {
; CHECK-LABEL: vadd_vv_mask_negative:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v11, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vadd.vv v9, v8, v11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %m2,
    i32 %2, i32 1)
  ret <vscale x 1 x i8> %c
}