; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
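; The vmv.x.s intrinsic reads only element 0 of the source vector. For every
; case below where SEW <= XLEN, the lowering is therefore a single vmv.x.s
; under an m1 vsetivli regardless of the source operand's LMUL, with the
; result sign-extended into a0.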
declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %0)
ret i8 %a
}
declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> %0)
ret i8 %a
}
declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> %0)
ret i8 %a
}
declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> %0)
ret i8 %a
}
declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> %0)
ret i8 %a
}
declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %0)
ret i8 %a
}
declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> %0)
ret i8 %a
}
declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> %0)
ret i16 %a
}
declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> %0)
ret i16 %a
}
declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> %0)
ret i16 %a
}
declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> %0)
ret i16 %a
}
declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> %0)
ret i16 %a
}
declare i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> %0)
ret i16 %a
}
declare i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> %0)
ret i32 %a
}
declare i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> %0)
ret i32 %a
}
declare i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> %0)
ret i32 %a
}
declare i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> %0)
ret i32 %a
}
declare i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
%a = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> %0)
ret i32 %a
}
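; For 64-bit element types on RV32, SEW exceeds XLEN, so the extracted value
; is returned as a register pair under a vsetivli matching the source LMUL:
; vmv.x.s on the source yields the low 32 bits in a0, and a vsrl.vx by 32
; followed by a second vmv.x.s yields the high 32 bits in a1.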
declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v9, v8, a0
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
ret i64 %a
}
declare i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vsrl.vx v10, v8, a0
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> %0)
ret i64 %a
}
declare i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma
; RV32-NEXT: vsrl.vx v12, v8, a0
; RV32-NEXT: vmv.x.s a1, v12
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> %0)
ret i64 %a
}
declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
; RV32-NEXT: vsrl.vx v16, v8, a0
; RV32-NEXT: vmv.x.s a1, v16
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> %0)
ret i64 %a
}