; llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
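
; This file exercises the llvm.riscv.vmv.v.x intrinsic, which splats a scalar
; GPR value across all elements of a scalable vector. Each call takes a
; passthru operand (undef throughout this file), the scalar to splat, and a
; VL operand; the sed commands above rewrite the iXLen placeholder to i32 or
; i64 so the VL type always matches XLEN. For each SEW/LMUL combination the
; expected codegen is a single vsetvli followed by vmv.v.x.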

declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
    <vscale x 1 x i8> undef,
    i8 %0,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
    <vscale x 2 x i8> undef,
    i8 %0,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
    <vscale x 4 x i8> undef,
    i8 %0,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
    <vscale x 8 x i8> undef,
    i8 %0,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
    <vscale x 16 x i8> undef,
    i8 %0,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
    <vscale x 32 x i8> undef,
    i8 %0,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
  <vscale x 64 x i8>,
  i8,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
    <vscale x 64 x i8> undef,
    i8 %0,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
    <vscale x 1 x i16> undef,
    i16 %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
    <vscale x 2 x i16> undef,
    i16 %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
    <vscale x 4 x i16> undef,
    i16 %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
    <vscale x 8 x i16> undef,
    i16 %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
    <vscale x 16 x i16> undef,
    i16 %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
  <vscale x 32 x i16>,
  i16,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
    <vscale x 32 x i16> undef,
    i16 %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
    <vscale x 1 x i32> undef,
    i32 %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
    <vscale x 2 x i32> undef,
    i32 %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
    <vscale x 4 x i32> undef,
    i32 %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
    <vscale x 8 x i32> undef,
    i32 %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
  <vscale x 16 x i32>,
  i32,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
    <vscale x 16 x i32> undef,
    i32 %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

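; For i64 elements on RV32, the 64-bit scalar arrives split across a GPR pair
; (a0 = low half, a1 = high half) and cannot be splatted with a single
; vmv.v.x. The expected lowering stores both halves to a stack slot and
; splats the 64-bit value with a zero-strided vlse64.v instead; RV64 still
; uses vmv.v.x directly.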
declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, iXLen %1) nounwind {
; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> undef,
    i64 %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, iXLen %1) nounwind {
; RV32-LABEL: intrinsic_vmv.v.x_x_nxv2i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.v.x_x_nxv2i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
    <vscale x 2 x i64> undef,
    i64 %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, iXLen %1) nounwind {
; RV32-LABEL: intrinsic_vmv.v.x_x_nxv4i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.v.x_x_nxv4i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
    <vscale x 4 x i64> undef,
    i64 %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
  <vscale x 8 x i64>,
  i64,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, iXLen %1) nounwind {
; RV32-LABEL: intrinsic_vmv.v.x_x_nxv8i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.v.x_x_nxv8i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmv.v.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
    <vscale x 8 x i64> undef,
    i64 %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

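; When the splatted value is a constant that fits in the 5-bit signed
; immediate field of vmv.v.i (here 9), instruction selection is expected to
; fold the scalar move into vmv.v.i, avoiding the GPR materialization
; entirely. This also covers the i64-on-RV32 cases, since the sign-extended
; immediate represents the full 64-bit value.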
define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
    <vscale x 1 x i8> undef,
    i8 9,
    iXLen %0)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
    <vscale x 2 x i8> undef,
    i8 9,
    iXLen %0)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
    <vscale x 4 x i8> undef,
    i8 9,
    iXLen %0)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
    <vscale x 8 x i8> undef,
    i8 9,
    iXLen %0)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
    <vscale x 16 x i8> undef,
    i8 9,
    iXLen %0)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
    <vscale x 32 x i8> undef,
    i8 9,
    iXLen %0)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
    <vscale x 64 x i8> undef,
    i8 9,
    iXLen %0)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
    <vscale x 1 x i16> undef,
    i16 9,
    iXLen %0)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
    <vscale x 2 x i16> undef,
    i16 9,
    iXLen %0)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
    <vscale x 4 x i16> undef,
    i16 9,
    iXLen %0)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
    <vscale x 8 x i16> undef,
    i16 9,
    iXLen %0)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
    <vscale x 16 x i16> undef,
    i16 9,
    iXLen %0)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
    <vscale x 32 x i16> undef,
    i16 9,
    iXLen %0)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
    <vscale x 1 x i32> undef,
    i32 9,
    iXLen %0)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
    <vscale x 2 x i32> undef,
    i32 9,
    iXLen %0)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
    <vscale x 4 x i32> undef,
    i32 9,
    iXLen %0)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
    <vscale x 8 x i32> undef,
    i32 9,
    iXLen %0)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
    <vscale x 16 x i32> undef,
    i32 9,
    iXLen %0)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
    <vscale x 1 x i64> undef,
    i64 9,
    iXLen %0)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
    <vscale x 2 x i64> undef,
    i64 9,
    iXLen %0)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
    <vscale x 4 x i64> undef,
    i64 9,
    iXLen %0)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
    <vscale x 8 x i64> undef,
    i64 9,
    iXLen %0)

  ret <vscale x 8 x i64> %a
}