llvm/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-codegen.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=4 -mvscale-max=4 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s

// REQUIRES: riscv-registered-target
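
// With -mvscale-min=4 -mvscale-max=4 and zve64d (minimum VLEN of 64), vscale
// is fixed at 4, so __riscv_v_fixed_vlen is 256 and the LMUL=1 fixed-length
// types below are 256 bits wide.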

#include <riscv_vector.h>

typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;

typedef __rvv_int8m2_t vint8m2_t;
typedef __rvv_uint8m2_t vuint8m2_t;
typedef __rvv_int16m2_t vint16m2_t;
typedef __rvv_uint16m2_t vuint16m2_t;
typedef __rvv_int32m2_t vint32m2_t;
typedef __rvv_uint32m2_t vuint32m2_t;
typedef __rvv_int64m2_t vint64m2_t;
typedef __rvv_uint64m2_t vuint64m2_t;
typedef __rvv_float32m2_t vfloat32m2_t;
typedef __rvv_float64m2_t vfloat64m2_t;
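
// The m4 and m8 element types used below are also provided by <riscv_vector.h>;
// they are spelled out here for consistency with the typedefs above.
typedef __rvv_int16m4_t vint16m4_t;
typedef __rvv_int8m8_t vint8m8_t;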

typedef __rvv_bool1_t vbool1_t;
typedef __rvv_bool4_t vbool4_t;
typedef __rvv_bool32_t vbool32_t;

typedef vint32m1_t fixed_int32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint32m2_t fixed_int32m2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 2)));
typedef vint16m4_t fixed_int16m4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 4)));
typedef vint8m8_t fixed_int8m8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 8)));
typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vbool4_t fixed_bool4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/4)));
typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/32)));
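// At __riscv_v_fixed_vlen == 256 these lower to <8 x i32>, <16 x i32>,
// <64 x i16> and <256 x i8>; the bool types, which hold one bit per mask
// element, lower to <32 x i8>, <8 x i8> and <1 x i8> respectively.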

fixed_int32m1_t global_vec;
fixed_int32m2_t global_vec_m2;
fixed_int8m8_t global_vec_int8m8;
fixed_int16m4_t global_vec_int16m4;
fixed_bool1_t global_bool1;
fixed_bool4_t global_bool4;
fixed_bool32_t global_bool32;

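// Test that the fixed-length vbool1_t and vint8m8_t globals are converted to
// scalable types (the mask via an i8 container and a bitcast) when passed to
// masked intrinsics at LMUL=8.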
// CHECK-LABEL: @test_bool1(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <256 x i8>, align 8
// CHECK-NEXT:    [[M_ADDR:%.*]] = alloca <vscale x 64 x i1>, align 1
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 64 x i8>, align 1
// CHECK-NEXT:    [[MASK:%.*]] = alloca <vscale x 64 x i1>, align 1
// CHECK-NEXT:    store <vscale x 64 x i1> [[M:%.*]], ptr [[M_ADDR]], align 1
// CHECK-NEXT:    store <vscale x 64 x i8> [[VEC:%.*]], ptr [[VEC_ADDR]], align 1
// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 64 x i1>, ptr [[M_ADDR]], align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr @global_bool1, align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[TMP0]], <vscale x 64 x i1> [[TMP2]], i64 256)
// CHECK-NEXT:    store <vscale x 64 x i1> [[TMP3]], ptr [[MASK]], align 1
// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 64 x i1>, ptr [[MASK]], align 1
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 64 x i8>, ptr [[VEC_ADDR]], align 1
// CHECK-NEXT:    [[TMP6:%.*]] = load <256 x i8>, ptr @global_vec_int8m8, align 8
// CHECK-NEXT:    [[CAST_SCALABLE1:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> undef, <256 x i8> [[TMP6]], i64 0)
// CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[TMP5]], <vscale x 64 x i8> [[CAST_SCALABLE1]], <vscale x 64 x i1> [[TMP4]], i64 256, i64 3)
// CHECK-NEXT:    [[CAST_FIXED:%.*]] = call <256 x i8> @llvm.vector.extract.v256i8.nxv64i8(<vscale x 64 x i8> [[TMP7]], i64 0)
// CHECK-NEXT:    store <256 x i8> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP8:%.*]] = load <256 x i8>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE2:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.v256i8(<vscale x 64 x i8> undef, <256 x i8> [[TMP8]], i64 0)
// CHECK-NEXT:    ret <vscale x 64 x i8> [[CAST_SCALABLE2]]
//
fixed_int8m8_t test_bool1(vbool1_t m, vint8m8_t vec) {
  vbool1_t mask = __riscv_vmand(m, global_bool1, __riscv_v_fixed_vlen);
  return __riscv_vadd(mask, vec, global_vec_int8m8, __riscv_v_fixed_vlen);
}

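// Same conversion for vbool4_t and vint16m4_t at LMUL=4.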
// CHECK-LABEL: @test_bool4(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <64 x i16>, align 8
// CHECK-NEXT:    [[M_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 1
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 16 x i16>, align 2
// CHECK-NEXT:    [[MASK:%.*]] = alloca <vscale x 16 x i1>, align 1
// CHECK-NEXT:    store <vscale x 16 x i1> [[M:%.*]], ptr [[M_ADDR]], align 1
// CHECK-NEXT:    store <vscale x 16 x i16> [[VEC:%.*]], ptr [[VEC_ADDR]], align 2
// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[M_ADDR]], align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @global_bool4, align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], i64 64)
// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], ptr [[MASK]], align 1
// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i1>, ptr [[MASK]], align 1
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i16>, ptr [[VEC_ADDR]], align 2
// CHECK-NEXT:    [[TMP6:%.*]] = load <64 x i16>, ptr @global_vec_int16m4, align 8
// CHECK-NEXT:    [[CAST_SCALABLE1:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.v64i16(<vscale x 16 x i16> undef, <64 x i16> [[TMP6]], i64 0)
// CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[TMP5]], <vscale x 16 x i16> [[CAST_SCALABLE1]], <vscale x 16 x i1> [[TMP4]], i64 64, i64 3)
// CHECK-NEXT:    [[CAST_FIXED:%.*]] = call <64 x i16> @llvm.vector.extract.v64i16.nxv16i16(<vscale x 16 x i16> [[TMP7]], i64 0)
// CHECK-NEXT:    store <64 x i16> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP8:%.*]] = load <64 x i16>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE2:%.*]] = call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.v64i16(<vscale x 16 x i16> undef, <64 x i16> [[TMP8]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i16> [[CAST_SCALABLE2]]
//
fixed_int16m4_t test_bool4(vbool4_t m, vint16m4_t vec) {
  vbool4_t mask = __riscv_vmand(m, global_bool4, __riscv_v_fixed_vlen/4);
  return __riscv_vadd(mask, vec, global_vec_int16m4, __riscv_v_fixed_vlen/4);
}

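// fixed_bool32_t is only 8 bits (<1 x i8>) at this VLEN, so the conversion to
// a scalable mask goes through a memory round-trip rather than a bitcast.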
// CHECK-LABEL: @test_bool32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT:    [[M_ADDR:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT:    [[MASK:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <1 x i8>, align 1
// CHECK-NEXT:    store <vscale x 2 x i1> [[M:%.*]], ptr [[M_ADDR]], align 1
// CHECK-NEXT:    store <vscale x 2 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 2 x i1>, ptr [[M_ADDR]], align 1
// CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i8>, ptr @global_bool32, align 1
// CHECK-NEXT:    store <1 x i8> [[TMP1]], ptr [[SAVED_VALUE]], align 1
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[SAVED_VALUE]], align 1
// CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i1> [[TMP2]], i64 8)
// CHECK-NEXT:    store <vscale x 2 x i1> [[TMP3]], ptr [[MASK]], align 1
// CHECK-NEXT:    [[TMP4:%.*]] = load <vscale x 2 x i1>, ptr [[MASK]], align 1
// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP6:%.*]] = load <8 x i32>, ptr @global_vec, align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP6]], i64 0)
// CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[CAST_SCALABLE]], <vscale x 2 x i1> [[TMP4]], i64 8, i64 3)
// CHECK-NEXT:    [[CAST_FIXED:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP7]], i64 0)
// CHECK-NEXT:    store <8 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP8:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP8]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE1]]
//
fixed_int32m1_t test_bool32(vbool32_t m, vint32m1_t vec) {
  vbool32_t mask = __riscv_vmand(m, global_bool32, __riscv_v_fixed_vlen/32);
  return __riscv_vadd(mask, vec, global_vec, __riscv_v_fixed_vlen/32);
}

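// Test returning a fixed-length global read through a pointer; the loaded
// value is converted to a scalable vector for the return.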
// CHECK-LABEL: @test_ptr_to_global(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT:    [[GLOBAL_VEC_PTR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr @global_vec, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[TMP0]], align 8
// CHECK-NEXT:    store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t test_ptr_to_global() {
  fixed_int32m1_t *global_vec_ptr;
  global_vec_ptr = &global_vec;
  return *global_vec_ptr;
}

//
// Test indexing an array of fixed-length vectors; the loaded element is
// converted to a scalable vector for the return.
// CHECK-LABEL: @array_arg(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT:    [[ARR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr [[ARR:%.*]], ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds <8 x i32>, ptr [[TMP0]], i64 0
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 8
// CHECK-NEXT:    store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t array_arg(fixed_int32m1_t arr[]) {
  return arr[0];
}

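// Test taking the address of an element of an array of fixed-length bool
// vectors and returning the pointee.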
// CHECK-LABEL: @address_of_array_idx_bool1(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <32 x i8>, align 8
// CHECK-NEXT:    [[ARR:%.*]] = alloca [3 x <32 x i8>], align 8
// CHECK-NEXT:    [[PARR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <32 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT:    store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr [[TMP0]], align 8
// CHECK-NEXT:    store <32 x i8> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <32 x i8>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP2]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-NEXT:    ret <vscale x 64 x i1> [[TMP3]]
//
fixed_bool1_t address_of_array_idx_bool1() {
  fixed_bool1_t arr[3];
  fixed_bool1_t *parr;
  parr = &arr[0];
  return *parr;
}

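// Same as above with fixed_bool4_t elements (<8 x i8>).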
// CHECK-LABEL: @address_of_array_idx_bool4(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i8>, align 8
// CHECK-NEXT:    [[ARR:%.*]] = alloca [3 x <8 x i8>], align 8
// CHECK-NEXT:    [[PARR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT:    store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[TMP0]], align 8
// CHECK-NEXT:    store <8 x i8> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP2]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP3]]
//
fixed_bool4_t address_of_array_idx_bool4() {
  fixed_bool4_t arr[3];
  fixed_bool4_t *parr;
  parr = &arr[0];
  return *parr;
}

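// The <1 x i8> fixed_bool32_t return value is coerced into the scalable mask
// type through memory (memcpy into a coercion slot).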
// CHECK-LABEL: @address_of_array_idx_bool32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <1 x i8>, align 1
// CHECK-NEXT:    [[ARR:%.*]] = alloca [3 x <1 x i8>], align 1
// CHECK-NEXT:    [[PARR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <1 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT:    store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i8>, ptr [[TMP0]], align 1
// CHECK-NEXT:    store <1 x i8> [[TMP1]], ptr [[RETVAL]], align 1
// CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[RETVAL_COERCE]], ptr align 1 [[RETVAL]], i64 1, i1 false)
// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
// CHECK-NEXT:    ret <vscale x 2 x i1> [[TMP2]]
//
fixed_bool32_t address_of_array_idx_bool32() {
  fixed_bool32_t arr[3];
  fixed_bool32_t *parr;
  parr = &arr[0];
  return *parr;
}

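// Test passing a fixed-length global together with a scalable argument to an
// unmasked intrinsic; the global is converted to a scalable vector first.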
// CHECK-LABEL: @test_cast(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i32>, align 8
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 2 x i32>, align 4
// CHECK-NEXT:    store <vscale x 2 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr @global_vec, align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP0]], i64 0)
// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[CAST_SCALABLE]], <vscale x 2 x i32> [[TMP1]], i64 8)
// CHECK-NEXT:    [[CAST_FIXED:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    store <8 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP3]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE1]]
//
fixed_int32m1_t test_cast(vint32m1_t vec) {
  return __riscv_vadd(global_vec, vec, __riscv_v_fixed_vlen/32);
}

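// LMUL=2 variants of the pointer, array and intrinsic tests above.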
// CHECK-LABEL: @test_ptr_to_global_m2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 8
// CHECK-NEXT:    [[GLOBAL_VEC_PTR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr @global_vec_m2, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[TMP0]], align 8
// CHECK-NEXT:    store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CAST_SCALABLE]]
//
fixed_int32m2_t test_ptr_to_global_m2() {
  fixed_int32m2_t *global_vec_ptr;
  global_vec_ptr = &global_vec_m2;
  return *global_vec_ptr;
}

//
// Test indexing an array of LMUL=2 fixed-length vectors; the loaded element is
// converted to a scalable vector for the return.
// CHECK-LABEL: @array_arg_m2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 8
// CHECK-NEXT:    [[ARR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr [[ARR:%.*]], ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds <16 x i32>, ptr [[TMP0]], i64 0
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[ARRAYIDX]], align 8
// CHECK-NEXT:    store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CAST_SCALABLE]]
//
fixed_int32m2_t array_arg_m2(fixed_int32m2_t arr[]) {
  return arr[0];
}

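// Unmasked vadd with an LMUL=2 fixed-length global and a scalable operand.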
// CHECK-LABEL: @test_cast_m2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 8
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 4
// CHECK-NEXT:    store <vscale x 4 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load <16 x i32>, ptr @global_vec_m2, align 8
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP0]], i64 0)
// CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[VEC_ADDR]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[CAST_SCALABLE]], <vscale x 4 x i32> [[TMP1]], i64 16)
// CHECK-NEXT:    [[CAST_FIXED:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    store <16 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
// CHECK-NEXT:    [[CAST_SCALABLE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP3]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CAST_SCALABLE1]]
//
fixed_int32m2_t test_cast_m2(vint32m2_t vec) {
  return __riscv_vadd(global_vec_m2, vec, __riscv_v_fixed_vlen/16);
}