// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -mvscale-min=4 -mvscale-max=4 -O1 -emit-llvm -o - %s | FileCheck %s
// REQUIRES: aarch64-registered-target
#include <arm_sve.h>
#define N __ARM_FEATURE_SVE_BITS
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
typedef int32_t gnu_int32_t __attribute__((vector_size(N / 8)));
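// Casts between fixed_int32_t and svint32_t are expected to be no-ops:
// the coerced scalable value is returned unchanged.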
// CHECK-LABEL: @to_svint32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 4 x i32> [[TYPE_COERCE:%.*]]
//
svint32_t to_svint32_t(fixed_int32_t type) {
  return type;
}
// CHECK-LABEL: @from_svint32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 4 x i32> [[TYPE:%.*]]
//
fixed_int32_t from_svint32_t(svint32_t type) {
  return type;
}
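// The same holds for the floating-point case.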
// CHECK-LABEL: @to_svfloat64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 2 x double> [[TYPE_COERCE:%.*]]
//
svfloat64_t to_svfloat64_t(fixed_float64_t type) {
  return type;
}
// CHECK-LABEL: @from_svfloat64_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 2 x double> [[TYPE:%.*]]
//
fixed_float64_t from_svfloat64_t(svfloat64_t type) {
  return type;
}
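// Predicate casts between fixed_bool_t and svbool_t are likewise returned
// directly as a <vscale x 16 x i1> value.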
// CHECK-LABEL: @to_svbool_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0:%.*]]
//
svbool_t to_svbool_t(fixed_bool_t type) {
  return type;
}
// CHECK-LABEL: @from_svbool_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret <vscale x 16 x i1> [[TYPE:%.*]]
//
fixed_bool_t from_svbool_t(svbool_t type) {
  return type;
}
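// A lax conversion from fixed_int32_t to svint64_t changes the element type,
// so it goes through memory: the fixed-length value is stored to a stack slot
// and reloaded as the scalable type.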
// CHECK-LABEL: @lax_cast(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <16 x i32>, align 64
// CHECK-NEXT: [[TYPE:%.*]] = tail call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
// CHECK-NEXT: store <16 x i32> [[TYPE]], ptr [[SAVED_VALUE]], align 64, !tbaa [[TBAA6:![0-9]+]]
// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 2 x i64>, ptr [[SAVED_VALUE]], align 64, !tbaa [[TBAA6]]
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t lax_cast(fixed_int32_t type) {
  return type;
}
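// GNU (vector_size) vectors are passed and returned indirectly, so casts to
// and from SVE types load or store the fixed-length value and use
// llvm.vector.insert/llvm.vector.extract to convert to or from the scalable type.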
// CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, ptr [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
  return type;
}
// CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = tail call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], ptr [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
// CHECK-NEXT: ret void
//
gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
  return type;
}
// CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, ptr [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
  return type;
}
// CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TYPE:%.*]] = tail call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
// CHECK-NEXT: store <16 x i32> [[TYPE]], ptr [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
// CHECK-NEXT: ret void
//
gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
  return type;
}