chromium/third_party/cpuinfo/src/include/cpuinfo.h

#pragma once
#ifndef CPUINFO_H
#define CPUINFO_H

#ifndef __cplusplus
#include <stdbool.h>
#endif

#ifdef __APPLE__
#include <TargetConditionals.h>
#endif

#include <stdint.h>

/* Identify the architecture and define the corresponding macro as 1 */

#if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86)
#define CPUINFO_ARCH_X86 1
#endif

#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
#define CPUINFO_ARCH_X86_64 1
#endif

#if defined(__arm__) || defined(_M_ARM)
#define CPUINFO_ARCH_ARM 1
#endif

#if defined(__aarch64__) || defined(_M_ARM64)
#define CPUINFO_ARCH_ARM64 1
#endif

#if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64)
#define CPUINFO_ARCH_PPC64 1
#endif

#if defined(__asmjs__)
#define CPUINFO_ARCH_ASMJS 1
#endif

#if defined(__wasm__)
#if defined(__wasm_simd128__)
#define CPUINFO_ARCH_WASMSIMD 1
#else
#define CPUINFO_ARCH_WASM 1
#endif
#endif

#if defined(__riscv)
#if (__riscv_xlen == 32)
#define CPUINFO_ARCH_RISCV32 1
#elif (__riscv_xlen == 64)
#define CPUINFO_ARCH_RISCV64 1
#endif
#endif

/* Define other architecture-specific macros as 0 */

#ifndef CPUINFO_ARCH_X86
#define CPUINFO_ARCH_X86 0
#endif

#ifndef CPUINFO_ARCH_X86_64
#define CPUINFO_ARCH_X86_64 0
#endif

#ifndef CPUINFO_ARCH_ARM
#define CPUINFO_ARCH_ARM 0
#endif

#ifndef CPUINFO_ARCH_ARM64
#define CPUINFO_ARCH_ARM64 0
#endif

#ifndef CPUINFO_ARCH_PPC64
#define CPUINFO_ARCH_PPC64 0
#endif

#ifndef CPUINFO_ARCH_ASMJS
#define CPUINFO_ARCH_ASMJS 0
#endif

#ifndef CPUINFO_ARCH_WASM
#define CPUINFO_ARCH_WASM 0
#endif

#ifndef CPUINFO_ARCH_WASMSIMD
#define CPUINFO_ARCH_WASMSIMD 0
#endif

#ifndef CPUINFO_ARCH_RISCV32
#define CPUINFO_ARCH_RISCV32 0
#endif

#ifndef CPUINFO_ARCH_RISCV64
#define CPUINFO_ARCH_RISCV64 0
#endif
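
/*
 * Every CPUINFO_ARCH_* macro is always defined as either 1 or 0, so the macros
 * can be used directly in preprocessor conditionals. A minimal usage sketch
 * (the guarded code is a hypothetical example, not part of this header):
 *
 *   #if CPUINFO_ARCH_X86_64 || CPUINFO_ARCH_ARM64
 *   // 64-bit-only code path
 *   #endif
 */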

#if CPUINFO_ARCH_X86 && defined(_MSC_VER)
#define CPUINFO_ABI __cdecl
#elif CPUINFO_ARCH_X86 && defined(__GNUC__)
#define CPUINFO_ABI __attribute__((__cdecl__))
#else
#define CPUINFO_ABI
#endif

#define CPUINFO_CACHE_UNIFIED
#define CPUINFO_CACHE_INCLUSIVE
#define CPUINFO_CACHE_COMPLEX_INDEXING

struct cpuinfo_cache {};

struct cpuinfo_trace_cache {};

#define CPUINFO_PAGE_SIZE_4KB
#define CPUINFO_PAGE_SIZE_1MB
#define CPUINFO_PAGE_SIZE_2MB
#define CPUINFO_PAGE_SIZE_4MB
#define CPUINFO_PAGE_SIZE_16MB
#define CPUINFO_PAGE_SIZE_1GB

struct cpuinfo_tlb {};

/** Vendor of processor core design */
enum cpuinfo_vendor {};

/**
 * Processor microarchitecture
 *
 * Processors with different microarchitectures often have different instruction
 * performance characteristics, and may have dramatically different pipeline
 * organization.
 */
enum cpuinfo_uarch {};

struct cpuinfo_processor {};

struct cpuinfo_core {};

struct cpuinfo_cluster {};

#define CPUINFO_PACKAGE_NAME_MAX

struct cpuinfo_package {};

struct cpuinfo_uarch_info {};

#ifdef __cplusplus
extern "C" {
#endif

bool CPUINFO_ABI cpuinfo_initialize(void);

void CPUINFO_ABI cpuinfo_deinitialize(void);
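
/*
 * Usage sketch (illustrative; not part of the API surface): callers are
 * expected to initialize the library, check the result, and deinitialize it
 * once cpuinfo data is no longer needed. The error handling shown here is an
 * assumption for the example.
 *
 *   #include <cpuinfo.h>
 *
 *   if (!cpuinfo_initialize()) {
 *       // No CPU information is available; fall back to conservative defaults.
 *       return;
 *   }
 *   const uint32_t cores = cpuinfo_get_cores_count();
 *   // ... query topology, caches, and ISA features here ...
 *   cpuinfo_deinitialize();
 */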

#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
/* This structure is not part of the stable API. Use the cpuinfo_has_x86_*
 * functions instead. */
struct cpuinfo_x86_isa {};

extern struct cpuinfo_x86_isa cpuinfo_isa;
#endif
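
/*
 * Runtime dispatch sketch (illustrative; not part of the API surface): the
 * cpuinfo_has_x86_* accessors below can be used to pick the best available
 * kernel after cpuinfo_initialize() has succeeded. The gemm_* function names
 * are hypothetical.
 *
 *   if (cpuinfo_has_x86_avx2()) {
 *       gemm = gemm_avx2;       // hypothetical AVX2 implementation
 *   } else if (cpuinfo_has_x86_sse4_1()) {
 *       gemm = gemm_sse41;      // hypothetical SSE4.1 implementation
 *   } else {
 *       gemm = gemm_scalar;     // portable fallback
 *   }
 */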

static inline bool cpuinfo_has_x86_rdtsc(void) {}

static inline bool cpuinfo_has_x86_rdtscp(void) {}

static inline bool cpuinfo_has_x86_rdpid(void) {}

static inline bool cpuinfo_has_x86_clzero(void) {}

static inline bool cpuinfo_has_x86_mwait(void) {}

static inline bool cpuinfo_has_x86_mwaitx(void) {}

static inline bool cpuinfo_has_x86_fxsave(void) {}

static inline bool cpuinfo_has_x86_xsave(void) {}

static inline bool cpuinfo_has_x86_fpu(void) {}

static inline bool cpuinfo_has_x86_mmx(void) {}

static inline bool cpuinfo_has_x86_mmx_plus(void) {}

static inline bool cpuinfo_has_x86_3dnow(void) {}

static inline bool cpuinfo_has_x86_3dnow_plus(void) {}

static inline bool cpuinfo_has_x86_3dnow_geode(void) {}

static inline bool cpuinfo_has_x86_prefetch(void) {}

static inline bool cpuinfo_has_x86_prefetchw(void) {}

static inline bool cpuinfo_has_x86_prefetchwt1(void) {}

static inline bool cpuinfo_has_x86_daz(void) {}

static inline bool cpuinfo_has_x86_sse(void) {}

static inline bool cpuinfo_has_x86_sse2(void) {}

static inline bool cpuinfo_has_x86_sse3(void) {}

static inline bool cpuinfo_has_x86_ssse3(void) {}

static inline bool cpuinfo_has_x86_sse4_1(void) {}

static inline bool cpuinfo_has_x86_sse4_2(void) {}

static inline bool cpuinfo_has_x86_sse4a(void) {}

static inline bool cpuinfo_has_x86_misaligned_sse(void) {}

static inline bool cpuinfo_has_x86_avx(void) {}

static inline bool cpuinfo_has_x86_avxvnni(void) {}

static inline bool cpuinfo_has_x86_fma3(void) {}

static inline bool cpuinfo_has_x86_fma4(void) {}

static inline bool cpuinfo_has_x86_xop(void) {}

static inline bool cpuinfo_has_x86_f16c(void) {}

static inline bool cpuinfo_has_x86_avx2(void) {}

static inline bool cpuinfo_has_x86_avx512f(void) {}

static inline bool cpuinfo_has_x86_avx512pf(void) {}

static inline bool cpuinfo_has_x86_avx512er(void) {}

static inline bool cpuinfo_has_x86_avx512cd(void) {}

static inline bool cpuinfo_has_x86_avx512dq(void) {}

static inline bool cpuinfo_has_x86_avx512bw(void) {}

static inline bool cpuinfo_has_x86_avx512vl(void) {}

static inline bool cpuinfo_has_x86_avx512ifma(void) {}

static inline bool cpuinfo_has_x86_avx512vbmi(void) {}

static inline bool cpuinfo_has_x86_avx512vbmi2(void) {}

static inline bool cpuinfo_has_x86_avx512bitalg(void) {}

static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) {}

static inline bool cpuinfo_has_x86_avx512vnni(void) {}

static inline bool cpuinfo_has_x86_avx512bf16(void) {}

static inline bool cpuinfo_has_x86_avx512fp16(void) {}

static inline bool cpuinfo_has_x86_avx512vp2intersect(void) {}

static inline bool cpuinfo_has_x86_avx512_4vnniw(void) {}

static inline bool cpuinfo_has_x86_avx512_4fmaps(void) {}

/* [NOTE] Intel Advanced Matrix Extensions (AMX) detection
 *
 * I.  AMX is an extension to the x86 ISA for operating on matrices. It consists of:
 *   1) 2-dimensional registers (tiles) that hold sub-matrices loaded from larger matrices in memory
 *   2) an accelerator called Tile Matrix Multiply (TMUL) that provides instructions operating on tiles
 *
 * II. Platforms that support AMX:
 * +-----------------+-----+----------+----------+----------+----------+
 * |    Platforms    | Gen | amx-bf16 | amx-tile | amx-int8 | amx-fp16 |
 * +-----------------+-----+----------+----------+----------+----------+
 * | Sapphire Rapids | 4th |   YES    |   YES    |   YES    |    NO    |
 * +-----------------+-----+----------+----------+----------+----------+
 * | Emerald Rapids  | 5th |   YES    |   YES    |   YES    |    NO    |
 * +-----------------+-----+----------+----------+----------+----------+
 * | Granite Rapids  | 6th |   YES    |   YES    |   YES    |   YES    |
 * +-----------------+-----+----------+----------+----------+----------+
 *
 * Reference: https://www.intel.com/content/www/us/en/products/docs
 *    /accelerator-engines/advanced-matrix-extensions/overview.html
 */
static inline bool cpuinfo_has_x86_amx_bf16(void) {}

static inline bool cpuinfo_has_x86_amx_tile(void) {}

static inline bool cpuinfo_has_x86_amx_int8(void) {}

static inline bool cpuinfo_has_x86_amx_fp16(void) {}
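
/*
 * Illustrative AMX check (not part of the API surface): AMX-TILE is the base
 * feature for the data-type-specific AMX extensions, so a caller would
 * typically gate an INT8 tile kernel on both accessors. The kernel selection
 * is a hypothetical example.
 *
 *   if (cpuinfo_has_x86_amx_tile() && cpuinfo_has_x86_amx_int8()) {
 *       // select a hypothetical AMX INT8 matrix-multiplication kernel
 *   }
 */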

/*
 * Intel AVX Vector Neural Network Instructions (VNNI) INT8
 * Supported Platforms: Sierra Forest, Arrow Lake, Lunar Lake
 */
static inline bool cpuinfo_has_x86_avx_vnni_int8(void) {}

/*
 * Intel AVX Vector Neural Network Instructions (VNNI) INT16
 * Supported Platforms: Arrow Lake, Lunar Lake
 */
static inline bool cpuinfo_has_x86_avx_vnni_int16(void) {}

/*
 * A set of instructions that convert low-precision floating-point formats
 * such as BF16 and FP16 to single-precision FP32, and convert FP32 elements
 * to BF16. These instructions give the platform improved AI capabilities and
 * better compatibility.
 *
 * Supported Platforms: Sierra Forest, Arrow Lake, Lunar Lake
 */
static inline bool cpuinfo_has_x86_avx_ne_convert(void) {}

static inline bool cpuinfo_has_x86_hle(void) {}

static inline bool cpuinfo_has_x86_rtm(void) {}

static inline bool cpuinfo_has_x86_xtest(void) {}

static inline bool cpuinfo_has_x86_mpx(void) {}

static inline bool cpuinfo_has_x86_cmov(void) {}

static inline bool cpuinfo_has_x86_cmpxchg8b(void) {}

static inline bool cpuinfo_has_x86_cmpxchg16b(void) {}

static inline bool cpuinfo_has_x86_clwb(void) {}

static inline bool cpuinfo_has_x86_movbe(void) {}

static inline bool cpuinfo_has_x86_lahf_sahf(void) {}

static inline bool cpuinfo_has_x86_lzcnt(void) {}

static inline bool cpuinfo_has_x86_popcnt(void) {}

static inline bool cpuinfo_has_x86_tbm(void) {}

static inline bool cpuinfo_has_x86_bmi(void) {}

static inline bool cpuinfo_has_x86_bmi2(void) {}

static inline bool cpuinfo_has_x86_adx(void) {}

static inline bool cpuinfo_has_x86_aes(void) {}

static inline bool cpuinfo_has_x86_vaes(void) {}

static inline bool cpuinfo_has_x86_pclmulqdq(void) {}

static inline bool cpuinfo_has_x86_vpclmulqdq(void) {}

static inline bool cpuinfo_has_x86_gfni(void) {}

static inline bool cpuinfo_has_x86_rdrand(void) {}

static inline bool cpuinfo_has_x86_rdseed(void) {}

static inline bool cpuinfo_has_x86_sha(void) {}

#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
/* This structure is not part of the stable API. Use the cpuinfo_has_arm_*
 * functions instead. */
struct cpuinfo_arm_isa {
#if CPUINFO_ARCH_ARM
	bool thumb;
	bool thumb2;
	bool thumbee;
	bool jazelle;
	bool armv5e;
	bool armv6;
	bool armv6k;
	bool armv7;
	bool armv7mp;
	bool armv8;
	bool idiv;

	bool vfpv2;
	bool vfpv3;
	bool d32;
	bool fp16;
	bool fma;

	bool wmmx;
	bool wmmx2;
	bool neon;
#endif
#if CPUINFO_ARCH_ARM64
	bool atomics;
	bool bf16;
	bool sve;
	bool sve2;
	bool i8mm;
#endif
	bool rdm;
	bool fp16arith;
	bool dot;
	bool jscvt;
	bool fcma;
	bool fhm;

	bool aes;
	bool sha1;
	bool sha2;
	bool pmull;
	bool crc32;
};

extern struct cpuinfo_arm_isa cpuinfo_isa;
#endif
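
/*
 * Runtime dispatch sketch for ARM (illustrative; not part of the API surface):
 * the cpuinfo_has_arm_* accessors below follow the same pattern as the x86
 * accessors. The igemm_* function names are hypothetical.
 *
 *   if (cpuinfo_has_arm_neon_dot()) {
 *       igemm = igemm_neondot;  // hypothetical SDOT/UDOT implementation
 *   } else if (cpuinfo_has_arm_neon()) {
 *       igemm = igemm_neon;     // hypothetical plain NEON implementation
 *   } else {
 *       igemm = igemm_scalar;   // portable fallback
 *   }
 */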

static inline bool cpuinfo_has_arm_thumb(void) {}

static inline bool cpuinfo_has_arm_thumb2(void) {}

static inline bool cpuinfo_has_arm_v5e(void) {}

static inline bool cpuinfo_has_arm_v6(void) {}

static inline bool cpuinfo_has_arm_v6k(void) {}

static inline bool cpuinfo_has_arm_v7(void) {}

static inline bool cpuinfo_has_arm_v7mp(void) {}

static inline bool cpuinfo_has_arm_v8(void) {}

static inline bool cpuinfo_has_arm_idiv(void) {}

static inline bool cpuinfo_has_arm_vfpv2(void) {}

static inline bool cpuinfo_has_arm_vfpv3(void) {}

static inline bool cpuinfo_has_arm_vfpv3_d32(void) {}

static inline bool cpuinfo_has_arm_vfpv3_fp16(void) {}

static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) {}

static inline bool cpuinfo_has_arm_vfpv4(void) {}

static inline bool cpuinfo_has_arm_vfpv4_d32(void) {}

static inline bool cpuinfo_has_arm_fp16_arith(void) {}

static inline bool cpuinfo_has_arm_bf16(void) {}

static inline bool cpuinfo_has_arm_wmmx(void) {}

static inline bool cpuinfo_has_arm_wmmx2(void) {}

static inline bool cpuinfo_has_arm_neon(void) {}

static inline bool cpuinfo_has_arm_neon_fp16(void) {}

static inline bool cpuinfo_has_arm_neon_fma(void) {}

static inline bool cpuinfo_has_arm_neon_v8(void) {}

static inline bool cpuinfo_has_arm_atomics(void) {}

static inline bool cpuinfo_has_arm_neon_rdm(void) {}

static inline bool cpuinfo_has_arm_neon_fp16_arith(void) {}

static inline bool cpuinfo_has_arm_fhm(void) {}

static inline bool cpuinfo_has_arm_neon_dot(void) {}

static inline bool cpuinfo_has_arm_neon_bf16(void) {}

static inline bool cpuinfo_has_arm_jscvt(void) {}

static inline bool cpuinfo_has_arm_fcma(void) {}

static inline bool cpuinfo_has_arm_i8mm(void) {}

static inline bool cpuinfo_has_arm_aes(void) {}

static inline bool cpuinfo_has_arm_sha1(void) {}

static inline bool cpuinfo_has_arm_sha2(void) {}

static inline bool cpuinfo_has_arm_pmull(void) {}

static inline bool cpuinfo_has_arm_crc32(void) {}

static inline bool cpuinfo_has_arm_sve(void) {}

static inline bool cpuinfo_has_arm_sve_bf16(void) {}

static inline bool cpuinfo_has_arm_sve2(void) {}

#if CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
/* This structure is not part of the stable API. Use the cpuinfo_has_riscv_*
 * functions instead. */
struct cpuinfo_riscv_isa {
	/**
	 * Keep fields in line with the canonical order as defined by
	 * Section 27.11 Subset Naming Convention.
	 */
	/* RV32I/64I/128I Base ISA. */
	bool i;
#if CPUINFO_ARCH_RISCV32
	/* RV32E Base ISA. */
	bool e;
#endif
	/* Integer Multiply/Divide Extension. */
	bool m;
	/* Atomic Extension. */
	bool a;
	/* Single-Precision Floating-Point Extension. */
	bool f;
	/* Double-Precision Floating-Point Extension. */
	bool d;
	/* Compressed Extension. */
	bool c;
	/* Vector Extension. */
	bool v;
};

extern struct cpuinfo_riscv_isa cpuinfo_isa;
#endif

static inline bool cpuinfo_has_riscv_i(void) {}

static inline bool cpuinfo_has_riscv_e(void) {}

static inline bool cpuinfo_has_riscv_m(void) {}

static inline bool cpuinfo_has_riscv_a(void) {}

static inline bool cpuinfo_has_riscv_f(void) {}

static inline bool cpuinfo_has_riscv_d(void) {}

static inline bool cpuinfo_has_riscv_g(void) {}

static inline bool cpuinfo_has_riscv_c(void) {}

static inline bool cpuinfo_has_riscv_v(void) {}
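
/*
 * Illustrative RISC-V check (not part of the API surface): a caller can gate a
 * vector kernel on the V extension in the same way as the x86 and ARM
 * accessors above. The kernel selection is a hypothetical example.
 *
 *   if (cpuinfo_has_riscv_v()) {
 *       // select a hypothetical RVV implementation
 *   }
 */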

const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void);
const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void);

const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index);
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index);
const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index);
const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index);
const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index);
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index);

uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void);
uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void);
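
/*
 * Topology enumeration sketch (illustrative; not part of the API surface): the
 * cpuinfo_get_*_count() functions bound the indices accepted by the
 * corresponding indexed accessors above.
 *
 *   const uint32_t count = cpuinfo_get_cores_count();
 *   for (uint32_t i = 0; i < count; i++) {
 *       const struct cpuinfo_core* core = cpuinfo_get_core(i);
 *       // inspect the core (struct fields are elided in this header excerpt)
 *   }
 */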

/**
 * Returns an upper bound on the cache size.
 */
uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void);

/**
 * Identify the logical processor that executes the current thread.
 *
 * There is no guarantee that the thread will stay on the same logical processor
 * for any length of time. Callers should treat the result only as a hint, and
 * be prepared to handle a NULL return value.
 */
const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void);

/**
 * Identify the core that executes the current thread.
 *
 * There is no guarantee that the thread will stay on the same core for any
 * length of time. Callers should treat the result only as a hint, and be
 * prepared to handle a NULL return value.
 */
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void);

/**
 * Identify the microarchitecture index of the core that executes the current
 * thread. If the system does not support such identification, the function
 * returns 0.
 *
 * There is no guarantee that the thread will stay on the same type of core for
 * any length of time. Callers should treat the result only as a hint.
 */
uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void);

/**
 * Identify the microarchitecture index of the core that executes the current
 * thread. If the system does not support such identification, the function
 * returns the user-specified default value.
 *
 * There is no guarantee that the thread will stay on the same type of core for
 * any length of time. Callers should treat the result only as a hint.
 */
uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index);
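
/*
 * Per-microarchitecture dispatch sketch (illustrative; not part of the API
 * surface): the uarch index is suitable for indexing per-uarch data, such as a
 * kernel table with one entry per cpuinfo_get_uarchs_count() microarchitecture.
 * The kernel_table array and kernel_fn type are hypothetical.
 *
 *   kernel_fn kernel = kernel_table[cpuinfo_get_current_uarch_index_with_default(0)];
 */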

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* CPUINFO_H */