linux/arch/x86/kernel/cpu/cacheinfo.c

// SPDX-License-Identifier: GPL-2.0
/*
 *	Routines to identify caches on Intel CPUs.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <[email protected]>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>

#include <asm/cpufeature.h>
#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>

#include "cpu.h"

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

/* Shared last level cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Shared L2 cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);

static cpumask_var_t cpu_cacheinfo_mask;

/* Kernel controls MTRR and/or PAT MSRs. */
unsigned int memory_caching_control __ro_after_init;

struct _cache_table {};

#define MB(x)	((x) * 1024)

/*
 * All the cache descriptor types we care about (no TLB or
 * trace cache entries).
 */

static const struct _cache_table cache_table[] = {};
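
/*
 * A minimal sketch of what the (elided) table entries look like; the
 * descriptor bytes are examples from the CPUID(2) encoding in the Intel
 * SDM, sizes are in KB, and a zero descriptor terminates the table:
 *
 *	{ 0x2c, LVL_1_DATA, 32 },	- 32 KB L1 data cache
 *	{ 0x30, LVL_1_INST, 32 },	- 32 KB L1 instruction cache
 *	{ 0x7d, LVL_2,      MB(2) },	- 2 MB unified L2
 *	{ 0x00, 0, 0 }			- end-of-table marker
 */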


enum _cache_type {};

union _cpuid4_leaf_eax {};

union _cpuid4_leaf_ebx {};

union _cpuid4_leaf_ecx {};

struct _cpuid4_info_regs {};
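
/*
 * A minimal sketch (not part of the original file) of how the leaf 4
 * registers above encode a cache's size, per the Intel SDM: ways,
 * partitions, line size and set count are each stored minus one, and
 * size = ways * partitions * line_size * sets. The helper name and the
 * raw-register form are illustrative only.
 */
static inline unsigned long example_cpuid4_cache_size(u32 ebx, u32 ecx)
{
	unsigned int line_size  = (ebx & 0xfff) + 1;		/* EBX[11:0]  */
	unsigned int partitions = ((ebx >> 12) & 0x3ff) + 1;	/* EBX[21:12] */
	unsigned int ways       = ((ebx >> 22) & 0x3ff) + 1;	/* EBX[31:22] */
	unsigned int sets       = ecx + 1;			/* ECX[31:0]  */

	return (unsigned long)ways * partitions * line_size * sets;
}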

static unsigned short num_cache_leaves;

/*
 * AMD doesn't have CPUID4. Emulate it here to report the same
 * information to the user. This makes some assumptions about the machine:
 * L2 not shared, no SMT etc., which are currently true on AMD CPUs.
 *
 * In theory the TLBs could be reported as fake type (they are in "dummy").
 * Maybe later.
 */
union l1_cache {};

union l2_cache {};

union l3_cache {};

static const unsigned short assocs[] = {};

static const unsigned char levels[] = {};
static const unsigned char types[] = {};

static const enum cache_type cache_type_map[] = {};

static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{}
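
/*
 * A sketch of the emulation's first step (the body above is elided):
 * the L1/L2/L3 geometry comes from CPUID 0x80000005/0x80000006, whose
 * register layouts the unions above mirror. Field layouts are per AMD's
 * documentation; the exact body here is illustrative:
 *
 *	union l1_cache l1i, l1d;
 *	union l2_cache l2;
 *	union l3_cache l3;
 *	unsigned int dummy;
 *
 *	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
 *	cpuid(0x80000006, &dummy, &dummy, &l2.val,  &l3.val);
 */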

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{}

/*
 * Check whether a slot used for disabling an L3 index is occupied.
 * @nb:   northbridge holding the L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @return: the disabled index if used or a negative value if the slot is free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{}
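
/*
 * A sketch of the (elided) lookup above, assuming the family 10h+ L3
 * index-disable interface: slot @slot is a 32-bit word in northbridge
 * PCI config space, with the top bits marking the slot as in use:
 *
 *	unsigned int reg = 0;
 *
 *	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
 *	if (reg & (3UL << 30))			- slot already in use
 *		return reg & 0xfff;		- the disabled index
 *	return -1;				- slot is free
 */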

static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
				  unsigned int slot)
{}

#define SHOW_CACHE_DISABLE(slot)
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{}

/*
 * Disable an L3 cache index by using a disable-slot.
 *
 * @nb:    northbridge holding the L3 cache descriptor
 * @cpu:   a CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
			    unsigned slot, unsigned long index)
{}
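
/*
 * A sketch of the checks the comment above describes (body elided):
 * reject a slot that is already used, an index beyond the number of L3
 * indices, and an index the other slot has already disabled, then
 * program the slot:
 *
 *	if (amd_get_l3_disable_slot(nb, slot) >= 0)
 *		return -EEXIST;
 *	if (index > nb->l3_cache.indices)
 *		return -EINVAL;
 *	if (index == amd_get_l3_disable_slot(nb, !slot))
 *		return -EEXIST;
 *	amd_l3_disable_index(nb, cpu, slot, index);
 *	return 0;
 */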

static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{}

#define STORE_CACHE_DISABLE(slot)
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static ssize_t subcaches_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{}

static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{}

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);

static umode_t
cache_private_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{}

static struct attribute_group cache_private_group =;

static void init_amd_l3_attrs(void)
{}

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{}

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{}
#else
#define amd_init_l3_cache(x, y)
#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */

static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{}

static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{}

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id)
{}

void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
{}

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{}

void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{}

void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{}

static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
				    struct _cpuid4_info_regs *base)
{}

static void __cache_cpumap_setup(unsigned int cpu, int index,
				 struct _cpuid4_info_regs *base)
{}

static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct _cpuid4_info_regs *base)
{}

int init_cache_level(unsigned int cpu)
{}

/*
 * The maximum number of threads sharing a cache comes from
 * CPUID.4:EAX[25:14], with the cache index as input in ECX. The APIC ID
 * is then right-shifted by that number's order to obtain the cache ID
 * for this cache node.
 */
static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
{}
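
/*
 * A sketch of the computation described above (body elided; the union
 * field and get_count_order() are the kernel interfaces this file uses):
 *
 *	num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
 *	index_msb = get_count_order(num_threads_sharing);
 *	id4_regs->id = apicid_of(cpu) >> index_msb;
 *
 * apicid_of() stands in for reading the CPU's APIC ID from its
 * cpuinfo_x86; it is a placeholder, not a real kernel helper.
 */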

int populate_cache_leaves(unsigned int cpu)
{}

/*
 * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
 *
 * Since we are disabling the cache, don't allow any interrupts;
 * they would run extremely slowly and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after cache_enable() has been called.
 */
static unsigned long saved_cr4;
static DEFINE_RAW_SPINLOCK(cache_disable_lock);

void cache_disable(void) __acquires(cache_disable_lock)
{}
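
/*
 * A simplified sketch of the sequence (body elided): take the lock,
 * enter no-fill cache mode via CR0.CD, flush caches and TLBs, then
 * disable the MTRRs when the CPU has them. Details such as the
 * self-snoop optimization are omitted here:
 *
 *	raw_spin_lock(&cache_disable_lock);
 *	write_cr0(read_cr0() | X86_CR0_CD);	- no-fill cache mode
 *	wbinvd();				- flush caches
 *	saved_cr4 = __read_cr4();
 *	__write_cr4(saved_cr4 & ~X86_CR4_PGE);	- drop global TLB entries
 *	flush_tlb_local();			- flush remaining TLBs
 *	if (cpu_feature_enabled(X86_FEATURE_MTRR))
 *		mtrr_disable();
 */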

void cache_enable(void) __releases(cache_disable_lock)
{}

static void cache_cpu_init(void)
{}

static bool cache_aps_delayed_init;

void set_cache_aps_delayed_init(bool val)
{}

bool get_cache_aps_delayed_init(void)
{}

static int cache_rendezvous_handler(void *unused)
{}

void __init cache_bp_init(void)
{}

void cache_bp_restore(void)
{}

static int cache_ap_online(unsigned int cpu)
{}

static int cache_ap_offline(unsigned int cpu)
{}

/*
 * Delayed cache initialization for all APs.
 */
void cache_aps_init(void)
{}
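
/*
 * A sketch of the flow (body elided): nothing to do unless the kernel
 * controls the caching MSRs and AP init was actually deferred; otherwise
 * rendezvous all online CPUs so MTRR/PAT state is set consistently:
 *
 *	if (!memory_caching_control || !cache_aps_delayed_init)
 *		return;
 *	stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask);
 *	set_cache_aps_delayed_init(false);
 */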

static int __init cache_ap_register(void)
{}
early_initcall(cache_ap_register);