#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>
#include <asm/cpufeature.h>
#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include "cpu.h"
/*
 * Cache-level classification constants used when decoding the legacy
 * CPUID leaf 0x2 descriptor table (values elided in this view).
 */
#define LVL_1_INST …
#define LVL_1_DATA …
#define LVL_2 …
#define LVL_3 …
#define LVL_TRACE …
/* Per-CPU masks of CPUs that share this CPU's last-level and L2 caches. */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
/* Scratch cpumask used during cacheinfo bring-up (allocation site elided). */
static cpumask_var_t cpu_cacheinfo_mask;
/*
 * Which memory-caching facilities the kernel must program on each CPU;
 * presumably MTRR/PAT control flags — confirm against the elided users
 * (cache_cpu_init() et al.).  Written once at boot, hence __ro_after_init.
 */
unsigned int memory_caching_control __ro_after_init;
/* One entry of the legacy CPUID leaf 0x2 descriptor table (fields elided). */
struct _cache_table { … };
/* Megabyte size helper macro (expansion elided). */
#define MB(x) …
/* Descriptor -> cache size/level lookup table for CPUID leaf 0x2. */
static const struct _cache_table cache_table[] = …;
/* Cache types as encoded by CPUID leaf 4 EAX[4:0] (enumerators elided). */
enum _cache_type { … };
/*
 * NOTE(review): the three declarations below look truncated by extraction.
 * Upstream these are "union _cpuid4_leaf_{eax,ebx,ecx} { ... };" register
 * layouts for CPUID leaf 4 — confirm against the original file before
 * relying on them.
 */
_cpuid4_leaf_eax;
_cpuid4_leaf_ebx;
_cpuid4_leaf_ecx;
/* Snapshot of one CPUID leaf-4 cache leaf plus derived data (fields elided). */
struct _cpuid4_info_regs { … };
/* Number of cache leaves this CPU reports; cached by find_num_cache_leaves(). */
static unsigned short num_cache_leaves;
/*
 * NOTE(review): these also appear truncated — likely the AMD legacy
 * CPUID 0x80000005/0x80000006 L1/L2/L3 register unions.  Verify.
 */
l1_cache;
l2_cache;
l3_cache;
/* Raw CPUID encoding -> associativity/level/type lookup tables (elided). */
static const unsigned short assocs[] = …;
static const unsigned char levels[] = …;
static const unsigned char types[] = …;
/* Maps the x86 _cache_type encoding onto the generic enum cache_type. */
static const enum cache_type cache_type_map[] = …;
/*
 * Synthesize CPUID leaf 4 style output for AMD CPUs that lack the leaf,
 * filling @eax/@ebx/@ecx for cache leaf @leaf.  Body elided; presumably
 * derives the values from the legacy AMD cache CPUID leaves (l1_cache /
 * l2_cache / l3_cache above) — confirm against upstream.
 */
static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
union _cpuid4_leaf_ecx *ecx)
{ … }
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
 * Compute the usable L3 index count for @nb (AMD northbridge).  Body
 * elided; presumably accounts for disabled L3 indices/subcaches.
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{ … }
/*
 * Read back which L3 cache index is disabled in @slot (0 or 1) of @nb.
 * Body elided; presumably returns the disabled index or a negative value
 * when the slot is unused — confirm against callers.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{ … }
/*
 * sysfs show helper for the cache_disable_<slot> attributes: formats the
 * currently disabled L3 index for @slot into @buf (body elided).
 */
static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
unsigned int slot)
{ … }
/* Generates the per-slot show_cache_disable_<slot>() wrappers (elided). */
#define SHOW_CACHE_DISABLE(slot) …
SHOW_CACHE_DISABLE(…)
SHOW_CACHE_DISABLE(…)
/*
 * Low-level write that disables L3 cache index @idx in @slot on behalf of
 * @cpu via the northbridge @nb (body elided — likely a PCI config write).
 */
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
unsigned slot, unsigned long idx)
{ … }
/*
 * Validate and program L3 index @index into disable @slot for @cpu.
 * Body elided; presumably rejects already-disabled or duplicate indices
 * and returns 0 on success / negative errno on failure — confirm.
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
unsigned slot, unsigned long index)
{ … }
/*
 * sysfs store helper for cache_disable_<slot>: parses an index from @buf
 * and disables it in the L3 via amd_set_l3_disable_slot() (body elided;
 * presumably requires CAP_SYS_ADMIN given the <linux/capability.h>
 * include — confirm).
 */
static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
const char *buf, size_t count,
unsigned int slot)
{ … }
/* Generates the per-slot store_cache_disable_<slot>() wrappers (elided). */
#define STORE_CACHE_DISABLE(slot) …
STORE_CACHE_DISABLE(…)
STORE_CACHE_DISABLE(…)
/* sysfs show: report the CPU's L3 subcache assignment mask (body elided). */
static ssize_t subcaches_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
/* sysfs store: parse and program a new subcache mask (body elided). */
static ssize_t subcaches_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{ … }
/* AMD-private cacheinfo sysfs attributes (read-write). */
static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);
/*
 * Visibility callback: decides per-attribute whether the AMD-private
 * attributes appear in sysfs (body elided; presumably keyed on L3
 * feature support — confirm).
 */
static umode_t
cache_private_attrs_is_visible(struct kobject *kobj,
struct attribute *attr, int unused)
{ … }
/* Attribute group bundling the AMD-private attributes (initializer elided). */
static struct attribute_group cache_private_group = …;
/* One-time setup of the AMD L3 private attribute array (body elided). */
static void init_amd_l3_attrs(void)
{ … }
/*
 * Hook called by the generic cacheinfo sysfs code to obtain the
 * vendor-private attribute group for @this_leaf; returns NULL-or-group
 * per the generic contract (body elided — confirm return semantics).
 */
const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{ … }
/*
 * Attach AMD L3/northbridge information to leaf @index in @this_leaf
 * (body elided; only built when CONFIG_AMD_NB && CONFIG_SYSFS — the
 * #else branch below stubs it out).
 */
static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{ … }
#else
#define amd_init_l3_cache …
#endif
/*
 * Fill @this_leaf with the decoded CPUID leaf-4 data for cache leaf
 * @index, using amd_cpuid4() emulation where needed (body elided;
 * presumably returns 0 on success / negative errno — confirm).
 */
static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{ … }
/* Count how many cache leaves CPUID reports for @c (body elided). */
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{ … }
/*
 * Derive the last-level-cache ID for an AMD CPU @c on die @die_id
 * (body elided).  Exported to the AMD CPU setup code.
 */
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id)
{ … }
/* Hygon variant of the LLC-ID derivation (body elided). */
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
{ … }
/* Per-vendor cacheinfo initialization entry points (bodies elided);
 * called from the respective vendor setup paths to populate
 * num_cache_leaves and per-CPU cache topology. */
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{ … }
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{ … }
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{ … }
/*
 * AMD-specific sharing-mask setup for cache leaf @index on @cpu (body
 * elided; presumably returns nonzero when it handled the leaf so the
 * generic path below can skip it — confirm against caller).
 */
static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
struct _cpuid4_info_regs *base)
{ … }
/*
 * Build the shared_cpu_map for cache leaf @index on @cpu from @base
 * (body elided).
 */
static void __cache_cpumap_setup(unsigned int cpu, int index,
struct _cpuid4_info_regs *base)
{ … }
/*
 * Translate one decoded CPUID leaf (@base) into the generic struct
 * cacheinfo representation in @this_leaf (body elided).
 */
static void ci_leaf_init(struct cacheinfo *this_leaf,
struct _cpuid4_info_regs *base)
{ … }
/*
 * Arch hook for the generic cacheinfo framework: record the number of
 * cache levels/leaves for @cpu (body elided; contract defined by
 * <linux/cacheinfo.h> — 0 on success, negative errno otherwise).
 */
int init_cache_level(unsigned int cpu)
{ … }
/* Compute the system-wide cache instance id for @id4_regs (body elided). */
static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
{ … }
/*
 * Arch hook for the generic cacheinfo framework: fill in every cache
 * leaf for @cpu via cpuid4_cache_lookup_regs()/ci_leaf_init()
 * (body elided; same 0/-errno contract as init_cache_level()).
 */
int populate_cache_leaves(unsigned int cpu)
{ … }
/* CR4 value saved across a cache_disable()/cache_enable() pair. */
static unsigned long saved_cr4;
/* Serializes cache disable/enable sequences; raw because this runs in
 * stop_machine/early-boot context (confirmed only by the lock type). */
static DEFINE_RAW_SPINLOCK(cache_disable_lock);
/*
 * Disable caching while MTRR/PAT state is rewritten; takes
 * cache_disable_lock (released by cache_enable()).  Body elided —
 * presumably the classic CD-bit + wbinvd + TLB-flush sequence, confirm.
 */
void cache_disable(void) __acquires(cache_disable_lock)
{ … }
/* Re-enable caching and drop cache_disable_lock (body elided). */
void cache_enable(void) __releases(cache_disable_lock)
{ … }
/* Program this CPU's memory-caching state (MTRR/PAT; body elided). */
static void cache_cpu_init(void)
{ … }
/* Whether AP cache init is deferred until cache_aps_init() (init elided). */
static bool cache_aps_delayed_init = …;
/* Setter/getter for the delayed-init flag, used by the boot code. */
void set_cache_aps_delayed_init(bool val)
{ … }
bool get_cache_aps_delayed_init(void)
{ … }
/*
 * stop_machine() callback that runs cache_cpu_init() on every CPU in
 * lockstep (body elided; inferred role from the stop_machine include
 * and naming — confirm).
 */
static int cache_rendezvous_handler(void *unused)
{ … }
/* Boot-processor cache/MTRR/PAT initialization (body elided). */
void __init cache_bp_init(void)
{ … }
/* Re-apply BP cache state after resume (body elided). */
void cache_bp_restore(void)
{ … }
/* CPU-hotplug online callback: bring @cpu's cache state up (body elided). */
static int cache_ap_online(unsigned int cpu)
{ … }
/* CPU-hotplug offline counterpart (body elided). */
static int cache_ap_offline(unsigned int cpu)
{ … }
/* Run the deferred AP cache initialization on all CPUs (body elided). */
void cache_aps_init(void)
{ … }
/* Register the hotplug callbacks above (body elided). */
static int __init cache_ap_register(void)
{ … }
early_initcall(cache_ap_register);