linux/arch/x86/kernel/cpu/resctrl/core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <[email protected]>
 *    Tony Luck <[email protected]>
 *    Vikas Shivappa <[email protected]>
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)

#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cpuhotplug.h>

#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include "internal.h"

/*
 * rdt_domain structures are kfree()d when their last CPU goes offline,
 * and allocated when the first CPU in a new domain comes online.
 * The rdt_resource's domain list is updated when this happens. Readers of
 * the domain list must either take cpus_read_lock(), or rely on an RCU
 * read-side critical section, to avoid observing concurrent modification.
 * All writers take this mutex:
 */
static DEFINE_MUTEX(domain_list_lock);
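
/*
 * Illustrative sketch (not code from this file): the access pattern the
 * locking rules above imply. Readers walk a resource's domain list inside
 * an RCU read-side critical section; all list updates happen with
 * domain_list_lock held. The helper name is hypothetical, and the
 * ctrl_domains/hdr.list/hdr.cpu_mask fields are assumed from the resctrl
 * structures pulled in via "internal.h", as are the rculist helpers.
 */
static __maybe_unused struct rdt_ctrl_domain *
example_lookup_ctrl_domain(struct rdt_resource *r, int cpu)
{
	struct rdt_ctrl_domain *d;

	rcu_read_lock();
	list_for_each_entry_rcu(d, &r->ctrl_domains, hdr.list) {
		if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask)) {
			rcu_read_unlock();
			return d;
		}
	}
	rcu_read_unlock();

	return NULL;
}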

/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts and preemption disabled, which is
 * sufficient protection.
 */
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
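
/*
 * Illustrative sketch only: how the per-CPU state above is meant to be
 * updated. The caller runs with interrupts and preemption disabled (for
 * example on the context-switch path), so plain this-CPU accessors are
 * sufficient and no remote CPU can race with the update. The helper name
 * is hypothetical; cur_closid/cur_rmid are assumed from
 * struct resctrl_pqr_state in <asm/resctrl.h>.
 */
static __maybe_unused void example_update_local_pqr(u32 closid, u32 rmid)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);

	lockdep_assert_irqs_disabled();

	state->cur_closid = closid;
	state->cur_rmid = rmid;
}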

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format
 */
int max_name_width, max_data_width;
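
/*
 * Minimal sketch of how the widths above are typically consumed when the
 * schemata is rendered: printf-style field widths keep every resource line
 * column-aligned. The helper and its buffer handling are illustrative only,
 * not part of this file.
 */
static __maybe_unused void example_format_schemata_entry(char *buf, size_t len,
							 const char *name,
							 u32 ctrl_val)
{
	/* e.g. "   L3:fffff", with the name and data padded to the max widths */
	snprintf(buf, len, "%*s:%0*x", max_name_width, name, max_data_width,
		 ctrl_val);
}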

/*
 * Global boolean for rdt_alloc_capable which is true if any
 * resource allocation feature is enabled.
 */
bool rdt_alloc_capable;

static void mba_wrmsr_intel(struct msr_param *m);
static void cat_wrmsr(struct msr_param *m);
static void mba_wrmsr_amd(struct msr_param *m);

#define ctrl_domain_init(id)
#define mon_domain_init(id)

struct rdt_hw_resource rdt_resources_all[] =;

u32 resctrl_arch_system_num_rmid_idx(void)
{}

/*
 * cache_alloc_hsw_probe() - Probe for Intel Haswell server CPUs, which
 * lack CPUID enumeration support for Cache Allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R)  CPU E5-2658  v3  @  2.20GHz
 *	Intel(R) Xeon(R)  CPU E5-2648L v3  @  1.80GHz
 *	Intel(R) Xeon(R)  CPU E5-2628L v3  @  2.00GHz
 *	Intel(R) Xeon(R)  CPU E5-2618L v3  @  2.30GHz
 *	Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
 *	Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on HSW server parts. The minimum cache bitmask length
 * allowed for HSW servers is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{}
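
/*
 * Minimal sketch of the probe described above, not the function's actual
 * body: write an all-ones 20-bit mask to the first L3 CBM MSR and check
 * that the value sticks. wrmsrl_safe() absorbs the #GP that unsupported
 * parts would raise. MSR_IA32_L3_CBM_BASE and the MSR accessors are
 * assumed to be available via the asm headers already included; the real
 * probe also restores the register and fills in the hardcoded CLOSID/CBM
 * parameters mentioned above.
 */
static __maybe_unused bool example_hsw_cat_probe(void)
{
	u64 max_cbm = GENMASK_ULL(19, 0), l3_cbm_0;

	if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
		return false;

	rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0);

	/* Cache allocation is usable only if all of the mask bits stuck. */
	return l3_cbm_0 == max_cbm;
}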

bool is_mba_sc(struct rdt_resource *r)
{}

/*
 * rdt_get_mb_table() - Get a mapping between the bandwidth (b/w) percentage
 * values exposed to the user interface and the h/w understandable delay
 * values.
 *
 * The non-linear delay values have a power-of-two granularity, and the h/w
 * does not guarantee a predictable curve of configured delay values vs.
 * the actual b/w enforced.
 * Hence we need a pre-calibrated mapping so the user can express the
 * memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{}

static __init bool __get_mem_config_intel(struct rdt_resource *r)
{}

static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{}

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{}

static void rdt_get_cdp_config(int level)
{}

static void rdt_get_cdp_l3_config(void)
{}

static void rdt_get_cdp_l2_config(void)
{}

static void mba_wrmsr_amd(struct msr_param *m)
{}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non-linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{}
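
/*
 * Minimal sketch of the linear mapping described above, not the function's
 * actual body: with a linear delay scale, a requested bandwidth percentage
 * maps to a throttle value of (100% - bw), while non-linear scales would
 * need the pre-calibrated table from rdt_get_mb_table(). The helper name
 * and the local 100% ceiling are illustrative assumptions.
 */
static __maybe_unused u32 example_linear_delay_map(unsigned long bw)
{
	const unsigned long max_mba_bw = 100;

	/* Clamp, then express the delay as the unthrottled remainder. */
	if (bw > max_mba_bw)
		bw = max_mba_bw;

	return max_mba_bw - bw;
}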

static void mba_wrmsr_intel(struct msr_param *m)
{}

static void cat_wrmsr(struct msr_param *m)
{}

struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu, struct rdt_resource *r)
{}

struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu, struct rdt_resource *r)
{}

u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
{}

void rdt_ctrl_update(void *arg)
{}

/*
 * rdt_find_domain - Search for a domain id in a resource domain list.
 *
 * Search the domain list for the given domain id. If the domain id is
 * found, return the domain; otherwise return NULL. If the domain id is
 * not found (and NULL is returned), the first domain with an id bigger
 * than the input id can be returned to the caller via @pos.
 */
struct rdt_domain_hdr *rdt_find_domain(struct list_head *h, int id,
				       struct list_head **pos)
{}
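
/*
 * Illustrative sketch of the search contract documented above, assuming the
 * list is kept sorted by ascending id: return the matching domain header,
 * or NULL with @pos (when provided) pointing at the position before which
 * a new domain with @id would be inserted. This mirrors the documented
 * behaviour; it is not the function's actual body.
 */
static __maybe_unused struct rdt_domain_hdr *
example_find_domain(struct list_head *h, int id, struct list_head **pos)
{
	struct rdt_domain_hdr *d;
	struct list_head *l;

	list_for_each(l, h) {
		d = list_entry(l, struct rdt_domain_hdr, list);
		/* The first entry with a bigger id marks the insertion point. */
		if (id < d->id)
			break;
		if (id == d->id)
			return d;
	}

	if (pos)
		*pos = l;

	return NULL;
}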

static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
{}

static void ctrl_domain_free(struct rdt_hw_ctrl_domain *hw_dom)
{}

static void mon_domain_free(struct rdt_hw_mon_domain *hw_dom)
{}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{}

/**
 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
 * @num_rmid:	The size of the MBM counter array
 * @hw_dom:	The domain that owns the allocated arrays
 */
static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_mon_domain *hw_dom)
{}
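
/*
 * Minimal sketch of the allocation pattern the kernel-doc above describes:
 * one counter-state entry per RMID, owned by the domain and freed when the
 * domain goes away. The arch_mbm_total/arch_mbm_local field names are
 * assumptions based on struct rdt_hw_mon_domain; the real function only
 * allocates the arrays for the MBM events that are actually enabled.
 */
static __maybe_unused int example_mbm_alloc(u32 num_rmid,
					    struct rdt_hw_mon_domain *hw_dom)
{
	hw_dom->arch_mbm_total = kcalloc(num_rmid,
					 sizeof(*hw_dom->arch_mbm_total),
					 GFP_KERNEL);
	if (!hw_dom->arch_mbm_total)
		return -ENOMEM;

	hw_dom->arch_mbm_local = kcalloc(num_rmid,
					 sizeof(*hw_dom->arch_mbm_local),
					 GFP_KERNEL);
	if (!hw_dom->arch_mbm_local) {
		kfree(hw_dom->arch_mbm_total);
		hw_dom->arch_mbm_total = NULL;
		return -ENOMEM;
	}

	return 0;
}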

static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
{}

static void domain_add_cpu_ctrl(int cpu, struct rdt_resource *r)
{}

static void domain_add_cpu_mon(int cpu, struct rdt_resource *r)
{}

static void domain_add_cpu(int cpu, struct rdt_resource *r)
{}

static void domain_remove_cpu_ctrl(int cpu, struct rdt_resource *r)
{}

static void domain_remove_cpu_mon(int cpu, struct rdt_resource *r)
{}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{}

static void clear_closid_rmid(int cpu)
{}

static int resctrl_arch_online_cpu(unsigned int cpu)
{}

static int resctrl_arch_offline_cpu(unsigned int cpu)
{}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{}

enum {};

#define RDT_OPT(idx, n, f)

struct rdt_options {};

static struct rdt_options rdt_options[] __initdata =;
#define NUM_RDT_OPTIONS

static int __init set_rdt_options(char *str)
{}
__setup();

bool __init rdt_cpu_has(int flag)
{}

static __init bool get_mem_config(void)
{}

static __init bool get_slow_mem_config(void)
{}

static __init bool get_rdt_alloc_resources(void)
{}

static __init bool get_rdt_mon_resources(void)
{}

static __init void __check_quirks_intel(void)
{}

static __init void check_quirks(void)
{}

static __init bool get_rdt_resources(void)
{}

static __init void rdt_init_res_defs_intel(void)
{}

static __init void rdt_init_res_defs_amd(void)
{}

static __init void rdt_init_res_defs(void)
{}

static enum cpuhp_state rdt_online;

/* Runs once on the BSP during boot. */
void resctrl_cpu_detect(struct cpuinfo_x86 *c)
{}

static int __init resctrl_late_init(void)
{}

late_initcall(resctrl_late_init);

static void __exit resctrl_exit(void)
{}

__exitcall();