// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Monitoring code
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author:
 *    Vikas Shivappa <[email protected]>
 *
 * This replaces the perf-based cqm.c, but we reuse a lot of code and
 * data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	…

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>

#include "internal.h"
#include "trace.h"

/**
 * struct rmid_entry - dirty tracking for all RMID.
 * @closid:	The CLOSID for this entry.
 * @rmid:	The RMID for this entry.
 * @busy:	The number of domains with cached data using this RMID.
 * @list:	Member of the rmid_free_lru list when busy == 0.
 *
 * Depending on the architecture the correct monitor is accessed using
 * both @closid and @rmid, or @rmid only.
 *
 * Take the rdtgroup_mutex when accessing.
 */
struct rmid_entry {
	…
};

/*
 * @rmid_free_lru - A least recently used list of free RMIDs.
 *	These RMIDs are guaranteed to have an occupancy less than the
 *	threshold occupancy.
 */
static LIST_HEAD(rmid_free_lru);

/*
 * @closid_num_dirty_rmid - The number of dirty RMID each CLOSID has.
 *	Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
 *	Indexed by CLOSID. Protected by rdtgroup_mutex.
 */
static u32 *closid_num_dirty_rmid;

/*
 * @rmid_limbo_count - count of currently unused, but (potentially)
 *	dirty RMIDs.
 *	This counts RMIDs that no one is currently using but that
 *	may have an occupancy value > resctrl_rmid_realloc_threshold.
 *	Users can change the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;

/*
 * @rmid_ptrs - The array of entries used by the limbo and free lists.
 */
static struct rmid_entry	*rmid_ptrs;

/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;

/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;

/*
 * This is the threshold cache occupancy in bytes at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_rmid_realloc_threshold;

/*
 * This is the maximum value for the reallocation threshold, in bytes.
 */
unsigned int resctrl_rmid_realloc_limit;

#define CF(cf)	…

static int snc_nodes_per_l3_cache = …;

/*
 * The correction factor table is documented in Documentation/arch/x86/resctrl.rst.
 * If rmid > rmid threshold, MBM total and local values should be multiplied
 * by the correction factor.
 *
 * The original table is modified for better code:
 *
 * 1. The threshold 0 is changed to rmid count - 1 so that no correction
 *    is done for that case.
 * 2. The MBM total and local correction table is indexed by core count, which
 *    is equal to (x86_cache_max_rmid + 1) / 8 - 1 and ranges from 0 to 27.
 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 *    to calculate the corrected value by shifting:
 *    corrected_value = (original_value * correction_factor) >> 20
 */
static const struct mbm_correction_factor_table {
	…
} mbm_cf_table[] __initconst = …;

static u32 mbm_cf_rmidthreshold __read_mostly = …;
static u64 mbm_cf __read_mostly;

static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
{ … }
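/*
 * For illustration only: a minimal sketch of the shift-based fixed-point
 * correction described above. The 1.25 factor and the demo_* names are
 * hypothetical stand-ins, not values or helpers from mbm_cf_table[].
 */
#define DEMO_MBM_CF	((u64)(125 * 1048576 / 100))	/* 1.25, normalized to 2^20 */

static inline u64 demo_corrected_mbm_count(u64 val)
{
	/* corrected_value = (original_value * correction_factor) >> 20 */
	return (val * DEMO_MBM_CF) >> 20;	/* e.g. 4000 chunks -> 5000 */
}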
/*
 * x86 and arm64 differ in their handling of monitoring.
 * x86's RMID are independent numbers, there is only one source of traffic
 * with an RMID value of '1'.
 * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
 * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
 * value is no longer unique.
 * To account for this, resctrl uses an index. On x86 this is just the RMID,
 * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
 *
 * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
 * must accept an attempt to read every index.
 */
static inline struct rmid_entry *__rmid_entry(u32 idx)
{ … }

/*
 * When Sub-NUMA Cluster (SNC) mode is not enabled (as indicated by
 * "snc_nodes_per_l3_cache == 1") no translation of the RMID value is
 * needed. The physical RMID is the same as the logical RMID.
 *
 * On a platform with SNC mode enabled, Linux enables RMID sharing mode
 * via MSR 0xCA0 (see the "RMID Sharing Mode" section in the "Intel
 * Resource Director Technology Architecture Specification" for a full
 * description of RMID sharing mode).
 *
 * In RMID sharing mode there are fewer "logical RMID" values available
 * to accumulate data ("physical RMIDs" are divided evenly between SNC
 * nodes that share an L3 cache). Linux creates an rdt_mon_domain for
 * each SNC node.
 *
 * The value loaded into IA32_PQR_ASSOC is the "logical RMID".
 *
 * Data is collected independently on each SNC node and can be retrieved
 * using the "physical RMID" value computed by this function and loaded
 * into IA32_QM_EVTSEL. @cpu can be any CPU in the SNC node.
 *
 * The scope of the IA32_QM_EVTSEL and IA32_QM_CTR MSRs is the L3
 * cache. So a "physical RMID" may be read from any CPU that shares
 * the L3 cache with the desired SNC node, not just from a CPU in
 * the specific SNC node.
 */
static int logical_rmid_to_physical_rmid(int cpu, int lrmid)
{ … }

static int __rmid_read_phys(u32 prmid, enum resctrl_event_id eventid, u64 *val)
{ … }

static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_mon_domain *hw_dom,
						 u32 rmid,
						 enum resctrl_event_id eventid)
{ … }

void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
			     u32 unused, u32 rmid,
			     enum resctrl_event_id eventid)
{ … }

/*
 * Assumes that hardware counters are also reset and thus that there is
 * no need to record initial non-zero counts.
 */
void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d)
{ … }

static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
{ … }

int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
			   u64 *val, void *ignored)
{ … }

static void limbo_release_entry(struct rmid_entry *entry)
{ … }

/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold, clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_mon_domain *d, bool force_free)
{ … }

bool has_busy_rmid(struct rdt_mon_domain *d)
{ … }

static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
{ … }

/**
 * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
 *                                  RMID are clean, or the CLOSID that has
 *                                  the most clean RMID.
 *
 * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
 * may not be able to allocate clean RMID. To avoid this the allocator will
 * choose the CLOSID with the most clean RMID.
 *
 * When the CLOSID and RMID are independent numbers, the first free CLOSID will
 * be returned.
 */
int resctrl_find_cleanest_closid(void)
{ … }
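/*
 * For illustration only: a rough sketch of the "most clean RMID" selection
 * described above, not the kernel's implementation. The demo_* names and the
 * is_free() callback are hypothetical stand-ins for the real allocator state
 * (closid_num_dirty_rmid and the CLOSID bitmap).
 */
static int demo_find_cleanest_closid(const u32 *num_dirty, int num_closid,
				     bool (*is_free)(int closid))
{
	u32 fewest_dirty = U32_MAX;
	int cleanest = -1;
	int i;

	for (i = 0; i < num_closid; i++) {
		if (!is_free(i))
			continue;
		if (num_dirty[i] == 0)
			return i;	/* every RMID clean: best possible */
		if (num_dirty[i] < fewest_dirty) {
			fewest_dirty = num_dirty[i];
			cleanest = i;
		}
	}

	return cleanest;	/* -1 if no CLOSID is free */
}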
/*
 * For MPAM the RMID value is not unique, and has to be considered with
 * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
 * allows all domains to be managed by a single free list.
 * Each domain also has an rmid_busy_llc to reduce the work of the limbo handler.
 */
int alloc_rmid(u32 closid)
{ … }

static void add_rmid_to_limbo(struct rmid_entry *entry)
{ … }

void free_rmid(u32 closid, u32 rmid)
{ … }

static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid,
				       u32 rmid, enum resctrl_event_id evtid)
{ … }

static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
{ … }

/*
 * mbm_bw_count() - Update bw count from values previously read by
 *		    __mon_event_count().
 * @closid:	The closid used to identify the cached mbm_state.
 * @rmid:	The rmid used to identify the cached mbm_state.
 * @rr:		The struct rmid_read populated by __mon_event_count().
 *
 * Supporting function to calculate the memory bandwidth
 * and delta bandwidth in MBps. The chunks value previously read by
 * __mon_event_count() is compared with the chunks value from the previous
 * invocation. This must be called once per second to maintain values in MBps.
 */
static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
{ … }

/*
 * This is scheduled by mon_event_read() to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{ … }

/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth(cur_bw) < user specified bandwidth(user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The frequency of the checks is 1s and we just tag along the MBM overflow
 * timer. Having a 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 * L3 external bandwidth, the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in its initial
 * phases -> mba_sc kicks in and reduces the bandwidth percentage values ->
 * but after some time the rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values. To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
{ … }

static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
		       u32 closid, u32 rmid)
{ … }

/*
 * Handler to scan the limbo list and move RMIDs whose
 * occupancy < threshold_occupancy to the free list.
 */
void cqm_handle_limbo(struct work_struct *work)
{ … }

/**
 * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this
 *                             domain.
 * @dom:         The domain the limbo handler should run for.
 * @delay_ms:    How far in the future the handler should run.
 * @exclude_cpu: Which CPU the handler should not run on,
 *		 RESCTRL_PICK_ANY_CPU to pick any CPU.
 */
void cqm_setup_limbo_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
			     int exclude_cpu)
{ … }
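/*
 * For illustration only: the once-per-second MBps computation described in
 * the mbm_bw_count() comment above, as a standalone sketch. The demo_* names
 * and the mon_scale parameter (assumed bytes per hardware chunk) are
 * illustrative assumptions, not this file's helpers.
 */
static u32 demo_bw_mbps(u64 prev_chunks, u64 cur_chunks, u32 mon_scale)
{
	u64 bytes = (cur_chunks - prev_chunks) * mon_scale;

	/* Sampled at a 1s interval, so bytes per interval == bytes/sec. */
	return (u32)(bytes >> 20);	/* MBps */
}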
void mbm_handle_overflow(struct work_struct *work)
{ … }

/**
 * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this
 *                                domain.
 * @dom:         The domain the overflow handler should run for.
 * @delay_ms:    How far in the future the handler should run.
 * @exclude_cpu: Which CPU the handler should not run on,
 *		 RESCTRL_PICK_ANY_CPU to pick any CPU.
 */
void mbm_setup_overflow_handler(struct rdt_mon_domain *dom, unsigned long delay_ms,
				int exclude_cpu)
{ … }

static int dom_data_init(struct rdt_resource *r)
{ … }

static void __exit dom_data_exit(void)
{ … }

static struct mon_evt llc_occupancy_event = …;

static struct mon_evt mbm_total_event = …;

static struct mon_evt mbm_local_event = …;

/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of the RDT_RESOURCE_L3 resource
 * because, as per the SDM, the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{ … }

/*
 * The power-on reset value of MSR_RMID_SNC_CONFIG is 0x1,
 * which indicates that RMIDs are configured in legacy mode.
 * This mode is incompatible with Linux resctrl semantics
 * as RMIDs are partitioned between SNC nodes, which requires
 * a user to know which RMID is allocated to a task.
 * Clearing bit 0 reconfigures the RMID counters for use
 * in RMID sharing mode. This mode is better for Linux.
 * The RMID space is divided between all SNC nodes, with the
 * RMIDs renumbered to start from zero in each node when
 * counting operations from tasks. Code to read the counters
 * must adjust RMID counter numbers based on SNC node. See
 * logical_rmid_to_physical_rmid() for code that does this.
 */
void arch_mon_domain_online(struct rdt_resource *r, struct rdt_mon_domain *d)
{ … }

/* CPU models that support MSR_RMID_SNC_CONFIG */
static const struct x86_cpu_id snc_cpu_ids[] __initconst = …;

/*
 * There isn't a simple hardware bit that indicates whether a CPU is running
 * in Sub-NUMA Cluster (SNC) mode. Infer the state by comparing the
 * number of CPUs sharing the L3 cache with CPU0 to the number of CPUs in
 * the same NUMA node as CPU0.
 * It is not possible to accurately determine SNC state if the system is
 * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes
 * to L3 caches. It will be OK if the system is booted with hyperthreading
 * disabled (since this doesn't affect the ratio).
 */
static __init int snc_get_config(void)
{ … }

int __init rdt_get_mon_l3_config(struct rdt_resource *r)
{ … }

void __exit rdt_put_mon_l3_config(void)
{ … }

void __init intel_rdt_mbm_apply_quirk(void)
{ … }
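/*
 * For illustration only: the SNC inference described above snc_get_config(),
 * reduced to the ratio it computes. The demo_* name and both parameters are
 * hypothetical stand-ins for the real cpumask/topology queries on CPU0.
 */
static int demo_snc_nodes_per_l3(unsigned int cpus_sharing_cpu0_l3,
				 unsigned int cpus_in_cpu0_numa_node)
{
	/* A distorted topology (e.g. maxcpus=N) makes the ratio unreliable. */
	if (!cpus_in_cpu0_numa_node ||
	    cpus_sharing_cpu0_l3 % cpus_in_cpu0_numa_node)
		return 1;	/* assume SNC is disabled */

	/* With SNC off the ratio is 1; with two SNC nodes per L3 it is 2. */
	return cpus_sharing_cpu0_l3 / cpus_in_cpu0_numa_node;
}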