/* linux/arch/x86/kernel/cpu/mtrr/generic.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36bits on most modern x86)
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cc_platform.h>
#include <asm/processor-flags.h>
#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/memtype.h>

#include "mtrr.h"

/*
 * NOTE(review): in this extract all struct bodies, array initializers and
 * function bodies have been elided ("{}" / "=;"), so the file is not
 * compilable as-is.  Comments below describe intent only where the
 * surrounding text supports it; anything else is marked as an assumption.
 */
struct fixed_range_block {};

/* Table describing the fixed-range MTRR MSR blocks; initializer elided. */
static struct fixed_range_block fixed_range_blocks[] =;

/* One entry of the per-memory-range cache-mode map built below. */
struct cache_map {};

/* Debug-output switch; presumably set from the early parameter below. */
bool mtrr_debug;

/*
 * Early kernel command-line parameter handler; presumably parses an "mtrr"
 * debug option and sets mtrr_debug — body elided, TODO confirm upstream.
 */
static int __init mtrr_param_setup(char *str)
{}
/* Registers mtrr_param_setup as an early param; arguments elided here. */
early_param();

/*
 * CACHE_MAP_MAX is the maximum number of memory ranges in cache_map, where
 * no 2 adjacent ranges have the same cache mode (those would be merged).
 * The number is based on the worst case:
 * - no two adjacent fixed MTRRs share the same cache mode
 * - one variable MTRR is spanning a huge area with mode WB
 * - 255 variable MTRRs with mode UC all overlap with the WB MTRR, creating 2
 *   additional ranges each (result like "ababababa...aba" with a = WB, b = UC),
 *   accounting for MTRR_MAX_VAR_RANGES * 2 - 1 range entries
 * - a TOP_MEM2 area (even with overlapping an UC MTRR can't add 2 range entries
 *   to the possible maximum, as it always starts at 4GB, thus it can't be in
 *   the middle of that MTRR, unless that MTRR starts at 0, which would remove
 *   the initial "a" from the "abababa" pattern above)
 * The map won't contain ranges with no matching MTRR (those fall back to the
 * default cache mode).
 */
/* NOTE(review): macro value elided in this extract (see worst-case analysis above). */
#define CACHE_MAP_MAX

/* Boot-time map storage; a dynamic copy replaces it later (see mtrr_copy_map). */
static struct cache_map init_cache_map[CACHE_MAP_MAX] __initdata;
/* Current map pointer; initializer elided — presumably starts at init_cache_map. */
static struct cache_map *cache_map __refdata =;
/* Capacity of cache_map in entries; initializer elided. */
static unsigned int cache_map_size =;
/* Number of entries currently in use in cache_map. */
static unsigned int cache_map_n;
/* NOTE(review): presumably the count of fixed-MTRR-derived entries kept at
 * the start of cache_map across rebuilds — body of generic_rebuild_map not
 * shown, confirm upstream. */
static unsigned int cache_map_fixed;

/* Bitmask of MTRR differences observed between CPUs (see mtrr_state_warn). */
static unsigned long smp_changes_mask;
/* Non-zero once mtrr_state below has been populated. */
static int mtrr_state_set;
/* End of the AMD TOP_MEM2 region (always starts at 4GB, per the comment above). */
u64 mtrr_tom2;

/* Cached copy of this system's MTRR configuration. */
struct mtrr_state_type mtrr_state;
/* Export symbol argument elided in this extract. */
EXPORT_SYMBOL_GPL();

/* Reserved bits in the high portion of the MTRRphysBaseN MSR. */
u32 phys_hi_rsvd;

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
/* Body elided; per the comment above, presumably warns/clears if BIOS left
 * MtrrFixDramModEn set in SYSCFG — confirm upstream. */
static inline void k8_check_syscfg_dram_mod_en(void)
{}

/* Get the size of contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
{}

/* NOTE(review): presumably reads variable MTRR @reg, filling *start/*size and
 * returning its memory type — body elided, confirm upstream. */
static u8 get_var_mtrr_state(unsigned int reg, u64 *start, u64 *size)
{}

/* NOTE(review): presumably resolves the effective cache type of two
 * overlapping MTRR types — body elided, confirm upstream. */
static u8 get_effective_type(u8 type1, u8 type2)
{}

/* Remove the cache_map entry at index @idx — body elided. */
static void rm_map_entry_at(int idx)
{}

/*
 * Add an entry into cache_map at a specific index.  Merges adjacent entries if
 * appropriate.  Return the number of merges for correcting the scan index
 * (this is needed as merging will reduce the number of entries, which will
 * result in skipping entries in future iterations if the scan index isn't
 * corrected).
 * Note that the corrected index can never go below -1 (resulting in being 0 in
 * the next scan iteration), as "2" is returned only if the current index is
 * larger than zero.
 */
/* Body elided; contract is described in the comment above. */
static int add_map_entry_at(u64 start, u64 end, u8 type, int idx)
{}

/* Clear a part of an entry. Return 1 if start of entry is still valid. */
static int clr_map_range_at(u64 start, u64 end, int idx)
{}

/*
 * Add MTRR to the map.  The current map is scanned and each part of the MTRR
 * either overlapping with an existing entry or with a hole in the map is
 * handled separately.
 */
static void add_map_entry(u64 start, u64 end, u8 type)
{}

/* Add variable MTRRs to cache map. */
static void map_add_var(void)
{}

/*
 * Rebuild map by replacing variable entries.  Needs to be called when MTRR
 * registers are being changed after boot, as such changes could include
 * removals of registers, which are complicated to handle without rebuild of
 * the map.
 */
void generic_rebuild_map(void)
{}

/* NOTE(review): presumably computes the allocation size for the dynamic
 * cache_map copy — body elided, confirm upstream. */
static unsigned int __init get_cache_map_size(void)
{}

/* Build the cache_map containing the cache modes per memory range. */
void __init mtrr_build_map(void)
{}

/* Copy the cache_map from __initdata memory to dynamically allocated one. */
void __init mtrr_copy_map(void)
{}

/**
 * mtrr_overwrite_state - set static MTRR state
 *
 * Used to set MTRR state via different means (e.g. with data obtained from
 * a hypervisor).
 * Is allowed only for special cases when running virtualized. Must be called
 * from the x86_init.hyper.init_platform() hook.  It can be called only once.
 * The MTRR state can't be changed afterwards.  To ensure that, X86_FEATURE_MTRR
 * is cleared.
 *
 * @var: MTRR variable range array to use
 * @num_var: length of the @var array
 * @def_type: default caching type
 */
/* Body elided; contract is documented in the kerneldoc above. */
void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
			  mtrr_type def_type)
{}

/* NOTE(review): presumably merges @new_type into an accumulated @type for a
 * range lookup and clears *uniform when types differ — body elided, confirm
 * against mtrr_type_lookup upstream. */
static u8 type_merge(u8 type, u8 new_type, u8 *uniform)
{}

/**
 * mtrr_type_lookup - look up memory type in MTRR
 *
 * @start: Begin of the physical address range
 * @end: End of the physical address range
 * @uniform: output argument:
 *  - 1: the returned MTRR type is valid for the whole region
 *  - 0: otherwise
 *
 * Return Values:
 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 * MTRR_TYPE_INVALID - MTRR is disabled
 */
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{}

/* Read the fixed-range MTRR values into @frs — body elided. */
static void get_fixed_ranges(mtrr_type *frs)
{}

/* NOTE(review): @info parameter suggests use as an SMP call-function
 * callback — body elided, confirm caller upstream. */
void mtrr_save_fixed_ranges(void *info)
{}

/* Boot-time state used by print_fixed()/print_fixed_last() below to coalesce
 * adjacent fixed ranges of the same type in the debug output. */
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

/* Flush the pending coalesced fixed range to the log — body elided. */
static void __init print_fixed_last(void)
{}

/* Extend/replace the pending coalesced fixed range — body elided. */
static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{}

/* Print one fixed-range MTRR block starting at @base with step @step — body elided. */
static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{}

/* Dump the cached MTRR state (debug output) — body elided. */
static void __init print_mtrr_state(void)
{}

/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{}

/* Some BIOS's are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *		     differs from the value it should have
 * @msr: MSR address of the MTTR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{}

/* Read back base/size/type of variable MTRR @reg — body elided. */
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *		      differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{}

/* Cached halves of the MTRRdefType MSR; protected per the note below on
 * set_mtrr_state() (single CPU under cache_disable_lock). */
static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes, including
 *       measures that only a single CPU can be active in set_mtrr_state() in
 *       order to not be subject to races for usage of deftype_lo. This is
 *       accomplished by taking cache_disable_lock.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{}

/* Disable MTRRs on this CPU — body elided. */
void mtrr_disable(void)
{}

/* Re-enable MTRRs on this CPU — body elided. */
void mtrr_enable(void)
{}

/* Apply the cached MTRR state to the local CPU — body elided. */
void mtrr_generic_set_state(void)
{}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{}

/* Validate a base/size/type triple before adding an MTRR — body elided. */
int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{}

/* NOTE(review): presumably reports whether write-combining is usable on this
 * CPU — body elided, confirm upstream. */
static int generic_have_wrcomb(void)
{}

/* NOTE(review): presumably an unconditional "yes" variant of have_wrcomb for
 * callers that know WC works — body elided, confirm upstream. */
int positive_have_wrcomb(void)
{}

/*
 * Generic structure...
 */
/* Ops-table initializer elided in this extract. */
const struct mtrr_ops generic_mtrr_ops =;