linux/arch/x86/kernel/cpu/intel.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/semaphore.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpuhotplug.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
	sld_ratelimit,
};

/*
 * Default to sld_off because most systems do not support split lock detection.
 * sld_state_setup() will switch this to sld_warn on systems that support
 * split lock/bus lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;
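
/*
 * Illustrative sketch of the command-line override mentioned above, assuming
 * the documented "split_lock_detect=" parameter; the in-tree sld_state_setup()
 * further down is elided here, and the ratelimit:N variant is omitted.
 */
static void __init __maybe_unused sld_state_setup_sketch(void)
{
	char arg[20];
	int ret;

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret < 0)
		return;		/* no override: keep the sld_off default */

	if (!strcmp(arg, "off"))
		sld_state = sld_off;
	else if (!strcmp(arg, "warn"))
		sld_state = sld_warn;
	else if (!strcmp(arg, "fatal"))
		sld_state = sld_fatal;
}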

/*
 * With a name like MSR_TEST_CTRL it should go without saying, but don't touch
 * MSR_TEST_CTRL unless the CPU is one of the whitelisted models.  Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
static bool cpu_model_supports_sld __ro_after_init;
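
/*
 * Minimal sketch of the "probe before poking" rule described above; the
 * in-tree split_lock_verify_msr() further down is elided. rdmsrl_safe()
 * fails gracefully instead of faulting if MSR_TEST_CTRL is not usable.
 */
static bool __maybe_unused test_ctrl_msr_usable_sketch(void)
{
	u64 ctrl;

	if (!cpu_model_supports_sld)
		return false;

	return !rdmsrl_safe(MSR_TEST_CTRL, &ctrl);	/* 0 == read succeeded */
}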

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there exist
 * CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{}
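
/*
 * Sketch of the feature clearing described above. The model number below is a
 * placeholder, not the in-tree errata list; the real check matches known
 * affected family 6 models.
 */
static void __maybe_unused self_snoop_errata_sketch(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6)
		return;

	switch (c->x86_model) {
	case 0x0e:	/* placeholder model number */
		clear_cpu_cap(c, X86_FEATURE_SELFSNOOP);	/* CPUID "ss" bit */
		break;
	}
}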

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {};
static const struct sku_microcode spectre_bad_microcodes[] =;

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

static void detect_tme_early(struct cpuinfo_x86 *c)
{}
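
/*
 * Sketch of how the TME_ACTIVATE helpers above are typically used: read the
 * BIOS-owned activation MSR and, when MKTME KeyID bits are reserved, shrink
 * the usable physical address width. Not the in-tree detect_tme_early() body
 * (elided above); the caller is assumed to have checked X86_FEATURE_TME.
 */
static void __maybe_unused detect_tme_sketch(struct cpuinfo_x86 *c)
{
	u64 tme_activate;
	int keyid_bits;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (!TME_ACTIVATE_LOCKED(tme_activate) ||
	    !TME_ACTIVATE_ENABLED(tme_activate)) {
		/* Not enabled by BIOS: hide the feature */
		clear_cpu_cap(c, X86_FEATURE_TME);
		return;
	}

	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	if (keyid_bits)
		c->x86_phys_bits -= keyid_bits;	/* KeyIDs consume PA bits */
}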

void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{}

static void early_init_intel(struct cpuinfo_x86 *c)
{}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Nothing to check on the boot CPU */
	if (!c->cpu_index)
		return;

	/*
	 * B stepping (mask B) Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{}

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{}

static void split_lock_init(void);
static void bus_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256 KB of cache, the other 512 KB. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512 KB model, and assume 256 KB otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set-associative
	 * 16 KB cache with a 16-byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K
#define TLB_INST_4M
#define TLB_INST_2M_4M

#define TLB_INST_ALL
#define TLB_INST_1G

#define TLB_DATA_4K
#define TLB_DATA_4M
#define TLB_DATA_2M_4M
#define TLB_DATA_4K_4M

#define TLB_DATA_1G

#define TLB_DATA0_4K
#define TLB_DATA0_4M
#define TLB_DATA0_2M_4M

#define STLB_4K
#define STLB_4K_2M

static const struct _tlb_table intel_tlb_table[] =;

static void intel_tlb_lookup(const unsigned char desc)
{}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{}

static const struct cpu_dev intel_cpu_dev =;

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {} sld_options[] __initconst =;

static struct ratelimit_state bld_ratelimit;

static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem, 1);

#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] =;

static int __init sld_mitigate_sysctl_init(void)
{}

late_initcall(sld_mitigate_sysctl_init);
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{}

static bool split_lock_verify_msr(bool on)
{}

static void __init sld_state_setup(void)
{}

static void __init __split_lock_setup(void)
{}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{}
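
/*
 * Sketch of the lockless read-modify-write described above, starting from the
 * boot-time msr_test_ctrl_cache snapshot; the in-tree sld_update_msr() body is
 * elided. Callers are assumed to have already checked cpu_model_supports_sld.
 */
static void __maybe_unused sld_update_msr_sketch(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}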

static void split_lock_init(void)
{}

static void __split_lock_reenable_unlock(struct work_struct *work)
{}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);

/*
 * If a CPU goes offline with pending delayed work to re-enable split lock
 * detection then the delayed work will be executed on some other CPU. That
 * handles releasing the buslock_sem, but because it executes on a
 * different CPU it probably won't re-enable split lock detection. This is a
 * problem on HT systems since the sibling CPU on the same core may then be
 * left running with split lock detection disabled.
 *
 * Unconditionally re-enable detection here.
 */
static int splitlock_cpu_offline(unsigned int cpu)
{}
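
/*
 * Sketch of the unconditional re-enable described above, in the shape of a
 * CPU hotplug teardown callback (the in-tree splitlock_cpu_offline() body is
 * elided; sld_update_msr_sketch() is the illustrative helper further up).
 */
static int __maybe_unused splitlock_cpu_offline_sketch(unsigned int cpu)
{
	sld_update_msr_sketch(true);	/* always leave SLD enabled here */

	return 0;
}

/*
 * Registration would look roughly like (teardown-only dynamic state):
 *	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/splitlock",
 *			  NULL, splitlock_cpu_offline_sketch);
 */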

static void split_lock_warn(unsigned long ip)
{}

bool handle_guest_split_lock(unsigned long ip)
{}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

static void bus_lock_init(void)
{}

bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{}

void handle_bus_lock(struct pt_regs *regs)
{}

/*
 * CPU models that are known to have the per-core split-lock detection
 * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst =;
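
/*
 * Shape of the table above, with the real model list elided; on recent kernels
 * entries use X86_MATCH_VFM() and are consulted at boot via x86_match_cpu().
 * INTEL_ICELAKE_X is shown purely as an example model.
 */
static const struct x86_cpu_id split_lock_cpu_ids_sketch[] __initconst = {
	X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
	{}
};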

static void __init split_lock_setup(struct cpuinfo_x86 *c)
{}

static void sld_state_show(void)
{}

void __init sld_setup(struct cpuinfo_x86 *c)
{}

#define X86_HYBRID_CPU_TYPE_ID_SHIFT	24

/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns 0.
 */
u8 get_this_hybrid_cpu_type(void)
{}
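
/*
 * Sketch of the lookup documented above: CPUID leaf 0x1A reports the hybrid
 * core type in EAX[31:24], and X86_FEATURE_HYBRID_CPU gates the leaf on
 * non-hybrid parts. The in-tree get_this_hybrid_cpu_type() body is elided.
 */
static u8 __maybe_unused get_hybrid_cpu_type_sketch(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}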