#include <linux/kernel.h>
#include <linux/pgtable.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/semaphore.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpuhotplug.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif
#include "cpu.h"
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif
enum split_lock_detect_state { … };
static enum split_lock_detect_state sld_state __ro_after_init = …;
static u64 msr_test_ctrl_cache __ro_after_init;
static bool cpu_model_supports_sld __ro_after_init;
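/*
 * Processors which have self-snooping capability can handle conflicting
 * memory type across CPUs by snooping its own cache. However, there exist
 * CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known erratas.
 */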
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{ … }
static bool ring3mwait_disabled __read_mostly;
static int __init ring3mwait_disable(char *__unused)
{ … }
__setup(…);
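/*
 * Xeon Phi exposes ring 3 MONITOR/MWAIT through a non-architectural MSR,
 * so it cannot be detected via CPUID; the "ring3mwait=disable" command
 * line parameter above opts out of the probe below.
 */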
static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{ … }
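/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * The table below lists known-bad (model, stepping, microcode revision)
 * combinations so the kernel can warn about them.
 */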
struct sku_microcode { … };
static const struct sku_microcode spectre_bad_microcodes[] = …;
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{ … }
#define MSR_IA32_TME_ACTIVATE …
#define TME_ACTIVATE_LOCKED(x) …
#define TME_ACTIVATE_ENABLED(x) …
#define TME_ACTIVATE_KEYID_BITS(x) …
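/*
 * Intel Total Memory Encryption (TME) reserves the upper KeyID bits of
 * the physical address space. Detect it early so that the number of
 * usable physical address bits can be adjusted accordingly.
 */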
static void detect_tme_early(struct cpuinfo_x86 *c)
{ … }
void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{ … }
static void early_init_intel(struct cpuinfo_x86 *c)
{ … }
static void bsp_init_intel(struct cpuinfo_x86 *c)
{ … }
#ifdef CONFIG_X86_32
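/*
 * Early probe support logic for ppro memory erratum #50.
 *
 * This is called before we do CPU identification work.
 */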
int ppro_with_ram_bug(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_stepping < 8) {
pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
return 1;
}
return 0;
}
static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}
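/*
 * Some CPUs (e.g. certain Pentium M models) do not report PAE in CPUID
 * even though it is usable; the "forcepae" boot parameter lets the user
 * override that in intel_workarounds() below.
 */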
static int forcepae;
static int __init forcepae_setup(char *__unused)
{
forcepae = 1;
return 1;
}
__setup("forcepae", forcepae_setup);
static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
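	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */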
clear_cpu_bug(c, X86_BUG_F00F);
if (c->x86 == 5 && c->x86_model < 9) {
static int f00f_workaround_enabled;
set_cpu_bug(c, X86_BUG_F00F);
if (!f00f_workaround_enabled) {
pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
f00f_workaround_enabled = 1;
}
}
#endif
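	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3. The test below packs family/model/stepping into
	 * a single value (family in bits 11:8, model in bits 7:4, stepping
	 * in bits 3:0), so family 6, model 3, stepping 3 encodes as 0x633.
	 */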
	if ((c->x86 << 8 | c->x86_model << 4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
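	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */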
if (forcepae) {
pr_warn("PAE forced!\n");
set_cpu_cap(c, X86_FEATURE_PAE);
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
}
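	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the
	 * cache. msr_set_bit() returns > 0 only if the bit was previously
	 * clear, so the messages below are printed only when the prefetcher
	 * is actually being disabled.
	 */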
if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
if (msr_set_bit(MSR_IA32_MISC_ENABLE,
MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
pr_info("CPU: C0 stepping P4 Xeon detected.\n");
pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
}
}
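	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update"). 0x520 is family 5, model 2 in the packed
	 * encoding used for the SEP check above.
	 */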
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86 << 8 | c->x86_model << 4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
set_cpu_bug(c, X86_BUG_11AP);
#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves.
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif
intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{ … }
#endif
static void srat_detect_node(struct cpuinfo_x86 *c)
{ … }
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{ … }
static void init_intel_misc_features(struct cpuinfo_x86 *c)
{ … }
static void split_lock_init(void);
static void bus_lock_init(void);
static void init_intel(struct cpuinfo_x86 *c)
{ … }
#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin comes in 256kb and 512kb flavours; both
	 * report size 0, so assume 256kb and let a boot time override
	 * handle the 512kb model.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag.
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
return size;
}
#endif
#define TLB_INST_4K …
#define TLB_INST_4M …
#define TLB_INST_2M_4M …
#define TLB_INST_ALL …
#define TLB_INST_1G …
#define TLB_DATA_4K …
#define TLB_DATA_4M …
#define TLB_DATA_2M_4M …
#define TLB_DATA_4K_4M …
#define TLB_DATA_1G …
#define TLB_DATA0_4K …
#define TLB_DATA0_4M …
#define TLB_DATA0_2M_4M …
#define STLB_4K …
#define STLB_4K_2M …
static const struct _tlb_table intel_tlb_table[] = …;
static void intel_tlb_lookup(const unsigned char desc)
{ … }
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{ … }
static const struct cpu_dev intel_cpu_dev = …;
cpu_dev_register(intel_cpu_dev);
#undef pr_fmt
#define pr_fmt(fmt) …
static const struct { … } sld_options[] __initconst = …;
static struct ratelimit_state bld_ratelimit;
static unsigned int sysctl_sld_mitigate = …;
static DEFINE_SEMAPHORE(buslock_sem, 1);
#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = …;
static int __init sld_mitigate_sysctl_init(void)
{ … }
late_initcall(sld_mitigate_sysctl_init);
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{ … }
static bool split_lock_verify_msr(bool on)
{ … }
static void __init sld_state_setup(void)
{ … }
static void __init __split_lock_setup(void)
{ … }
static void sld_update_msr(bool on)
{ … }
static void split_lock_init(void)
{ … }
static void __split_lock_reenable_unlock(struct work_struct *work)
{ … }
static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);
static void __split_lock_reenable(struct work_struct *work)
{ … }
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
static int splitlock_cpu_offline(unsigned int cpu)
{ … }
static void split_lock_warn(unsigned long ip)
{ … }
bool handle_guest_split_lock(unsigned long ip)
{ … }
EXPORT_SYMBOL_GPL(…);
static void bus_lock_init(void)
{ … }
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{ … }
void handle_bus_lock(struct pt_regs *regs)
{ … }
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = …;
static void __init split_lock_setup(struct cpuinfo_x86 *c)
{ … }
static void sld_state_show(void)
{ … }
void __init sld_setup(struct cpuinfo_x86 *c)
{ … }
#define X86_HYBRID_CPU_TYPE_ID_SHIFT …
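/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns type 0.
 */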
u8 get_this_hybrid_cpu_type(void)
{ … }