linux/arch/x86/kernel/cpu/common.c

// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/mem_encrypt.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>
#include <linux/stackprotector.h>
#include <linux/utsname.h>

#include <asm/alternative.h>
#include <asm/cmdline.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/cacheinfo.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/fred.h>
#include <asm/uv/uv.h>
#include <asm/ia32.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/posted_intr.h>
#include <asm/runtime-const.h>

#include "cpu.h"

DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL();

u32 elf_hwcap2 __read_mostly;

/* Maximum number of SMT siblings (threads) per CPU core */
unsigned int __max_threads_per_core __ro_after_init =;
EXPORT_SYMBOL();

unsigned int __max_dies_per_package __ro_after_init =;
EXPORT_SYMBOL();

unsigned int __max_logical_packages __ro_after_init =;
EXPORT_SYMBOL();

unsigned int __num_cores_per_package __ro_after_init =;
EXPORT_SYMBOL();

unsigned int __num_threads_per_package __ro_after_init =;
EXPORT_SYMBOL();

static struct ppin_info {} ppin_info[] =;

static const struct x86_cpu_id ppin_cpuids[] =;

static void ppin_init(struct cpuinfo_x86 *c)
{}

static void default_init(struct cpuinfo_x86 *c)
{}

static const struct cpu_dev default_cpu =;

static const struct cpu_dev *this_cpu =;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) =;
EXPORT_PER_CPU_SYMBOL_GPL();

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{}
early_param();
#endif

static int __init x86_noinvpcid_setup(char *s)
{}
early_param();

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT CPUs allow disabling of CPUID, so the code below
	 * may return different results when executed before and after
	 * CPUID is enabled. Mark the asm volatile so gcc does not
	 * optimize away subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
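
/*
 * Illustrative sketch, not part of the upstream file: raw CPUID use on
 * 32-bit is expected to be guarded by have_cpuid_p(), since very old CPUs
 * or CPUs with CPUID disabled would otherwise take a #UD. The helper name
 * below is hypothetical and exists only for illustration.
 */
static inline unsigned int example_max_cpuid_level(void)
{
	if (!have_cpuid_p())
		return 0;		/* No CPUID: nothing to report */

	return cpuid_eax(0);		/* EAX of leaf 0 is the max standard leaf */
}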

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{}
#endif

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{}

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask =;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;

void native_write_cr0(unsigned long val)
{}
EXPORT_SYMBOL();

void __no_profile native_write_cr4(unsigned long val)
{}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif

void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{}
EXPORT_SYMBOL();

/* Read the CR4 shadow. */
unsigned long cr4_read_shadow(void)
{}
EXPORT_SYMBOL_GPL();

void cr4_init(void)
{}

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{}
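
/*
 * Illustrative sketch of the pinning mechanism described above (hypothetical
 * helper, not the upstream body): snapshot the security-sensitive CR4 bits
 * that are currently set and flip the static key, after which CR4 writes
 * going through native_write_cr4() keep those bits forced on.
 */
static void __init example_enable_cr_pinning(void)
{
	/* Remember which of the sensitive bits are set at this point. */
	cr4_pinned_bits = cr4_read_shadow() & cr4_pinned_mask;

	/* Arm the pinning check in the CR4 write path. */
	static_key_enable(&cr_pinning.key);
}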

static __init int x86_nofsgsbase_setup(char *arg)
{}
__setup();

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{}
__setup();
#endif

#ifdef CONFIG_X86_KERNEL_IBT

__noendbr u64 ibt_save(bool disable)
{}

__noendbr void ibt_restore(u64 save)
{}

#endif

static __always_inline void setup_cet(struct cpuinfo_x86 *c)
{}

__noendbr void cet_disable(void)
{}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] =;

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{}
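
/*
 * Illustrative sketch of the auto-disable idea described above (hypothetical
 * names; the real struct and table are collapsed here): pair a feature bit
 * with the lowest CPUID level that reports it, and clear the feature when
 * the CPU (or a capping hypervisor) advertises a lower level.
 */
struct example_cpuid_dependent_feature {
	u32 feature;	/* X86_FEATURE_* bit */
	u32 level;	/* minimum CPUID level required */
};

static void example_filter_one(struct cpuinfo_x86 *c,
			       const struct example_cpuid_dependent_feature *df)
{
	int level = df->level;

	if (!cpu_has(c, df->feature))
		return;

	if (c->cpuid_level >= level)
		return;

	/* The feature bit is set but the CPUID level is too low: drop it. */
	clear_cpu_cap(c, df->feature);
}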

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the name;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{}

/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned();
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned();

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{}
EXPORT_SYMBOL_GPL();

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{}
EXPORT_SYMBOL_GPL();

/**
 * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
 * @cpu:	The CPU number for which this is invoked
 *
 * Invoked during early boot to switch from early GDT and early per CPU to
 * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
 * switch happens implicitly when the direct GDT is loaded. On 64-bit this
 * requires updating GSBASE.
 */
void __init switch_gdt_and_percpu_base(int cpu)
{}
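
/*
 * Illustrative sketch of the switch described above (hypothetical helper,
 * not the upstream body): load the direct GDT, and on 64-bit also point
 * GSBASE at the runtime per-CPU area. On 32-bit the per-CPU base switch
 * is implied by the GDT reload itself.
 */
static void __init example_switch_gdt_and_percpu_base(int cpu)
{
	load_direct_gdt(cpu);

#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
}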

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] =;

static void get_model_name(struct cpuinfo_x86 *c)
{}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{}

void cpu_detect(struct cpuinfo_x86 *c)
{}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{}

static void init_speculation_control(struct cpuinfo_x86 *c)
{}

void get_cpu_cap(struct cpuinfo_x86 *c)
{}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{}

#define NO_SPECULATION
#define NO_MELTDOWN
#define NO_SSB
#define NO_L1TF
#define NO_MDS
#define MSBDS_ONLY
#define NO_SWAPGS
#define NO_ITLB_MULTIHIT
#define NO_SPECTRE_V2
#define NO_MMIO
#define NO_EIBRS_PBRSB
#define NO_BHI

#define VULNWL(vendor, family, model, whitelist)

#define VULNWL_INTEL(vfm, whitelist)

#define VULNWL_AMD(family, whitelist)

#define VULNWL_HYGON(family, whitelist)

static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] =;

#define VULNBL(vendor, family, model, blacklist)

#define VULNBL_INTEL_STEPPINGS(vfm, steppings, issues)

#define VULNBL_AMD(family, blacklist)

#define VULNBL_HYGON(family, blacklist)

#define SRBDS
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS
/* CPU is affected by RETbleed, speculating where you would not expect it */
#define RETBLEED
/* CPU is affected by SMT (cross-thread) return predictions */
#define SMT_RSB
/* CPU is affected by SRSO */
#define SRSO
/* CPU is affected by GDS */
#define GDS
/* CPU is affected by Register File Data Sampling */
#define RFDS

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst =;

static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{}

u64 x86_read_arch_cap_msr(void)
{}

static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{}

static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{}
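
/*
 * Illustrative sketch of the policy described above (hypothetical helper):
 * never trust NOPL on 32-bit, and force-enable it on 64-bit where every
 * CPU is known to implement it.
 */
static void example_apply_nopl_policy(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}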

/*
 * We parse cpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init cpu_parse_early_param(void)
{}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{}

void __init early_cpu_init(void)
{}

static bool detect_null_seg_behavior(void)
{}

void check_null_seg_clears_base(struct cpuinfo_x86 *c)
{}

static void generic_identify(struct cpuinfo_x86 *c)
{}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_SEP))
		return;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss_rw, cpu);

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

	put_cpu();
}
#endif

static __init void identify_boot_cpu(void)
{}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{}

void print_cpu_info(struct cpuinfo_x86 *c)
{}

/*
 * clearcpuid= was already parsed in cpu_parse_early_param().  This dummy
 * function prevents it from becoming an environment variable for init.
 */
static __init int setup_clearcpuid(char *arg)
{}
__setup();

DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) =;
EXPORT_PER_CPU_SYMBOL();
EXPORT_PER_CPU_SYMBOL();

#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
		     fixed_percpu_data) __aligned() __visible;
EXPORT_PER_CPU_SYMBOL_GPL();

static void wrmsrl_cstar(unsigned long val)
{}

static inline void idt_syscall_init(void)
{}

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{}

#else	/* CONFIG_X86_64 */

#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{}
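
/*
 * Illustrative sketch (hypothetical helper, not the upstream body): zero
 * DR0-DR3, DR6 and DR7 while skipping register numbers 4 and 5, which are
 * reserved and either alias DR6/DR7 or fault depending on CR4.DE.
 */
static void example_zero_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* DR4 and DR5 are reserved; leave them alone. */
		if (i == 4 || i == 5)
			continue;

		set_debugreg(0, i);
	}
}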

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs
#endif /* ! CONFIG_KGDB */

static inline void setup_getcpu(int cpu)
{}

#ifdef CONFIG_X86_64
static inline void tss_setup_ist(struct tss_struct *tss)
{}
#else /* CONFIG_X86_64 */
static inline void tss_setup_ist(struct tss_struct *tss) { }
#endif /* !CONFIG_X86_64 */

static inline void tss_setup_io_bitmap(struct tss_struct *tss)
{}

/*
 * Setup everything needed to handle exceptions from the IDT, including the IST
 * exceptions which use paranoid_entry().
 */
void cpu_init_exception_handling(bool boot_cpu)
{}

void __init cpu_init_replace_early_idt(void)
{}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT. We
 * reload it nevertheless: this function acts as a 'CPU state barrier', and
 * nothing should get across it.
 */
void cpu_init(void)
{}

#ifdef CONFIG_MICROCODE_LATE_LOADING
/**
 * store_cpu_caps() - Store a snapshot of CPU capabilities
 * @curr_info: Pointer to the cpuinfo_x86 structure in which to store the snapshot
 *
 * Return: None
 */
void store_cpu_caps(struct cpuinfo_x86 *curr_info)
{}

/**
 * microcode_check() - Check if any CPU capabilities changed after an update.
 * @prev_info:	CPU capabilities stored before an update.
 *
 * The microcode loader calls this upon late microcode load to recheck features,
 * but only when the microcode has actually been updated. The caller holds the
 * CPU hotplug lock.
 *
 * Return: None
 */
void microcode_check(struct cpuinfo_x86 *prev_info)
{}
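
/*
 * Illustrative sketch of the recheck described above (hypothetical helper):
 * take a fresh capability snapshot and compare it with the one saved before
 * the update; a difference means the new microcode changed visible features.
 */
static void example_recheck_cpu_caps(struct cpuinfo_x86 *prev_info)
{
	struct cpuinfo_x86 curr_info;

	store_cpu_caps(&curr_info);

	if (memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
		   sizeof(curr_info.x86_capability)))
		pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
}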
#endif

/*
 * Invoked from core CPU hotplug code after hotplug operations
 */
void arch_smt_update(void)
{}

void __init arch_cpu_finalize_init(void)
{}