/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/mem_encrypt.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>
#include <linux/stackprotector.h>
#include <linux/utsname.h>
#include <asm/alternative.h>
#include <asm/cmdline.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/cacheinfo.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/fred.h>
#include <asm/uv/uv.h>
#include <asm/ia32.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/posted_intr.h>
#include <asm/runtime-const.h>
#include "cpu.h"
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(…);
u32 elf_hwcap2 __read_mostly;
unsigned int __max_threads_per_core __ro_after_init = …;
EXPORT_SYMBOL(…);
unsigned int __max_dies_per_package __ro_after_init = …;
EXPORT_SYMBOL(…);
unsigned int __max_logical_packages __ro_after_init = …;
EXPORT_SYMBOL(…);
unsigned int __num_cores_per_package __ro_after_init = …;
EXPORT_SYMBOL(…);
unsigned int __num_threads_per_package __ro_after_init = …;
EXPORT_SYMBOL(…);
static struct ppin_info { … } ppin_info[] = …;
static const struct x86_cpu_id ppin_cpuids[] = …;
static void ppin_init(struct cpuinfo_x86 *c)
{ … }
static void default_init(struct cpuinfo_x86 *c)
{ … }
static const struct cpu_dev default_cpu = …;
static const struct cpu_dev *this_cpu = …;
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = …;
EXPORT_PER_CPU_SYMBOL_GPL(…);
#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{ … }
early_param(…);
#endif
static int __init x86_noinvpcid_setup(char *s)
{ … }
early_param(…);
#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;
static int __init cachesize_setup(char *str)
{
get_option(&str, &cachesize_override);
return 1;
}
__setup("cachesize=", cachesize_setup);
static inline int flag_is_changeable_p(u32 flag)
{
u32 f1, f2;
asm volatile ("pushfl \n\t"
"pushfl \n\t"
"popl %0 \n\t"
"movl %0, %1 \n\t"
"xorl %2, %0 \n\t"
"pushl %0 \n\t"
"popfl \n\t"
"pushfl \n\t"
"popl %0 \n\t"
"popfl \n\t"
: "=&r" (f1), "=&r" (f2)
: "ir" (flag));
return ((f1^f2) & flag) != 0;
}
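/* Probe for the CPUID instruction: */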
int have_cpuid_p(void)
{
return flag_is_changeable_p(X86_EFLAGS_ID);
}
static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
unsigned long lo, hi;
if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
return;
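/* Disable the processor serial number: */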
rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
lo |= 0x200000;
wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
pr_notice("CPU serial number disabled.\n");
clear_cpu_cap(c, X86_FEATURE_PN);
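/* Disabling the serial number may affect the cpuid level: */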
c->cpuid_level = cpuid_eax(0);
}
static int __init x86_serial_nr_setup(char *s)
{
disable_x86_serial_nr = 0;
return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{ … }
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{ … }
#endif
static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{ … }
static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{ … }
static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{ … }
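/* These bits should not change their value after CPU init is finished: */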
static const unsigned long cr4_pinned_mask = …;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;
void native_write_cr0(unsigned long val)
{ … }
EXPORT_SYMBOL(…);
void __no_profile native_write_cr4(unsigned long val)
{ … }
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif
void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{ … }
EXPORT_SYMBOL(…);
unsigned long cr4_read_shadow(void)
{ … }
EXPORT_SYMBOL_GPL(…);
void cr4_init(void)
{ … }
static void __init setup_cr_pinning(void)
{ … }
static __init int x86_nofsgsbase_setup(char *arg)
{ … }
__setup(…);
static bool pku_disabled;
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{ … }
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{ … }
__setup(…);
#endif
#ifdef CONFIG_X86_KERNEL_IBT
__noendbr u64 ibt_save(bool disable)
{ … }
__noendbr void ibt_restore(u64 save)
{ … }
#endif
static __always_inline void setup_cet(struct cpuinfo_x86 *c)
{ … }
__noendbr void cet_disable(void)
{ … }
struct cpuid_dependent_feature { … };
static const struct cpuid_dependent_feature
cpuid_dependent_features[] = …;
static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{ … }
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{ … }
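/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */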
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(…);
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(…);
#ifdef CONFIG_X86_32
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif
void load_direct_gdt(int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void load_fixmap_gdt(int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void __init switch_gdt_and_percpu_base(int cpu)
{ … }
static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = …;
static void get_model_name(struct cpuinfo_x86 *c)
{ … }
void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{ … }
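/*
 * Last-level instruction (lli) and data (lld) TLB entry counts, per page
 * size, populated by the per-vendor TLB detection code called from
 * cpu_detect_tlb():
 */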
u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];
static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{ … }
static void get_cpu_vendor(struct cpuinfo_x86 *c)
{ … }
void cpu_detect(struct cpuinfo_x86 *c)
{ … }
static void apply_forced_caps(struct cpuinfo_x86 *c)
{ … }
static void init_speculation_control(struct cpuinfo_x86 *c)
{ … }
void get_cpu_cap(struct cpuinfo_x86 *c)
{ … }
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{ … }
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{ … }
#define NO_SPECULATION …
#define NO_MELTDOWN …
#define NO_SSB …
#define NO_L1TF …
#define NO_MDS …
#define MSBDS_ONLY …
#define NO_SWAPGS …
#define NO_ITLB_MULTIHIT …
#define NO_SPECTRE_V2 …
#define NO_MMIO …
#define NO_EIBRS_PBRSB …
#define NO_BHI …
#define VULNWL(vendor, family, model, whitelist) …
#define VULNWL_INTEL(vfm, whitelist) …
#define VULNWL_AMD(family, whitelist) …
#define VULNWL_HYGON(family, whitelist) …
static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = …;
#define VULNBL(vendor, family, model, blacklist) …
#define VULNBL_INTEL_STEPPINGS(vfm, steppings, issues) …
#define VULNBL_AMD(family, blacklist) …
#define VULNBL_HYGON(family, blacklist) …
#define SRBDS …
#define MMIO …
#define MMIO_SBDS …
#define RETBLEED …
#define SMT_RSB …
#define SRSO …
#define GDS …
#define RFDS …
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = …;
static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{ … }
u64 x86_read_arch_cap_msr(void)
{ … }
static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{ … }
static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{ … }
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{ … }
static void detect_nopl(void)
{ … }
static void __init cpu_parse_early_param(void)
{ … }
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{ … }
void __init early_cpu_init(void)
{ … }
static bool detect_null_seg_behavior(void)
{ … }
void check_null_seg_clears_base(struct cpuinfo_x86 *c)
{ … }
static void generic_identify(struct cpuinfo_x86 *c)
{ … }
static void identify_cpu(struct cpuinfo_x86 *c)
{ … }
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
struct tss_struct *tss;
int cpu;
if (!boot_cpu_has(X86_FEATURE_SEP))
return;
cpu = get_cpu();
tss = &per_cpu(cpu_tss_rw, cpu);
tss->x86_tss.ss1 = __KERNEL_CS;
wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
put_cpu();
}
#endif
static __init void identify_boot_cpu(void)
{ … }
void identify_secondary_cpu(struct cpuinfo_x86 *c)
{ … }
void print_cpu_info(struct cpuinfo_x86 *c)
{ … }
static __init int setup_clearcpuid(char *arg)
{ … }
__setup(…);
DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = …;
EXPORT_PER_CPU_SYMBOL(…);
EXPORT_PER_CPU_SYMBOL(…);
#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
fixed_percpu_data) __aligned(…) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(…);
static void wrmsrl_cstar(unsigned long val)
{ … }
static inline void idt_syscall_init(void)
{ … }
void syscall_init(void)
{ … }
#else
#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif
#endif
static void clear_all_debug_regs(void)
{ … }
#ifdef CONFIG_KGDB
static void dbg_restore_debug_regs(void)
{ … }
#else
#define dbg_restore_debug_regs …
#endif
static inline void setup_getcpu(int cpu)
{ … }
#ifdef CONFIG_X86_64
static inline void tss_setup_ist(struct tss_struct *tss)
{ … }
#else
static inline void tss_setup_ist(struct tss_struct *tss) { }
#endif
static inline void tss_setup_io_bitmap(struct tss_struct *tss)
{ … }
void cpu_init_exception_handling(bool boot_cpu)
{ … }
void __init cpu_init_replace_early_idt(void)
{ … }
void cpu_init(void)
{ … }
#ifdef CONFIG_MICROCODE_LATE_LOADING
void store_cpu_caps(struct cpuinfo_x86 *curr_info)
{ … }
void microcode_check(struct cpuinfo_x86 *prev_info)
{ … }
#endif
void arch_smt_update(void)
{ … }
void __init arch_cpu_finalize_init(void)
{ … }