#define pr_fmt(fmt) …
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/kexec.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>
#include <linux/stackprotector.h>
#include <linux/cpuhotplug.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <asm/cacheinfo.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/api.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <asm/microcode.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>
#include <asm/stackprotector.h>
#include <asm/sev.h>
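/* representing HT siblings of each logical CPU */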
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(…);
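/* representing HT and core siblings of each logical CPU */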
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(…);
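/* representing HT, core and die siblings of each logical CPU */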
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
EXPORT_PER_CPU_SYMBOL(…);
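/* CPUs which are the primary SMT threads */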
struct cpumask __cpu_primary_thread_mask __read_mostly;
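/* Representing CPUs for which sibling maps can be computed */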
static cpumask_var_t cpu_sibling_setup_mask;
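/*
 * Per-CPU handshake state for mwait_play_dead(): an "offline" CPU parks in
 * MWAIT monitoring this state, and kexec (via smp_kick_mwait_play_dead())
 * uses it to move such CPUs from MWAIT to HLT before the new kernel can
 * scribble over the monitored memory.
 */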
struct mwait_cpu_dead { … };
#define CPUDEAD_MWAIT_WAIT …
#define CPUDEAD_MWAIT_KEXEC_HLT …
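/*
 * Cache line aligned data for mwait_play_dead(). Separate on purpose so
 * that it is unlikely to be touched by other CPUs.
 */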
static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
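/* Maximum number of SMT threads on any online core */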
int __read_mostly __max_smt_threads = …;
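/* Flag to indicate if a complete sched domain rebuild is required */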
bool x86_topology_update;
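/*
 * Scheduler hook: report whether the CPU topology changed since the last
 * call by returning and then clearing x86_topology_update.
 */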
int arch_update_cpu_topology(void)
{ … }
static unsigned int smpboot_warm_reset_vector_count;
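/*
 * Legacy INIT-based wakeup uses the BIOS warm-reset machinery: a CMOS
 * shutdown code plus the warm-reset vector at physical 0x467 send a CPU
 * leaving INIT straight to start_eip. The count refcounts the vector so
 * concurrent bringups only tear it down after the last user.
 */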
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{ … }
static inline void smpboot_restore_warm_reset_vector(void)
{ … }
static void ap_starting(void)
{ … }
static void ap_calibrate_delay(void)
{ … }
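/*
 * Activate a secondary processor. Runs on the AP itself with a bare
 * environment; keep the work done before cpu_init() to a minimum.
 */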
static void notrace start_secondary(void *unused)
{ … }
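/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */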
void smp_store_cpu_info(int id)
{ … }
static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{ … }
static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{ … }
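/* Link two CPUs symmetrically: set each CPU in the other's mfunc() mask. */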
#define link_mask(mfunc, c1, c2) …
static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{ … }
static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{ … }
static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{ … }
static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{ … }
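/*
 * Intel parts where LLC and NUMA node boundaries disagree: Haswell and
 * Broadwell EP in Cluster-on-Die mode, plus Sub-NUMA-Cluster parts.
 * match_llc() below consults this list.
 */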
static const struct x86_cpu_id intel_cod_cpu[] = …;
static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{ … }
static inline int x86_sched_itmt_flags(void)
{ … }
#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{ … }
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{ … }
#endif
#ifdef CONFIG_SCHED_CLUSTER
static int x86_cluster_flags(void)
{ … }
#endif
static int x86_die_flags(void)
{ … }
static bool x86_has_numa_in_package;
static struct sched_domain_topology_level x86_topology[6];
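/*
 * Assemble x86_topology[] from the optional SMT, CLUSTER and MC levels
 * plus the final package level, then install it with set_sched_topology().
 */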
static void __init build_sched_topology(void)
{ … }
void set_cpu_sibling_map(int cpu)
{ … }
const struct cpumask *cpu_coregroup_mask(int cpu)
{ … }
const struct cpumask *cpu_clustergroup_mask(int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
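/*
 * Usage sketch (illustrative, not part of this file): the masks above back
 * the scheduler's MC and CLUSTER levels. cpu_coregroup_mask() returns the
 * CPUs sharing a last level cache with @cpu, e.g.:
 *
 *	unsigned int sibling;
 *
 *	for_each_cpu(sibling, cpu_coregroup_mask(cpu))
 *		pr_info("CPU%u shares LLC with CPU%u\n", cpu, sibling);
 */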
static void impress_friends(void)
{ … }
#define UDELAY_10MS_DEFAULT …
static unsigned int init_udelay = …;
static int __init cpu_init_udelay(char *str)
{ … }
early_param(…);
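/*
 * Modern processors no longer need the historical 10ms delay between the
 * INIT and STARTUP IPIs; this quirk lowers init_udelay for them unless the
 * delay was overridden via the early parameter above.
 */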
static void __init smp_quirk_init_udelay(void)
{ … }
static void send_init_sequence(u32 phys_apicid)
{ … }
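/*
 * Boot the AP via the "universal" startup algorithm: an INIT assert/
 * deassert sequence (see send_init_sequence() above) followed by STARTUP
 * IPIs. start_eip must be a 4K aligned real-mode address below 1M, as the
 * STARTUP vector encodes start_eip >> 12.
 */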
static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip)
{ … }
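/* reduce the number of lines printed when booting a large cpu count system */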
static void announce_cpu(int cpu, int apicid)
{ … }
int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{ … }
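/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if startup was successfully sent, else error code from
 * ->wakeup_secondary_cpu.
 */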
static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
{ … }
int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
{ … }
int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
{ … }
void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
{ … }
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{ … }
void arch_cpuhp_sync_state_poll(void)
{ … }
void __init arch_disable_smp_support(void)
{ … }
static __init void disable_smp(void)
{ … }
void __init smp_prepare_cpus_common(void)
{ … }
void __init smp_prepare_boot_cpu(void)
{ … }
#ifdef CONFIG_X86_64
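/*
 * Decide whether the APs can be brought up in parallel. Roughly: each AP
 * must be able to determine its own APIC ID safely very early in startup,
 * which excludes some configurations (e.g. certain encrypted guests).
 */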
bool __init arch_cpuhp_init_parallel_bringup(void)
{ … }
#endif
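/*
 * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs
 */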
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{ … }
void arch_thaw_secondary_cpus_begin(void)
{ … }
void arch_thaw_secondary_cpus_end(void)
{ … }
void __init native_smp_prepare_boot_cpu(void)
{ … }
void __init native_smp_cpus_done(unsigned int max_cpus)
{ … }
void __init setup_cpu_local_masks(void)
{ … }
#ifdef CONFIG_HOTPLUG_CPU
static void recompute_smt_state(void)
{ … }
static void remove_siblinginfo(int cpu)
{ … }
static void remove_cpu_from_maps(int cpu)
{ … }
void cpu_disable_common(void)
{ … }
int native_cpu_disable(void)
{ … }
void play_dead_common(void)
{ … }
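/*
 * Park the dead CPU in MWAIT, monitoring its per-CPU mwait_cpu_dead line.
 * The caches are flushed before sleeping, lest dirty data survive into
 * the next wakeup.
 */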
static inline void mwait_play_dead(void)
{ … }
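/*
 * Kick all "offline" CPUs out of MWAIT on kexec(). See the comment in
 * mwait_play_dead().
 */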
void smp_kick_mwait_play_dead(void)
{ … }
void __noreturn hlt_play_dead(void)
{ … }
void native_play_dead(void)
{ … }
#else
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_play_dead(void)
{
	BUG();
}
#endif