linux/arch/x86/kernel/smpboot.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <[email protected]>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <[email protected]>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Martin J. Bligh	: 	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *      Andi Kleen              :       Converted to new state machine.
 *	Ashok Raj		: 	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/kexec.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>
#include <linux/stackprotector.h>
#include <linux/cpuhotplug.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>

#include <asm/acpi.h>
#include <asm/cacheinfo.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/api.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <asm/microcode.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>
#include <asm/stackprotector.h>
#include <asm/sev.h>

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* representing HT, core, and die siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
EXPORT_PER_CPU_SYMBOL(cpu_die_map);

/* CPUs which are the primary SMT threads */
struct cpumask __cpu_primary_thread_mask __read_mostly;

/* Representing CPUs for which sibling maps can be computed */
static cpumask_var_t cpu_sibling_setup_mask;
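
/*
 * Illustrative sketch (not part of the original file): the per-CPU masks
 * above are normally consumed through the topology_*_cpumask() accessors.
 * The helper name dump_smt_siblings_sketch() is hypothetical and only
 * shows how one might walk the SMT siblings recorded in cpu_sibling_map.
 */
static __maybe_unused void dump_smt_siblings_sketch(unsigned int cpu)
{
	unsigned int sibling;

	/* cpu_sibling_map holds the HT siblings of @cpu, including @cpu itself */
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		pr_info("CPU%u is an SMT sibling of CPU%u\n", sibling, cpu);
}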

struct mwait_cpu_dead {};

#define CPUDEAD_MWAIT_WAIT
#define CPUDEAD_MWAIT_KEXEC_HLT

/*
 * Cache line aligned data for mwait_play_dead(). Separate on purpose so
 * that it's unlikely to be touched by other CPUs.
 */
static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
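
/*
 * Illustrative sketch only (the real layout and the CPUDEAD_* values are
 * elided above): mwait_cpu_dead acts as a tiny per-CPU mailbox. The dying
 * CPU arms MONITOR/MWAIT on it, and another CPU can write to it (e.g. on
 * kexec) to kick the parked CPU out of MWAIT. Cache-line alignment gives
 * each mailbox its own line, so unrelated memory traffic does not wake
 * the parked CPU. The field names below are assumptions, not the actual
 * definition.
 */
struct mwait_cpu_dead_sketch {
	unsigned int	control;	/* written by a remote CPU to wake us */
	unsigned int	status;		/* written back by the parked CPU */
};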

/* Maximum number of SMT threads on any online core */
int __read_mostly __max_smt_threads = 1;

/* Flag to indicate if a complete sched domain rebuild is required */
bool x86_topology_update;

int arch_update_cpu_topology(void)
{}

static unsigned int smpboot_warm_reset_vector_count;

static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{}

static inline void smpboot_restore_warm_reset_vector(void)
{}

/* Run the next set of setup steps for the upcoming CPU */
static void ap_starting(void)
{}

static void ap_calibrate_delay(void)
{}

/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void smp_store_cpu_info(int id)
{}
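
/*
 * Minimal sketch of the idea (illustrative only, not the elided body):
 * copy the boot CPU's cpuinfo into this CPU's slot on first bringup and
 * then re-identify the CPU. identify_secondary_cpu() and the
 * c->initialized flag are assumptions about the surrounding kernel code.
 */
static __maybe_unused void smp_store_cpu_info_sketch(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	if (!c->initialized)
		*c = boot_cpu_data;	/* start from the boot CPU's data */
	c->cpu_index = id;
	identify_secondary_cpu(c);	/* refresh per-CPU feature info */
	c->initialized = true;
}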

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{}

#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{}

static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{}

static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node.  If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{}
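
/*
 * Minimal sketch of a package match (illustrative only; the real
 * match_pkg() body is elided above). It relies on the stable
 * topology_physical_package_id() accessor.
 */
static __maybe_unused bool match_pkg_sketch(unsigned int cpu1, unsigned int cpu2)
{
	/* Two CPUs are package siblings iff their physical package IDs agree */
	return topology_physical_package_id(cpu1) ==
	       topology_physical_package_id(cpu2);
}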

/*
 * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
 *
 * Any Intel CPU that has multiple nodes per package and does not
 * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
 *
 * When in SNC mode, these CPUs enumerate an LLC that is shared
 * by multiple NUMA nodes. The LLC is shared for off-package data
 * access but private to the NUMA node (half of the package) for
 * on-package access. CPUID (the source of the information about
 * the LLC) can only enumerate the cache as shared or unshared,
 * but not this particular configuration.
 */

static const struct x86_cpu_id intel_cod_cpu[] =;

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{}
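
/*
 * Illustrative sketch of the LLC-sibling decision described above
 * (hypothetical helper; the real match_llc() also consults intel_cod_cpu[]
 * and topology_sane(), and the topo.llc_id field name is an assumption):
 */
static __maybe_unused bool match_llc_sketch(unsigned int cpu1, unsigned int cpu2,
					    bool snc_mode)
{
	/*
	 * In SNC mode the enumerated "shared" LLC spans NUMA nodes, so only
	 * CPUs in the same NUMA node are treated as LLC siblings.
	 */
	if (snc_mode)
		return cpu_to_node(cpu1) == cpu_to_node(cpu2);

	/* Otherwise trust the LLC ID that CPUID enumerated. */
	return cpu_data(cpu1).topo.llc_id == cpu_data(cpu2).topo.llc_id;
}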


static inline int x86_sched_itmt_flags(void)
{}

#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{}
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{}
#endif
#ifdef CONFIG_SCHED_CLUSTER
static int x86_cluster_flags(void)
{}
#endif

static int x86_die_flags(void)
{}

/*
 * Set if a package/die has multiple NUMA nodes inside.
 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 * Sub-NUMA Clustering have this.
 */
static bool x86_has_numa_in_package;

static struct sched_domain_topology_level x86_topology[6];

static void __init build_sched_topology(void)
{}

void set_cpu_sibling_map(int cpu)
{}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{}
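
/*
 * Sketch of the usual relationship (illustrative, not necessarily the
 * elided body): the multi-core scheduling group is the set of CPUs that
 * share the last level cache, i.e. cpu_llc_shared_mask().
 */
static __maybe_unused const struct cpumask *cpu_coregroup_mask_sketch(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}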

const struct cpumask *cpu_clustergroup_mask(int cpu)
{}
EXPORT_SYMBOL_GPL(cpu_clustergroup_mask);

static void impress_friends(void)
{}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to override this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UDELAY_10MS_DEFAULT;

static int __init cpu_init_udelay(char *str)
{}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{}
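
/*
 * Sketch of the command-line override and the quirk described above
 * (illustrative only; kstrtouint() stands in for whatever parsing the real
 * handler does, and the vendor/family test is a simplified assumption --
 * the real quirk also covers AMD and other cases):
 */
static __maybe_unused int cpu_init_udelay_sketch(char *str)
{
	/* "cpu_init_udelay=<usecs>" overrides the MP-spec 10ms default */
	return kstrtouint(str, 0, &init_udelay);
}

static __maybe_unused void smp_quirk_init_udelay_sketch(void)
{
	/* Modern processor families do not need the INIT de-assert delay */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 >= 6)
		init_udelay = 0;
}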

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 */
static void send_init_sequence(u32 phys_apicid)
{}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 */
static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip)
{}
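
/*
 * Illustrative sketch of the INIT, INIT, STARTUP protocol (heavily
 * simplified; the real code also waits for ICR idle, checks the APIC error
 * status, honours the init_udelay quirks and may send a second STARTUP IPI):
 */
static __maybe_unused void wakeup_via_init_sketch(u32 phys_apicid,
						  unsigned long start_eip)
{
	/* 1) Assert INIT: puts the target AP into the wait-for-SIPI state */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);
	udelay(init_udelay);

	/* 2) De-assert INIT */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	/* 3) STARTUP IPI: the vector encodes the real-mode start address >> 12 */
	apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), phys_apicid);
}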

/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{}

int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{}

/*
 * NOTE - on most systems this is a PHYSICAL APIC ID, but on multiquad
 * (i.e., clustered APIC addressing mode) it is a LOGICAL APIC ID.
 * Returns zero if startup was successfully sent, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
{}
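
/*
 * Sketch of the wakeup dispatch mentioned above (illustrative; the exact
 * call sequence around ->wakeup_secondary_cpu is assumed, not copied):
 */
static __maybe_unused int do_cpu_wakeup_sketch(u32 apicid, unsigned long start_eip)
{
	/* Prefer a platform-specific wakeup method when one is provided */
	if (apic->wakeup_secondary_cpu)
		return apic->wakeup_secondary_cpu(apicid, start_eip);

	/* Otherwise fall back to the architectural INIT/INIT/STARTUP dance */
	return wakeup_secondary_cpu_via_init(apicid, start_eip);
}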

int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
{}

int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
{}

void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
{}

void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{}

void arch_cpuhp_sync_state_poll(void)
{}

/**
 * arch_disable_smp_support() - Disables SMP support for x86 at boot time
 */
void __init arch_disable_smp_support(void)
{}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{}

void __init smp_prepare_cpus_common(void)
{}

void __init smp_prepare_boot_cpu(void)
{}

#ifdef CONFIG_X86_64
/* Establish whether parallel bringup can be supported. */
bool __init arch_cpuhp_init_parallel_bringup(void)
{}
#endif

/*
 * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
 *            for common interface support.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{}

void arch_thaw_secondary_cpus_begin(void)
{}

void arch_thaw_secondary_cpus_end(void)
{}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{}

void __init native_smp_cpus_done(unsigned int max_cpus)
{}

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{}

#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs when a CPU goes offline */
static void recompute_smt_state(void)
{}

static void remove_siblinginfo(int cpu)
{}

static void remove_cpu_from_maps(int cpu)
{}

void cpu_disable_common(void)
{}

int native_cpu_disable(void)
{}

void play_dead_common(void)
{}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{}
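
/*
 * Minimal sketch of the cache-flush-then-MWAIT idea described above
 * (illustrative only; the real mwait_play_dead() picks the deepest MWAIT
 * hint from CPUID and parks on the mwait_cpu_dead mailbox so that kexec
 * can later wake the CPU):
 */
static __maybe_unused void mwait_play_dead_sketch(unsigned int eax_hint)
{
	struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);

	/* Push dirty lines out before the core stops participating in coherency */
	wbinvd();

	for (;;) {
		/* Arm the monitor on our per-CPU mailbox, then drop into MWAIT */
		__monitor(md, 0, 0);
		mb();
		__mwait(eax_hint, 0);
	}
}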

/*
 * Kick all "offline" CPUs out of mwait on kexec(). See comment in
 * mwait_play_dead().
 */
void smp_kick_mwait_play_dead(void)
{}

void __noreturn hlt_play_dead(void)
{}

/*
 * native_play_dead() is essentially a __noreturn function, but it can't
 * be marked as such because the compiler may complain about it.
 */
void native_play_dead(void)
{}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_play_dead(void)
{
	BUG();
}

#endif