#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <linux/cc_platform.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
#include "smpboot.h"
struct cpuhp_cpu_state { … };
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = …;
#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map = …;
static struct lockdep_map cpuhp_state_down_map = …;
static inline void cpuhp_lock_acquire(bool bringup)
{ … }
static inline void cpuhp_lock_release(bool bringup)
{ … }
#else
static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }
#endif
struct cpuhp_step { … };
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{ … }
static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
{ … }
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
bool bringup, struct hlist_node *node,
struct hlist_node **lastp)
{ … }
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{ … }
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{ … }
static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{ … }
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{ … }
enum cpuhp_sync_state { … };
#ifdef CONFIG_HOTPLUG_CORE_SYNC
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
{ … }
void __weak arch_cpuhp_sync_state_poll(void) { … }
static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
enum cpuhp_sync_state next_state)
{ … }
#else
static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
#endif
#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
void cpuhp_ap_report_dead(void)
{ … }
void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { … }
static void cpuhp_bp_sync_dead(unsigned int cpu)
{ … }
#else
static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
#endif
#ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
void cpuhp_ap_sync_alive(void)
{ … }
static bool cpuhp_can_boot_ap(unsigned int cpu)
{ … }
void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { … }
static int cpuhp_bp_sync_alive(unsigned int cpu)
{ … }
#else
static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
#endif
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(…);
void cpu_maps_update_begin(void)
{ … }
void cpu_maps_update_done(void)
{ … }
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
DEFINE_STATIC_PERCPU_RWSEM(…);
static bool cpu_hotplug_offline_disabled __ro_after_init;
void cpus_read_lock(void)
{ … }
EXPORT_SYMBOL_GPL(…);
int cpus_read_trylock(void)
{ … }
EXPORT_SYMBOL_GPL(…);
void cpus_read_unlock(void)
{ … }
EXPORT_SYMBOL_GPL(…);
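/*
 * Illustrative sketch, not part of this file: callers that must keep the set
 * of online CPUs stable while walking it bracket the walk with the read side
 * of the hotplug lock taken above.  cpus_read_lock() may sleep, so this must
 * not be done from atomic context.  example_walk_online_cpus() is a
 * hypothetical name used only for illustration.
 */
static void __maybe_unused example_walk_online_cpus(void)
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		pr_info("cpu%u is online\n", cpu);	/* stand-in per-CPU work */
	cpus_read_unlock();
}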
void cpus_write_lock(void)
{ … }
void cpus_write_unlock(void)
{ … }
void lockdep_assert_cpus_held(void)
{ … }
#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{ … }
#endif
static void lockdep_acquire_cpus_lock(void)
{ … }
static void lockdep_release_cpus_lock(void)
{ … }
void cpu_hotplug_disable_offlining(void)
{ … }
void cpu_hotplug_disable(void)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __cpu_hotplug_enable(void)
{ … }
void cpu_hotplug_enable(void)
{ … }
EXPORT_SYMBOL_GPL(…);
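/*
 * Illustrative sketch, not part of this file: cpu_hotplug_disable() and
 * cpu_hotplug_enable() must be strictly paired.  While the disable count is
 * elevated, online/offline requests fail with -EBUSY instead of blocking,
 * which distinguishes this from cpus_read_lock() above.  The function name
 * is hypothetical.
 */
static void __maybe_unused example_hotplug_disabled_section(void)
{
	cpu_hotplug_disable();
	/* ... work that cannot tolerate CPUs coming or going ... */
	cpu_hotplug_enable();
}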
#else
static void lockdep_acquire_cpus_lock(void)
{
}
static void lockdep_release_cpus_lock(void)
{
}
#endif
void __weak arch_smt_update(void) { … }
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = …;
static unsigned int cpu_smt_max_threads __ro_after_init;
unsigned int cpu_smt_num_threads __read_mostly = …;
void __init cpu_smt_disable(bool force)
{ … }
void __init cpu_smt_set_num_threads(unsigned int num_threads,
unsigned int max_threads)
{ … }
static int __init smt_cmdline_disable(char *str)
{ … }
early_param(…);
static inline bool cpu_smt_thread_allowed(unsigned int cpu)
{ … }
static inline bool cpu_bootable(unsigned int cpu)
{ … }
bool cpu_smt_possible(void)
{ … }
EXPORT_SYMBOL_GPL(…);
#else
static inline bool cpu_bootable(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{ … }
static inline void
cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state prev_state)
{ … }
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{ … }
static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{ … }
static int bringup_wait_for_ap_online(unsigned int cpu)
{ … }
#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
static int cpuhp_kick_ap_alive(unsigned int cpu)
{ … }
static int cpuhp_bringup_ap(unsigned int cpu)
{ … }
#else
static int bringup_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	if (!cpuhp_can_boot_ap(cpu))
		return -EAGAIN;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * set up the vector space for the CPU which comes online.
	 * Prevent irq alloc/free across the bringup by acquiring the
	 * sparse irq lock.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret)
		goto out_unlock;

	/* Wait until the booting CPU has reported itself alive. */
	ret = cpuhp_bp_sync_alive(cpu);
	if (ret)
		goto out_unlock;

	/* Wait until the AP has reached CPUHP_AP_ONLINE_IDLE. */
	ret = bringup_wait_for_ap_online(cpu);
	if (ret)
		goto out_unlock;

	irq_unlock_sparse();

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	/* Let the hotplug thread drive the AP through the remaining states. */
	return cpuhp_kick_ap(cpu, st, st->target);

out_unlock:
	irq_unlock_sparse();
	return ret;
}
#endif
static int finish_cpu(unsigned int cpu)
{ … }
static bool cpuhp_next_state(bool bringup,
enum cpuhp_state *state_to_run,
struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{ … }
static int __cpuhp_invoke_callback_range(bool bringup,
unsigned int cpu,
struct cpuhp_cpu_state *st,
enum cpuhp_state target,
bool nofail)
{ … }
static inline int cpuhp_invoke_callback_range(bool bringup,
unsigned int cpu,
struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{ … }
static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
unsigned int cpu,
struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{ … }
static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{ … }
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{ … }
static int cpuhp_should_run(unsigned int cpu)
{ … }
static void cpuhp_thread_fun(unsigned int cpu)
{ … }
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{ … }
static int cpuhp_kick_ap_work(unsigned int cpu)
{ … }
static struct smp_hotplug_thread cpuhp_threads = …;
static __init void cpuhp_init_state(void)
{ … }
void __init cpuhp_threads_init(void)
{ … }
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) …
#endif
void clear_tasks_mm_cpumask(int cpu)
{ … }
static int take_cpu_down(void *_param)
{ … }
static int takedown_cpu(unsigned int cpu)
{ … }
static void cpuhp_complete_idle_dead(void *arg)
{ … }
void cpuhp_report_idle_dead(void)
{ … }
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{ … }
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
enum cpuhp_state target)
{ … }
struct cpu_down_work { … };
static long __cpu_down_maps_locked(void *arg)
{ … }
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{ … }
static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{ … }
int cpu_device_down(struct device *dev)
{ … }
int remove_cpu(unsigned int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{ … }
#else
#define takedown_cpu …
#endif
void notify_cpu_starting(unsigned int cpu)
{ … }
void cpuhp_online_idle(enum cpuhp_state state)
{ … }
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{ … }
static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{ … }
int cpu_device_up(struct device *dev)
{ … }
int add_cpu(unsigned int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
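/*
 * Illustrative sketch, not part of this file: remove_cpu() and add_cpu(),
 * exported above, are the entry points for callers outside the CPU device
 * core; a caller that temporarily needs a CPU offline would pair them.  The
 * function name is hypothetical and error handling is minimal.
 */
static int __maybe_unused example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);		/* take the CPU fully offline */
	if (ret)
		return ret;

	return add_cpu(cpu);		/* bring it back online */
}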
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{ … }
static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
enum cpuhp_state target)
{ … }
#ifdef CONFIG_HOTPLUG_PARALLEL
static bool __cpuhp_parallel_bringup __ro_after_init = …;
static int __init parallel_bringup_parse_param(char *arg)
{ … }
early_param(…);
static inline bool cpuhp_smt_aware(void)
{ … }
static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{ … }
static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
{ … }
#else
static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
#endif
void __init bringup_nonboot_cpus(unsigned int max_cpus)
{ … }
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
int freeze_secondary_cpus(int primary)
{ … }
void __weak arch_thaw_secondary_cpus_begin(void)
{ … }
void __weak arch_thaw_secondary_cpus_end(void)
{ … }
void thaw_secondary_cpus(void)
{ … }
static int __init alloc_frozen_cpus(void)
{ … }
core_initcall(alloc_frozen_cpus);
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
unsigned long action, void *ptr)
{ … }
static int __init cpu_hotplug_pm_sync_init(void)
{ … }
core_initcall(cpu_hotplug_pm_sync_init);
#endif
int __boot_cpu_id;
#endif
static struct cpuhp_step cpuhp_hp_states[] = …;
static int cpuhp_cb_check(enum cpuhp_state state)
{ … }
static int cpuhp_reserve_state(enum cpuhp_state state)
{ … }
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{ … }
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{ … }
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{ … }
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
struct hlist_node *node)
{ … }
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
struct hlist_node *node,
bool invoke)
{ … }
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
bool invoke)
{ … }
EXPORT_SYMBOL_GPL(…);
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{ … }
EXPORT_SYMBOL(…);
int __cpuhp_setup_state(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu),
bool multi_instance)
{ … }
EXPORT_SYMBOL(…);
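/*
 * Illustrative sketch, not part of this file: subsystems normally reach the
 * functions above through the cpuhp_setup_state()/cpuhp_remove_state()
 * wrappers in <linux/cpuhotplug.h>.  With CPUHP_AP_ONLINE_DYN a dynamic state
 * number is allocated and returned on success; it must be handed back when
 * the callbacks are removed.  All "example_*" names are hypothetical.
 */
static int example_hp_state;

static int example_cpu_online(unsigned int cpu)
{
	/* set up per-CPU resources for @cpu */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* quiesce and release per-CPU resources for @cpu */
	return 0;
}

static int __maybe_unused example_register_hp_callbacks(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/driver:online",
				example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;

	example_hp_state = ret;		/* remember the allocated state */
	return 0;
}

static void __maybe_unused example_unregister_hp_callbacks(void)
{
	cpuhp_remove_state(example_hp_state);
}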
int __cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node, bool invoke)
{ … }
EXPORT_SYMBOL_GPL(…);
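/*
 * Illustrative sketch, not part of this file: multi-instance states are
 * normally set up once with cpuhp_setup_state_multi() and then given one
 * instance per object via cpuhp_state_add_instance() and
 * cpuhp_state_remove_instance(), using a struct hlist_node embedded in the
 * object as the handle.  The structure and function names are hypothetical.
 */
struct example_instance {
	struct hlist_node node;		/* handle for the cpuhp core */
	/* per-object state ... */
};

static int __maybe_unused example_add_instance(enum cpuhp_state state,
					       struct example_instance *inst)
{
	/* Runs the state's startup callback for @inst on all online CPUs. */
	return cpuhp_state_add_instance(state, &inst->node);
}

static int __maybe_unused example_remove_instance(enum cpuhp_state state,
						  struct example_instance *inst)
{
	return cpuhp_state_remove_instance(state, &inst->node);
}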
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{ … }
EXPORT_SYMBOL(…);
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{ … }
static void cpuhp_online_cpu_device(unsigned int cpu)
{ … }
int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{ … }
int cpuhp_smt_enable(void)
{ … }
#endif
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(state);
static ssize_t target_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{ … }
static ssize_t target_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RW(target);
static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{ … }
static ssize_t fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RW(fail);
static struct attribute *cpuhp_cpu_attrs[] = …;
static const struct attribute_group cpuhp_cpu_attr_group = …;
static ssize_t states_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(states);
static struct attribute *cpuhp_cpu_root_attrs[] = …;
static const struct attribute_group cpuhp_cpu_root_attr_group = …;
#ifdef CONFIG_HOTPLUG_SMT
static bool cpu_smt_num_threads_valid(unsigned int threads)
{ … }
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{ … }
#else
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	return -ENODEV;
}
#endif
static const char *smt_states[] = …;
static ssize_t control_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static ssize_t control_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{ … }
static DEVICE_ATTR_RW(control);
static ssize_t active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(active);
static struct attribute *cpuhp_smt_attrs[] = …;
static const struct attribute_group cpuhp_smt_attr_group = …;
static int __init cpu_smt_sysfs_init(void)
{ … }
static int __init cpuhp_sysfs_init(void)
{ … }
device_initcall(cpuhp_sysfs_init);
#endif
#define MASK_DECLARE_1(x) …
#define MASK_DECLARE_2(x) …
#define MASK_DECLARE_4(x) …
#define MASK_DECLARE_8(x) …
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = …;
EXPORT_SYMBOL_GPL(…);
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = …;
EXPORT_SYMBOL(…);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __ro_after_init
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __ro_after_init;
#endif
EXPORT_SYMBOL(…);
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(…);
struct cpumask __cpu_enabled_mask __read_mostly;
EXPORT_SYMBOL(…);
struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(…);
struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(…);
struct cpumask __cpu_dying_mask __read_mostly;
EXPORT_SYMBOL(…);
atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(…);
void init_cpu_present(const struct cpumask *src)
{ … }
void init_cpu_possible(const struct cpumask *src)
{ … }
void init_cpu_online(const struct cpumask *src)
{ … }
void set_cpu_online(unsigned int cpu, bool online)
{ … }
void __init boot_cpu_init(void)
{ … }
void __init boot_cpu_hotplug_init(void)
{ … }
#ifdef CONFIG_CPU_MITIGATIONS
enum cpu_mitigations { … };
static enum cpu_mitigations cpu_mitigations __ro_after_init = …;
static int __init mitigations_parse_cmdline(char *arg)
{ … }
bool cpu_mitigations_off(void)
{ … }
EXPORT_SYMBOL_GPL(…);
bool cpu_mitigations_auto_nosmt(void)
{ … }
EXPORT_SYMBOL_GPL(…);
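/*
 * Illustrative sketch, not part of this file: architecture mitigation
 * selection code typically consults the helpers above when choosing a
 * default.  The function name is hypothetical.
 */
static void __init __maybe_unused example_select_mitigation(void)
{
	if (cpu_mitigations_off())
		return;			/* "mitigations=off": leave mitigations disabled */

	if (cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);	/* "mitigations=auto,nosmt" */
}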
#else
static int __init mitigations_parse_cmdline(char *arg)
{
	pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");

	return 0;
}
#endif
early_param(…);