// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors. May
 * be shared by works on different cpus.
 */
struct cpu_stop_done { … };

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper { … };

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = …;

void print_stop_info(const char *log_lvl, struct task_struct *task) { … }

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) { … }

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done) { … }

static void __cpu_stop_queue_work(struct cpu_stopper *stopper, struct cpu_stop_work *work, struct wake_q_head *wakeq) { … }

/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) { … }

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it. This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes. If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus. @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) { … }
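/*
 * Illustrative usage sketch (not part of the original file) for
 * stop_one_cpu() as documented above. The callback, argument struct and
 * caller below are hypothetical stand-ins, not existing kernel code.
 */
struct example_stop_arg {
	int	val;	/* hypothetical per-cpu state to update */
};

/* Runs on the target CPU at stopper priority, monopolizing that CPU. */
static int example_stop_fn(void *data)
{
	struct example_stop_arg *arg = data;

	arg->val = 1;	/* stand-in for the real per-cpu update */
	return 0;
}

static int example_stop_caller(unsigned int cpu)
{
	struct example_stop_arg arg = { };

	/* Might sleep; returns -ENOENT if @cpu was offline. */
	return stop_one_cpu(cpu, example_stop_fn, &arg);
}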
/* This controls the threads on each CPU. */
enum multi_stop_state { … };

struct multi_stop_data { … };

static void set_state(struct multi_stop_data *msdata, enum multi_stop_state newstate) { … }

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata) { … }

notrace void __weak stop_machine_yield(const struct cpumask *cpumask) { … }

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data) { … }

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, int cpu2, struct cpu_stop_work *work2) { … }

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg) { … }

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf) { … }
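/*
 * Illustrative usage sketch (not part of the original file) for
 * stop_one_cpu_nowait() as documented above. The per-cpu work buffer and
 * the callback are hypothetical stand-ins, not existing kernel code.
 */
static DEFINE_PER_CPU(struct cpu_stop_work, example_nowait_work);

static int example_nowait_fn(void *arg)
{
	/* Runs later on the target CPU; the caller did not wait for this. */
	return 0;
}

static void example_kick_cpu(unsigned int cpu)
{
	/*
	 * The caller must keep the work buffer unused and untouched until
	 * the stopper starts executing the callback. A false return means
	 * the work was not queued and the callback will never run.
	 */
	if (!stop_one_cpu_nowait(cpu, example_nowait_fn, NULL,
				 &per_cpu(example_nowait_work, cpu)))
		pr_debug("stopper not available on cpu %u\n", cpu);
}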
static bool queue_stop_cpus_work(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg, struct cpu_stop_done *done) { … }

static int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) { … }

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it. This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes. If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus. @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) { … }

static int cpu_stop_should_run(unsigned int cpu) { … }

static void cpu_stopper_thread(unsigned int cpu) { … }

void stop_machine_park(int cpu) { … }

static void cpu_stop_create(unsigned int cpu) { … }

static void cpu_stop_park(unsigned int cpu) { … }

void stop_machine_unpark(int cpu) { … }

static struct smp_hotplug_thread cpu_stop_threads = …;

static int __init cpu_stop_init(void) { … }
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { … }

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { … }
EXPORT_SYMBOL_GPL(…);

#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data) { … }
EXPORT_SYMBOL_GPL(…);
#endif

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active. The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive. Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) { … }
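/*
 * Illustrative usage sketch (not part of the original file): a hedged
 * example of calling stop_machine(), which is exported above. The callback
 * and caller are hypothetical; the pattern shown is the usual one of
 * updating state that no other CPU may observe mid-update.
 */
static int example_machine_wide_fn(void *data)
{
	int *new_setting = data;

	/*
	 * Runs while every other online CPU spins in the stopper with
	 * interrupts disabled, so no CPU is inside a preempt- or
	 * IRQ-disabled region when this executes.
	 */
	return *new_setting >= 0 ? 0 : -EINVAL;
}

static int example_apply_setting(int new_setting)
{
	/* Might sleep; the return value propagates from the callback. */
	return stop_machine(example_machine_wide_fn, &new_setting, NULL);
}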