linux/kernel/rcu/tasks.h

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @lazy_timer: Timer to unlazify callbacks.
 * @urgent_gp: Number of additional non-lazy grace periods.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {};
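
// A sketch of the fields implied by the kernel-doc above (types inferred
// from the field descriptions; the in-tree definition may differ in detail
// and in member ordering):
//
//	struct rcu_tasks_percpu {
//		struct rcu_segcblist cblist;
//		raw_spinlock_t lock;
//		unsigned long rtp_jiffies;
//		struct timer_list lazy_timer;
//		unsigned int urgent_gp;
//		unsigned long rtp_n_lock_retries;
//		struct work_struct rtp_work;
//		struct irq_work rtp_irq_work;
//		struct rcu_head barrier_q_head;
//		struct list_head rtp_blkd_tasks;
//		struct list_head rtp_exit_list;
//		int cpu;
//		struct rcu_tasks *rtpp;
//	};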

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {};
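
// Likewise, a sketch of the flavor-wide state implied by the kernel-doc
// above (types inferred; the in-tree definition may differ):
//
//	struct rcu_tasks {
//		struct rcuwait cbs_wait;
//		raw_spinlock_t cbs_gbl_lock;
//		struct mutex tasks_gp_mutex;
//		int gp_state;
//		int gp_sleep;
//		int init_fract;
//		unsigned long gp_jiffies;
//		unsigned long gp_start;
//		unsigned long tasks_gp_seq;
//		unsigned long n_ipis;
//		unsigned long n_ipis_fails;
//		struct task_struct *kthread_ptr;
//		unsigned long lazy_jiffies;
//		rcu_tasks_gp_func_t gp_func;
//		pregp_func_t pregp_func;
//		pertask_func_t pertask_func;
//		postscan_func_t postscan_func;
//		holdouts_func_t holdouts_func;
//		postgp_func_t postgp_func;
//		call_rcu_func_t call_func;
//		unsigned int wait_state;
//		struct rcu_tasks_percpu __percpu *rtpcpu;
//		int percpu_enqueue_shift;
//		int percpu_enqueue_lim;
//		int percpu_dequeue_lim;
//		unsigned long percpu_dequeue_gpseq;
//		struct mutex barrier_q_mutex;
//		atomic_t barrier_q_count;
//		struct completion barrier_q_completion;
//		unsigned long barrier_q_seq;
//		char *name;
//		char *kname;
//	};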

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)
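
// Roughly, DEFINE_RCU_TASKS() instantiates the per-CPU state and ties the
// flavor's grace-period function, call_rcu()-style function, and textual
// name together.  A simplified sketch of the expansion (an assumption; the
// in-tree macro also initializes the per-CPU lock, the lazy timer, and the
// IRQ-work handler declared above):
//
//	#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
//	static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu);	\
//	static struct rcu_tasks rt_name = {					\
//		.gp_func = gp,							\
//		.call_func = call,						\
//		.rtpcpu = &rt_name ## __percpu,					\
//		.name = n,							\
//		.kname = #rt_name,						\
//	}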

#ifdef CONFIG_TASKS_RCU

/* Report delay of scan exiting tasklist in rcu_tasks_postscan(). */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
#endif

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
static int rcu_task_lazy_lim __read_mostly = 32;
module_param(rcu_task_lazy_lim, int, 0444);
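
// These module parameters take the "rcupdate." prefix when this file is
// built into update.c, so, for example, a ten-minute stall timeout at
// HZ=100 (an illustrative value; the unit is jiffies) can be requested on
// the kernel command line with:
//
//	rcupdate.rcu_task_stall_timeout=60000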

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT", "RTGS_WAIT_WAIT_CBS", "RTGS_WAIT_GP", "RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST", "RTGS_POST_SCAN_TASKLIST", "RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS", "RTGS_POST_GP", "RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS", "RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.  Do not enqueue callbacks before this function is invoked.
static void cblist_init_generic(struct rcu_tasks *rtp)
{}

// Compute wakeup time for lazy callback timer.
static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
{}

// Timer handler that unlazifies lazy callbacks.
static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
{}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{}

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{}

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{}

// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{}

// Wait for one grace period.
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{}

// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.	This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Gather per-CPU lists of tasks in do_exit() to ensure that all
//	tasks that were in the process of exiting (and which thus might
//	not know to synchronize with this RCU Tasks grace period) have
//	completed exiting.  The synchronize_rcu() in rcu_tasks_postgp()
//	will take care of any tasks stuck in the non-preemptible region
//	of do_exit() following its call to exit_tasks_rcu_finish().
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions add and remove, respectively, the
// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
// wait on.  This is necessary because rcu_tasks_postscan() must wait on
// tasks that have already been removed from the global list of tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.
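
// Schematically, rcu_tasks_wait_gp() drives the steps listed above as
// follows (a sketch only: the in-tree loop also handles stall warnings,
// backoff intervals, and statistics, and the backoff value here is
// illustrative):
//
//	static void rcu_tasks_wait_gp_sketch(struct rcu_tasks *rtp)
//	{
//		bool firstreport = true;
//		struct task_struct *g, *t;
//		LIST_HEAD(holdouts);
//
//		rtp->pregp_func(&holdouts);		// synchronize_rcu().
//		rcu_read_lock();
//		for_each_process_thread(g, t)
//			if (rtp->pertask_func)
//				rtp->pertask_func(t, &holdouts);
//		rcu_read_unlock();
//		rtp->postscan_func(&holdouts);		// Wait on exiting tasks.
//		while (!list_empty(&holdouts)) {
//			schedule_timeout_idle(HZ / 10);
//			rtp->holdouts_func(&holdouts, true, &firstreport);
//		}
//		rtp->postgp_func(rtp);			// synchronize_rcu().
//	}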

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{}

/* Check for quiescent states since the pregp's synchronize_rcu(). */
static bool rcu_tasks_is_holdout(struct task_struct *t)
{}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{}

static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{}

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
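
// Example use (all names hypothetical): free a dynamically allocated
// trampoline only once no task can still be executing in it:
//
//	struct my_tramp {
//		void *text;
//		struct rcu_head rh;
//	};
//
//	static void my_tramp_free_cb(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct my_tramp, rh));
//	}
//
//	static void my_tramp_retire(struct my_tramp *tp)
//	{
//		unhook_tramp(tp);	// Hypothetical unhook operation.
//		call_rcu_tasks(&tp->rh, my_tramp_free_cb);
//	}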

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
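
// Example use (hypothetical helpers): the synchronous form of the same
// free-after-grace-period pattern shown for call_rcu_tasks() above:
//
//	unhook_tramp(tp);		// Hypothetical unhook operation.
//	synchronize_rcu_tasks();	// No task is still mid-trampoline.
//	kfree(tp);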

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, a future
 * implementation need not, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
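
// Example use: before unloading a module that queued callbacks via
// call_rcu_tasks(), wait for those callbacks to be invoked:
//
//	rcu_barrier_tasks();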

static int rcu_tasks_lazy_ms = -2;
module_param(rcu_tasks_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_kthread(void)
{}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_gp_kthread(void)
{}
EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);

void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq)
{}
EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data);

/*
 * Protect against tasklist scan blind spot while the task is exiting and
 * may be removed from the tasklist.  Do this by adding the task to yet
 * another list.
 *
 * Note that the task will remove itself from this list, so there is no
 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
 * the needed get_task_struct().
 */
void exit_tasks_rcu_start(void)
{}

/*
 * Remove the task from the "yet another list" because do_exit() is now
 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
 */
void exit_tasks_rcu_finish(void)
{}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{}
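
// A sketch of the rude grace-period wait implied by the comment above
// (close to, but not necessarily identical to, the in-tree version):
//
//	static void rcu_tasks_rude_wait_gp_sketch(struct rcu_tasks *rtp)
//	{
//		rtp->n_ipis += cpumask_weight(cpu_online_mask);
//		schedule_on_each_cpu(rcu_tasks_be_rude);
//	}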

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable). As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, a future
 * implementation need not, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

int rcu_tasks_rude_lazy_ms = -1;
module_param(rcu_tasks_rude_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_rude_kthread(void)
{}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
{}
EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);

void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq)
{}
EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data);

#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  For example, the grace-period code
// can send IPIs to CPUs, even when those CPUs are in the idle loop or
// in nohz_full userspace.  If needed, these downsides can be at least
// partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Disables CPU hotplug, adds all currently executing tasks to the
//	holdout list, then checks the state of all tasks that blocked
//	or were preempted within their current RCU Tasks Trace read-side
//	critical section, adding them to the holdout list if appropriate.
//	Finally, this function re-enables CPU hotplug.
// The ->pertask_func() pointer is NULL, so there is no per-task processing.
// rcu_tasks_trace_postscan():
//	Invokes synchronize_rcu() to wait for late-stage exiting tasks
//	to finish exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.  Once this
//	list is empty, the grace period has completed.
// rcu_tasks_trace_postgp():
//	Provides the needed full memory barrier and does debug checks.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
// read-side code is ordered before the grace period by atomic operations
// on the .b.need_qs flag of each task involved in this process, or by scheduler
// context-switch ordering (for locked-down non-running readers).
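
// The explicit read-side markers mentioned in item 1 above look like this
// in use (my_hook and its fields are hypothetical; readers may be
// preempted or even block within the critical section):
//
//	rcu_read_lock_trace();
//	hook = rcu_dereference_raw(my_hook);
//	if (hook)
//		hook->func(hook->arg);
//	rcu_read_unlock_trace();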

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;
static unsigned long n_trc_holdouts;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, "RCU Tasks Trace");

/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
static u8 rcu_ld_need_qs(struct task_struct *t)
{}

/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
static void rcu_st_need_qs(struct task_struct *t, u8 v)
{}
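
// A sketch of these two accessors (an assumption: acquire/release on the
// need_qs byte plus full barriers for grace-period ordering):
//
//	static u8 rcu_ld_need_qs(struct task_struct *t)
//	{
//		smp_mb();	// Enforce full grace-period ordering.
//		return smp_load_acquire(&t->trc_reader_special.b.need_qs);
//	}
//
//	static void rcu_st_need_qs(struct task_struct *t, u8 v)
//	{
//		smp_store_release(&t->trc_reader_special.b.need_qs, v);
//		smp_mb();	// Enforce full grace-period ordering.
//	}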

/*
 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
 * the four-byte operand-size restriction of some platforms.
 *
 * Returns the old value, which is often ignored.
 */
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
{}
EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
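
// A sketch of the four-byte emulation (an assumption about the approach:
// do the cmpxchg on the full union rcu_special word that contains the
// need_qs byte):
//
//	u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
//	{
//		union rcu_special ret;
//		union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
//		union rcu_special trs_new = trs_old;
//
//		if (trs_old.b.need_qs != old)
//			return trs_old.b.need_qs;
//		trs_new.b.need_qs = new;
//		ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
//		return ret.b.need_qs;
//	}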

/*
 * If we are the last reader, signal the grace-period kthread.
 * Also remove from the per-CPU list of blocked tasks.
 */
void rcu_read_unlock_trace_special(struct task_struct *t)
{}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a newly blocked reader task to its CPU's list. */
void rcu_tasks_trace_qs_blkd(struct task_struct *t)
{}
EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{}

/* Callback function for scheduler to check locked-down task.  */
static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
{}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{}

/*
 * Initialize for first-round processing for the specified task.
 * Return false if task is NULL or already taken care of, true otherwise.
 */
static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
{}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
{}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(struct list_head *hop)
{}

/*
 * Do intermediate processing between task and holdout scans.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{}

/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{}

static void rcu_tasks_trace_empty_fn(void *unused)
{}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{}

/**
 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
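
// Example use (hypothetical names): defer freeing a BPF-program-like
// object until all tasks have left their current rcu_read_lock_trace()
// read-side critical sections:
//
//	call_rcu_tasks_trace(&prog->rcu, my_prog_free_cb);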

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, a future
 * implementation need not, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

int rcu_tasks_trace_lazy_ms = -1;
module_param(rcu_tasks_trace_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_trace_kthread(void)
{}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
{}
EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);

void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq)
{}
EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data);

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {};

static struct rcu_tasks_test_desc tests[] = {};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{}

static void rcu_tasks_initiate_self_tests(void)
{}

/*
 * Return:  0 - test passed
 *	    1 - test failed, but have not timed out yet
 *	   -1 - test failed and timed out
 */
static int rcu_tasks_verify_self_tests(void)
{}

/*
 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
 * test passes or has timed out.
 */
static struct delayed_work rcu_tasks_verify_work;
static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
{}

static int rcu_tasks_verify_schedule_work(void)
{}
late_initcall(rcu_tasks_verify_schedule_work);
#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init tasks_cblist_init_generic(void)
{}

void __init rcu_init_tasks_generic(void)
{}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */