linux/kernel/rcu/srcutree.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <[email protected]>
 *	   Lai Jiangshan <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 *    0: Don't convert at all.
 *    1: Convert at init_srcu_struct() time.
 *    2: Convert when rcutorture invokes srcu_torture_stats_print().
 *    3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)

#define spin_unlock_rcu_node(p)

#define spin_lock_irq_rcu_node(p)

#define spin_unlock_irq_rcu_node(p)

#define spin_lock_irqsave_rcu_node(p, flags)

#define spin_trylock_irqsave_rcu_node(p, flags)

#define spin_unlock_irqrestore_rcu_node(p, flags)

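/*
 * Illustrative sketch only (the wrapper definitions above are elided in
 * this skeleton): following the raw_spin_lock_rcu_node() pattern, each
 * acquisition-side wrapper is expected to pair the lock operation on the
 * structure's private ->lock with smp_mb__after_unlock_lock(), providing
 * the ordering that the grace-period machinery relies on.  The name below
 * is hypothetical and is not one of the wrappers used by this file.
 */
#define example_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)
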
/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ	0x2

/*
 * Check whether the sequence number corresponding to the snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
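
/*
 * Usage sketch (illustrative only, not part of this file's machinery; the
 * example_* name is hypothetical): an srcu_struct initialized with
 * init_srcu_struct() must eventually be cleaned up with
 * cleanup_srcu_struct() once all readers, updaters, and callbacks are done.
 */
static int __maybe_unused example_srcu_domain_lifetime(struct srcu_struct *ssp)
{
	int idx;
	int ret;

	ret = init_srcu_struct(ssp);	/* Must precede any other use of ssp. */
	if (ret)
		return ret;

	idx = srcu_read_lock(ssp);	/* Read-side critical section... */
	srcu_read_unlock(ssp, idx);	/* ...which, unlike RCU, may sleep. */

	synchronize_srcu(ssp);		/* Wait for pre-existing readers. */
	cleanup_srcu_struct(ssp);	/* Omitting this leaks memory. */
	return 0;
}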

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{}
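
/*
 * Illustrative sketch of the first-use pattern described above (the
 * example_* name, the initialized flag, and the passed-in lock are
 * hypothetical simplifications, not this file's actual state): a cheap
 * racy check handles the common case, and apparent losers take the
 * compile-time-initialized lock and re-check before initializing.
 */
static void __maybe_unused example_check_init(struct srcu_struct *ssp,
					      bool *initialized, spinlock_t *lock)
{
	unsigned long flags;

	if (smp_load_acquire(initialized))	/* Common case: already done. */
		return;
	spin_lock_irqsave(lock, flags);
	if (!*initialized) {
		/* init_srcu_struct_fields(ssp, true) would run here. */
		smp_store_release(initialized, true);
	}
	spin_unlock_irqrestore(lock, flags);
}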

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{}
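
/*
 * Illustrative sketch of the check described above (greatly simplified:
 * it ignores the NMI-safe atomic counters and the counter-wrap reasoning
 * of the elided body; the example_* name is hypothetical): the unlock
 * counts are summed before the lock counts, with a full memory barrier in
 * between, and equality of the two sums indicates that all readers in the
 * idx rank have completed.
 */
static bool __maybe_unused example_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);
	smp_mb(); /* Pairs with the barriers in the read-side primitives. */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}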

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for 1-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY	5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1	// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10	// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	100UL	// Highmark on default per-GP-phase
							// no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// Per-GP-phase no-delay instances adjusted to allow a non-sleeping poll of up
// to one jiffy's duration.  The multiplication by 2 factors in the
// srcu_get_delay() call made from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE						\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,			\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO, SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY (SRCU_DEFAULT_MAX_NODELAY_PHASE * 1000)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

#ifdef CONFIG_PROVE_RCU
/*
 * Check for consistent NMI safety.
 */
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
{}
EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
#endif /* CONFIG_PROVE_RCU */

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
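
/*
 * Illustrative sketch of the read-lock path described above (the
 * example_* names are hypothetical, and standalone per-CPU counters stand
 * in for the srcu_data fields; the real counters may be atomic_long_t, as
 * in the NMI-safe variant below): sample the current index, count the
 * reader on this CPU for that index, and order the count before the
 * critical section.
 */
static DEFINE_PER_CPU(unsigned long, example_srcu_lock_count[2]);

static int __maybe_unused example_srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(example_srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}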

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

#ifdef CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	int idx;
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);

#endif // CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{}


static void srcu_delay_timer(struct timer_list *t)
{}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{}
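
/*
 * Illustrative sketch of the funnel idea described above (a hypothetical
 * example_* helper with a simplified walk; the elided body also checks for
 * already-completed grace periods and records the request at the
 * srcu_struct level): each level of the combining tree records the
 * requested grace-period number, and a walker stops as soon as it finds a
 * level that already covers its request, so most requests touch only a
 * leaf node or two rather than contending on the root.
 */
static void __maybe_unused example_funnel_record_exp(struct srcu_node *snp, unsigned long s)
{
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (!ULONG_CMP_LT(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;	/* This level already covers request s. */
		spin_lock_irq_rcu_node(snp);
		if (!ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irq_rcu_node(snp);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irq_rcu_node(snp);
	}
}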

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 *
 * The srcu read lock should be held across this function, and s is the
 * grace-period sequence number snapshotted after that lock was acquired.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{}
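
/*
 * Illustrative sketch of the flip described above (the example_* name is
 * hypothetical and the ordering comments paraphrase the intent of the
 * elided body): a full barrier before the increment orders prior counter
 * scans before the flip, and a full barrier afterward orders the flip
 * before subsequent counter scans by the grace-period machinery.
 */
static void __maybe_unused example_srcu_flip(struct srcu_struct *ssp)
{
	smp_mb();	/* Order prior counter scans before the flip. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	smp_mb();	/* Order the flip before later counter scans. */
}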

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback.
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{}
EXPORT_SYMBOL_GPL(call_srcu);
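
/*
 * Usage sketch for call_srcu() (illustrative only; struct example_obj and
 * the example_* functions are hypothetical): embed an rcu_head in the
 * protected object and free the object from the callback, which runs only
 * after a full SRCU grace period has elapsed on the given domain.
 */
struct example_obj {
	int data;
	struct rcu_head rh;
};

static void example_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_obj, rh));
}

static void __maybe_unused example_retire_obj(struct srcu_struct *ssp,
					      struct example_obj *obj)
{
	/*
	 * Readers already within srcu_read_lock(ssp) may still see obj,
	 * which is assumed to have been kmalloc()ed.
	 */
	call_srcu(ssp, &obj->rh, example_free_cb);
}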

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the reader counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the count of
 * index ((->srcu_idx & 1) ^ 1) to drain to zero, and then flips ->srcu_idx
 * and waits for the count of the other index to drain as well.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(synchronize_srcu);
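
/*
 * Usage sketch for synchronize_srcu() (illustrative only; struct
 * example_cfg, example_cfg_ptr, and the example_* functions are
 * hypothetical): the updater unpublishes an object, waits for all
 * pre-existing SRCU readers of the domain, and only then frees it, while
 * readers access the object under srcu_read_lock().
 */
struct example_cfg {
	int data;
};

static struct example_cfg __rcu *example_cfg_ptr;

static int __maybe_unused example_cfg_read(struct srcu_struct *ssp)
{
	struct example_cfg *p;
	int idx;
	int val = -1;

	idx = srcu_read_lock(ssp);
	p = srcu_dereference(example_cfg_ptr, ssp);
	if (p)
		val = p->data;
	srcu_read_unlock(ssp, idx);
	return val;
}

static void __maybe_unused example_cfg_replace(struct srcu_struct *ssp,
					       struct example_cfg *newp)
{
	struct example_cfg *oldp;

	/* Updater is assumed to be single-threaded or otherwise serialized. */
	oldp = rcu_dereference_protected(example_cfg_ptr, 1);
	rcu_assign_pointer(example_cfg_ptr, newp);
	synchronize_srcu(ssp);	/* Wait for pre-existing readers to finish. */
	kfree(oldp);		/* oldp is assumed to have been kmalloc()ed. */
}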

/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
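
/*
 * Usage sketch for the polled grace-period API above (illustrative only;
 * the example_* name is hypothetical): record a cookie, optionally start a
 * grace period, and later check the cookie without blocking.
 */
static void __maybe_unused example_poll_gp(struct srcu_struct *ssp)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_srcu(ssp);	/* Also starts a GP. */

	/* ... do other work while the grace period runs ... */

	if (poll_state_synchronize_srcu(ssp, cookie)) {
		/* A full SRCU grace period has elapsed since the cookie. */
	} else {
		/* Not yet: poll again later or fall back to blocking. */
		synchronize_srcu(ssp);
	}
}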

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{}

/*
 * Enqueue an srcu_barrier() callback on the specified srcu_data
 * structure's ->cblist, but only if that ->cblist already has at least one
 * callback enqueued.  Note that if a CPU already has callbacks enqueued,
 * it must have already registered the need for a future grace period,
 * so all we need do is enqueue a callback that will use the same grace
 * period as the last callback already in the queue.
 */
static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
{}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(srcu_barrier);
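
/*
 * Usage sketch for srcu_barrier() (illustrative only; the example_* name
 * is hypothetical): before tearing down an SRCU domain, stop posting new
 * callbacks, wait for all already-queued call_srcu() callbacks to be
 * invoked, and only then call cleanup_srcu_struct().
 */
static void __maybe_unused example_srcu_teardown(struct srcu_struct *ssp)
{
	/* Caller must ensure no further call_srcu() invocations on ssp. */
	srcu_barrier(ssp);		/* Wait for in-flight callbacks. */
	cleanup_srcu_struct(ssp);	/* Now safe to deconstruct ssp. */
}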

/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{}

void srcutorture_get_gp_data(struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

static const char * const srcu_size_state_name[] = {
	"SRCU_SIZE_SMALL", "SRCU_SIZE_ALLOC", "SRCU_SIZE_WAIT_BARRIER",
	"SRCU_SIZE_WAIT_CALL", "SRCU_SIZE_WAIT_CBS1", "SRCU_SIZE_WAIT_CBS2",
	"SRCU_SIZE_WAIT_CBS3", "SRCU_SIZE_WAIT_CBS4", "SRCU_SIZE_BIG", "SRCU_SIZE???",
};

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */