// linux/kernel/rcu/tree_stall.h

// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <[email protected]>
 */

#include <linux/kvm_para.h>
#include <linux/rcu_notifier.h>

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
/* Presumably the number of stalls tolerated before panicking — TODO confirm
 * against panic_on_rcu_stall(), whose body is elided in this copy. */
int sysctl_max_rcu_stall_to_panic __read_mostly;

/*
 * When lockdep/PROVE_RCU is enabled, add slack to the stall timeout to
 * avoid false positives caused by debugging overhead.  The macro values
 * were elided in this copy ("#define NAME" with no replacement text);
 * restored per upstream kernel/rcu/tree_stall.h.
 */
#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif
/* Divisor of and floor on the stall-check interval used by rcu_gp_might_be_stalled(). */
#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)

/*
 * Limit-check expedited-grace-period stall timeouts specified at boottime
 * and runtime; returns the timeout in jiffies.
 * NOTE(review): function body elided in this copy — see upstream
 * kernel/rcu/tree_stall.h.  Fixed here: EXPORT_SYMBOL_GPL() was missing
 * its symbol argument.
 */
int rcu_exp_jiffies_till_stall_check(void)
{}
EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);

/* Limit-check stall timeouts specified at boottime and runtime. */
/*
 * NOTE(review): function body elided in this copy — see upstream
 * kernel/rcu/tree_stall.h.  Fixed here: EXPORT_SYMBOL_GPL() was missing
 * its symbol argument.
 */
int rcu_jiffies_till_stall_check(void)
{}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
bool rcu_gp_might_be_stalled(void)
{}

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
void rcu_sysrq_start(void)
{}

/* Presumably re-enables stall warnings once the sysrq printout completes
 * (counterpart of rcu_sysrq_start()) — body elided in this copy. */
void rcu_sysrq_end(void)
{}

/* Don't print RCU CPU stall warnings during a kernel panic. */
/*
 * Panic-notifier callback; registered on the panic notifier chain.
 * NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h.
 */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{}

static struct notifier_block rcu_panic_block =;

/*
 * Early-boot initcall; presumably registers rcu_panic_block on the panic
 * notifier chain — body elided in this copy, TODO confirm against upstream.
 */
static int __init check_cpu_stall_init(void)
{}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void panic_on_rcu_stall(void)
{}

/**
 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 *
 * To perform the reset request from the caller, disable stall detection until
 * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
 * loaded.  It should be safe to do from the fqs loop as enough timer
 * interrupts and context switches should have passed.
 *
 * The caller must disable hard irqs.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
void rcu_cpu_stall_reset(void)
{}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void record_gp_stall_check_time(void)
{}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void rcu_stall_kick_kthreads(void)
{}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void rcu_iw_handler(struct irq_work *iwp)
{}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{}

// Communicate task state back to the RCU CPU stall warning request.
// NOTE(review): member list appears to have been elided from this copy —
// see upstream kernel/rcu/tree_stall.h.
struct rcu_stall_chk_rdr {};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
/* NOTE(review): body elided in this copy; @arg presumably points to a
 * struct rcu_stall_chk_rdr — TODO confirm against upstream. */
static int check_slow_task(struct task_struct *t, void *arg)
{}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
/* NOTE(review): body elided in this copy.  Per the __releases() annotation,
 * the implementation must drop rnp->lock on all return paths. */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	/* Intentionally empty: no blocked-task lists without CONFIG_PREEMPT_RCU. */
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	/* Even the no-op variant must honor the __releases() contract. */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Zero tasks printed. */
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void rcu_dump_cpu_stacks(void)
{}

static const char * const gp_state_names[] =;

/*
 * Convert a ->gp_state value to a character string.
 */
/* NOTE(review): body elided in this copy; presumably indexes gp_state_names[]
 * with a bounds check — TODO confirm against upstream. */
static const char *gp_state_getname(short gs)
{}

/* Is the RCU grace-period kthread being starved of CPU time? */
/* NOTE(review): body elided in this copy; @jp presumably receives the
 * starvation duration in jiffies — TODO confirm against upstream. */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{}

/* Presumably the per-CPU rcuc-kthread analog of rcu_is_gp_kthread_starving()
 * — body elided in this copy, TODO confirm against upstream. */
static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
{}

/* Print CPU statistics for the stalled CPU @cpu — body elided in this copy. */
static void print_cpu_stat_info(int cpu)
{}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle info.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void print_cpu_stall_info(int cpu)
{}

/* Complain about starvation of grace-period kthread.  */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void rcu_check_gp_kthread_starvation(void)
{}

/* Complain about missing wakeups from expired fqs wait timer */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{}

/*
 * Report a stall detected on some other CPU(s) for grace period @gp_seq,
 * which started at jiffies value @gps — body elided in this copy.
 */
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{}

/*
 * Report a stall detected on the current CPU for the grace period that
 * started at jiffies value @gps — body elided in this copy.
 */
static void print_cpu_stall(unsigned long gps)
{}

/*
 * Top-level per-CPU stall check invoked with @rdp's CPU's rcu_data —
 * body elided in this copy; see upstream kernel/rcu/tree_stall.h.
 */
static void check_cpu_stall(struct rcu_data *rdp)
{}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including for callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 *
 * NOTE(review): body elided in this copy — see upstream
 * kernel/rcu/tree_stall.h.  Fixed here: EXPORT_SYMBOL_GPL() was missing
 * its symbol argument, and a "quiscent" typo in the header comment.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);

/*
 * Show the state of the grace-period kthreads.
 * NOTE(review): body elided in this copy — see upstream
 * kernel/rcu/tree_stall.h.  Fixed here: EXPORT_SYMBOL_GPL() was missing
 * its symbol argument.
 */
void show_rcu_gp_kthreads(void)
{}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 *
 * NOTE(review): body elided in this copy — see upstream
 * kernel/rcu/tree_stall.h.  Fixed here: EXPORT_SYMBOL_GPL() was missing
 * its symbol argument.
 */
void rcu_fwd_progress_check(unsigned long j)
{}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
/* Read-only module parameter (0444): set only at boot/module-load time. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
/* NOTE(review): body elided in this copy — see upstream kernel/rcu/tree_stall.h. */
static void sysrq_show_rcu(u8 key)
{}

static const struct sysrq_key_op sysrq_rcudump_op =;

/*
 * Early-boot initcall; presumably registers sysrq_rcudump_op when the
 * sysrq_rcu module parameter is set — body elided in this copy, TODO
 * confirm against upstream.
 */
static int __init rcu_sysrq_init(void)
{}
early_initcall(rcu_sysrq_init);

#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER

//////////////////////////////////////////////////////////////////////////////
//
// RCU CPU stall-warning notifiers

static ATOMIC_NOTIFIER_HEAD(rcu_cpu_stall_notifier_list);

/**
 * rcu_stall_chain_notifier_register - Add an RCU CPU stall notifier
 * @n: Entry to add.
 *
 * Adds an RCU CPU stall notifier to an atomic notifier chain.
 * The @action passed to a notifier will be @RCU_STALL_NOTIFY_NORM or
 * friends.  The @data will be the duration of the stalled grace period,
 * in jiffies, coerced to a void* pointer.
 *
 * Returns 0 on success, %-EEXIST on error.
 */
int rcu_stall_chain_notifier_register(struct notifier_block *n)
{}
EXPORT_SYMBOL_GPL();

/**
 * rcu_stall_chain_notifier_unregister - Remove an RCU CPU stall notifier
 * @n: Entry to add.
 *
 * Removes an RCU CPU stall notifier from an atomic notifier chain.
 *
 * Returns zero on success, %-ENOENT on failure.
 */
int rcu_stall_chain_notifier_unregister(struct notifier_block *n)
{}
EXPORT_SYMBOL_GPL();

/*
 * rcu_stall_notifier_call_chain - Call functions in an RCU CPU stall notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in the RCU CPU stall notifier chain in turn, which
 * is an atomic call chain.  See atomic_notifier_call_chain() for more
 * information.
 *
 * This is for use within RCU, hence the omission of the extra asterisk
 * to indicate a non-kerneldoc format header comment.
 */
int rcu_stall_notifier_call_chain(unsigned long val, void *v)
{}

#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER