linux/kernel/watchdog.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/kvm_para.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/tick.h>

#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>

#include <asm/irq_regs.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
# define WATCHDOG_HARDLOCKUP_DEFAULT	1
#else
# define WATCHDOG_HARDLOCKUP_DEFAULT	0
#endif

#define NUM_SAMPLE_PERIODS	5

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
static int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_hardlockup_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	watchdog_hardlockup_user_enabled = 0;
}
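
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * paravirtualized guest's early boot code could opt out of hard lockup
 * detection before the command line is parsed, so that an explicit
 * "nmi_watchdog=1" boot parameter can still override the default:
 *
 *	static void __init example_guest_early_init(void)
 *	{
 *		if (example_running_on_hypervisor())
 *			hardlockup_detector_disable();
 *	}
 */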

static int __init hardlockup_panic_setup(char *str)
{}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)

static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
static unsigned long hard_lockup_nmi_warn;

notrace void arch_touch_nmi_watchdog(void)
{}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{}

static bool is_hardlockup(unsigned int cpu)
{}

static void watchdog_hardlockup_kick(void)
{}

void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{}

#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

static inline void watchdog_hardlockup_kick(void) { }

#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

/*
 * These functions can be overridden based on the configured hardlockup detector.
 *
 * watchdog_hardlockup_enable/disable can be implemented to start and stop
 * when the softlockup watchdog starts and stops. Such a detector must select
 * the SOFTLOCKUP_DETECTOR Kconfig option.
 */
void __weak watchdog_hardlockup_enable(unsigned int cpu) {}

void __weak watchdog_hardlockup_disable(unsigned int cpu) {}
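
/*
 * Illustrative sketch (hypothetical detector, not part of this file): a
 * detector that piggybacks on the softlockup watchdog lifecycle provides
 * strong definitions of these hooks in its own file, e.g.:
 *
 *	void watchdog_hardlockup_enable(unsigned int cpu)
 *	{
 *		example_arch_start_nmi_counter(cpu);
 *	}
 *
 *	void watchdog_hardlockup_disable(unsigned int cpu)
 *	{
 *		example_arch_stop_nmi_counter(cpu);
 *	}
 *
 * example_arch_start/stop_nmi_counter() stand in for whatever the detector
 * actually programs; such a detector must also select SOFTLOCKUP_DETECTOR,
 * as noted above.
 */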

/*
 * Watchdog-detector specific API.
 *
 * Return 0 when the hardlockup watchdog is available, a negative value
 * otherwise. Note that a negative value means that a delayed probe might
 * succeed later.
 */
int __weak __init watchdog_hardlockup_probe(void)
{}
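
/*
 * Illustrative sketch (hypothetical override, not part of this file): a
 * probe that depends on a PMU initialized later in boot can report a
 * temporary failure so that lockup_detector_retry_init() picks it up
 * again:
 *
 *	int __init watchdog_hardlockup_probe(void)
 *	{
 *		if (!example_pmu_ready())
 *			return -EBUSY;	(a delayed probe may succeed later)
 *		return 0;
 *	}
 */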

/**
 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_hardlockup_stop();
 * update_variables();
 * watchdog_hardlockup_start();
 */
void __weak watchdog_hardlockup_stop(void) {}

/**
 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_hardlockup_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_hardlockup_start(void) {}
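
/*
 * Illustrative sketch of the reconfiguration sequence described above
 * (simplified; see __lockup_detector_reconfigure() for the real thing):
 *
 *	watchdog_hardlockup_stop();
 *	watchdog_thresh = new_thresh;		(the update_variables() step)
 *	lockup_detector_update_enable();
 *	watchdog_hardlockup_start();
 */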

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the hard watchdogs are off, so this
 * can't race with watchdog_hardlockup_disable().
 */
static void lockup_detector_update_enable(void)
{}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{}
__setup("watchdog_thresh=", watchdog_thresh_setup);

static void __lockup_detector_cleanup(void);

#ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
enum stats_per_group { STATS_SYSTEM, STATS_SOFTIRQ, STATS_HARDIRQ,
		       STATS_IDLE, NUM_STATS_PER_GROUP };

static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] =
	{ CPUTIME_SYSTEM, CPUTIME_SOFTIRQ, CPUTIME_IRQ, CPUTIME_IDLE };

static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
static DEFINE_PER_CPU(u8, cpustat_util[NUM_SAMPLE_PERIODS][NUM_STATS_PER_GROUP]);
static DEFINE_PER_CPU(u8, cpustat_tail);

/*
 * We don't need nanosecond resolution. A granularity of 16ms is
 * sufficient for our precision, allowing us to use u16 to store
 * cpustats, which will roll over roughly every ~1000 seconds.
 * 2^24 ~= 16 * 10^6
 */
static u16 get_16bit_precision(u64 data_ns)
{
	return data_ns >> 24LL;	/* 2^24ns ~= 16.8ms */
}
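
/*
 * Worked example: with the default 4s sample period,
 * 4 * 10^9 ns >> 24 ~= 238 sixteen-millisecond units, which fits easily
 * in a u16. The counter wraps after 2^16 * 2^24 ns = 2^40 ns ~= 1100s,
 * matching the ~1000 seconds noted above.
 */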

static void update_cpustat(void)
{}

static void print_cpustat(void)
{}

#define HARDIRQ_PERCENT_THRESH	50
#define NUM_HARDIRQ_REPORT	5
struct irq_counts { int irq; u32 counts; };

static DEFINE_PER_CPU(bool, snapshot_taken);

/* Tabulate the most frequent interrupts. */
static void tabulate_irq_count(struct irq_counts *irq_counts, int irq, u32 counts, int rank)
{}

/*
 * If the hardirq time exceeds HARDIRQ_PERCENT_THRESH% of the sample_period,
 * then the cause of softlockup might be interrupt storm. In this case, it
 * would be useful to start interrupt counting.
 */
static bool need_counting_irqs(void)
{}
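
/*
 * Illustrative sketch of the check described above (simplified; the real
 * code compares per-period utilization snapshots):
 *
 *	util = cpustat_util[most_recent_period][STATS_HARDIRQ];
 *	if (util > HARDIRQ_PERCENT_THRESH)
 *		start_counting_irqs();
 */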

static void start_counting_irqs(void)
{}

static void stop_counting_irqs(void)
{}

static void print_irq_counts(void)
{}

static void report_cpu_status(void)
{}
#else
static inline void update_cpustat(void) { }
static inline void report_cpu_status(void) { }
static inline bool need_counting_irqs(void) { return false; }
static inline void start_counting_irqs(void) { }
static inline void stop_counting_irqs(void) { }
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. Couple the
 * two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;	/* 2^30ns ~= 1.074s */
}

static void set_sample_period(void)
{}

static void update_report_ts(void)
{}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{}

notrace void touch_softlockup_watchdog(void)
{}
EXPORT_SYMBOL(touch_softlockup_watchdog);
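
/*
 * Illustrative usage (hypothetical caller, not part of this file): code
 * that legitimately keeps a CPU busy for a long time can touch the
 * watchdog from its loop to suppress false softlockup reports:
 *
 *	while (example_slow_device_operation_pending()) {
 *		touch_softlockup_watchdog();
 *		cond_resched();
 *	}
 */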

void touch_all_softlockup_watchdogs(void)
{}

void touch_softlockup_watchdog_sync(void)
{}

static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{}

/* watchdog detector functions */
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{}
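
/*
 * Worked example of the timing above: with the default watchdog_thresh
 * of 10s, get_softlockup_thresh() yields 20s and the sample period is
 * 20s / NUM_SAMPLE_PERIODS = 4s. A softlockup is reported only once the
 * touch timestamp is more than 20s stale.
 */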

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{}

static void watchdog_enable(unsigned int cpu)
{}

static void watchdog_disable(unsigned int cpu)
{}

static int softlockup_stop_fn(void *data)
{}

static void softlockup_stop_all(void)
{}

static int softlockup_start_fn(void *data)
{}

static void softlockup_start_all(void)
{}

int lockup_detector_online_cpu(unsigned int cpu)
{}

int lockup_detector_offline_cpu(unsigned int cpu)
{}

static void __lockup_detector_reconfigure(void)
{}

void lockup_detector_reconfigure(void)
{}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();
	lockup_detector_update_enable();
	watchdog_hardlockup_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{}

/*
 * Common function for the watchdog, nmi_watchdog and soft_watchdog parameters.
 *
 * caller             | table->data points to            | 'which'
 * -------------------|----------------------------------|-------------------------------
 * proc_watchdog      | watchdog_user_enabled            | WATCHDOG_HARDLOCKUP_ENABLED |
 *                    |                                  | WATCHDOG_SOFTOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_nmi_watchdog  | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
 */
static int proc_watchdog_common(int which, const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{}
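
/*
 * Illustrative sketch of the shared handler flow (a simplified
 * reconstruction, not the verbatim implementation): reads report the
 * current bit(s) from watchdog_enabled, writes update the user setting
 * and propagate any change:
 *
 *	mutex_lock(&watchdog_mutex);
 *	if (!write) {
 *		*param = (watchdog_enabled & which) != 0;
 *		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 *	} else {
 *		old = READ_ONCE(*param);
 *		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 *		if (!err && old != READ_ONCE(*param))
 *			proc_watchdog_update();
 *	}
 *	mutex_unlock(&watchdog_mutex);
 */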

/*
 * /proc/sys/kernel/watchdog
 */
static int proc_watchdog(const struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
static int proc_nmi_watchdog(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
/*
 * /proc/sys/kernel/soft_watchdog
 */
static int proc_soft_watchdog(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{}
#endif

/*
 * /proc/sys/kernel/watchdog_thresh
 */
static int proc_watchdog_thresh(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{}

static const int sixty = 60;

static struct ctl_table watchdog_sysctls[] = {};

static struct ctl_table watchdog_hardlockup_sysctl[] = {};

static void __init watchdog_sysctl_init(void)
{}

#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static void __init lockup_detector_delay_init(struct work_struct *work);
static bool allow_lockup_detector_init_retry __initdata;

static struct work_struct detector_work __initdata =
	__WORK_INITIALIZER(detector_work, lockup_detector_delay_init);

static void __init lockup_detector_delay_init(struct work_struct *work)
{}

/*
 * lockup_detector_retry_init - retry lockup detector init if possible.
 *
 * Retry the hardlockup detector init. This is useful when the detector
 * depends on functionality that only becomes available later in boot on
 * a particular platform.
 */
void __init lockup_detector_retry_init(void)
{}
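
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * platform whose PMU becomes usable only after early boot can retry the
 * hardlockup probe once the prerequisite is ready:
 *
 *	static int __init example_platform_pmu_init(void)
 *	{
 *		example_register_pmu();
 *		lockup_detector_retry_init();
 *		return 0;
 *	}
 */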

/*
 * Ensure that the optional delayed hardlockup init has been processed
 * before the init code and memory are freed.
 */
static int __init lockup_detector_check(void)
{}
late_initcall_sync(lockup_detector_check);

void __init lockup_detector_init(void)
{}