#define pr_fmt(fmt) …
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/kernel_stat.h>
#include <linux/kvm_para.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <asm/irq_regs.h>
static DEFINE_MUTEX(watchdog_mutex);
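/*
 * Hard-lockup detection is only enabled by default when some hard-lockup
 * detector implementation is built in (the generic perf/buddy based one,
 * or the sparc64 specific NMI watchdog).
 */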
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#define WATCHDOG_HARDLOCKUP_DEFAULT …
#else
#define WATCHDOG_HARDLOCKUP_DEFAULT …
#endif
#define NUM_SAMPLE_PERIODS …
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = …;
static int __read_mostly watchdog_hardlockup_user_enabled = …;
static int __read_mostly watchdog_softlockup_user_enabled = …;
int __read_mostly watchdog_thresh = …;
static int __read_mostly watchdog_hardlockup_available;
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = …;
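/*
 * watchdog_enabled caches the effective state as a mask of
 * WATCHDOG_HARDLOCKUP_ENABLED and WATCHDOG_SOFTLOCKUP_ENABLED.  The
 * *_user_enabled variables hold what was requested via boot parameters or
 * sysctl, and watchdog_cpumask limits which CPUs run the per-CPU watchdog
 * (by default the housekeeping CPUs when nohz_full is in use).
 */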
#ifdef CONFIG_HARDLOCKUP_DETECTOR
# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif
unsigned int __read_mostly hardlockup_panic = …
			IS_ENABLED(…);
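/*
 * hardlockup_detector_disable() lets architecture early-boot code switch the
 * hard-lockup detector off before it is set up; hardlockup_panic_setup()
 * parses the "nmi_watchdog=" boot parameter (panic / nopanic / 0 / 1).
 */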
void __init hardlockup_detector_disable(void)
{ … }
static int __init hardlockup_panic_setup(char *str)
{ … }
__setup(…);
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
static unsigned long hard_lockup_nmi_warn;
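/*
 * Hrtimer-count based hard-lockup detection: the soft-lockup hrtimer bumps
 * the per-CPU hrtimer_interrupts counter on every period, and the hard-lockup
 * check (run from NMI/perf context, or from another CPU for the buddy
 * detector) compares it with hrtimer_interrupts_saved.  If the counter has
 * not advanced since the previous check, interrupts have been blocked for at
 * least one full check window and the CPU is considered hard locked up.
 * arch_touch_nmi_watchdog() / watchdog_hardlockup_touch_cpu() set
 * watchdog_hardlockup_touched to make the next check skip one window.
 */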
notrace void arch_touch_nmi_watchdog(void)
{ … }
EXPORT_SYMBOL(…);
void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{ … }
static bool is_hardlockup(unsigned int cpu)
{ … }
static void watchdog_hardlockup_kick(void)
{ … }
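/*
 * Common entry point for the perf, buddy and architecture specific detectors.
 * When a hard lockup is detected it prints the stalled CPU's registers or
 * backtrace (via an NMI backtrace when checking a remote CPU), optionally
 * dumps all other CPUs as well, and panics if hardlockup_panic is set.
 * Repeated warnings for the same stall are rate limited per CPU.
 */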
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{ … }
#else
static inline void watchdog_hardlockup_kick(void) { }
#endif
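/*
 * The __weak hooks below are overridden by the perf, buddy or architecture
 * specific hard-lockup detector.  watchdog_hardlockup_probe() reports whether
 * such a detector actually works on this system; the stop/start pair brackets
 * reconfiguration so the detector is quiesced while parameters change.
 */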
void __weak watchdog_hardlockup_enable(unsigned int cpu) { … }
void __weak watchdog_hardlockup_disable(unsigned int cpu) { … }
int __weak __init watchdog_hardlockup_probe(void)
{ … }
void __weak watchdog_hardlockup_stop(void) { … }
void __weak watchdog_hardlockup_start(void) { … }
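/*
 * Recompute the WATCHDOG_*_ENABLED bits in watchdog_enabled from the
 * user-requested settings and from whether a hard-lockup detector is
 * actually available.
 */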
static void lockup_detector_update_enable(void)
{ … }
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
#define SOFTLOCKUP_DELAY_REPORT …
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif
static struct cpumask watchdog_allowed_mask __read_mostly;
unsigned int __read_mostly softlockup_panic = …
			IS_ENABLED(…);
static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static unsigned long soft_lockup_nmi_warn;
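/*
 * The boot parameters parsed below control panic-on-soft-lockup, disabling
 * the watchdogs entirely or only the soft-lockup part, and the watchdog
 * threshold in seconds.
 */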
static int __init softlockup_panic_setup(char *str)
{ … }
__setup(…);
static int __init nowatchdog_setup(char *str)
{ … }
__setup(…);
static int __init nosoftlockup_setup(char *str)
{ … }
__setup(…);
static int __init watchdog_thresh_setup(char *str)
{ … }
__setup(…);
static void __lockup_detector_cleanup(void);
#ifdef CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM
enum stats_per_group { … };
static const enum cpu_usage_stat tracked_stats[NUM_STATS_PER_GROUP] = …;
static DEFINE_PER_CPU(u16, cpustat_old[NUM_STATS_PER_GROUP]);
static DEFINE_PER_CPU(u8, cpustat_util[NUM_SAMPLE_PERIODS][NUM_STATS_PER_GROUP]);
static DEFINE_PER_CPU(u8, cpustat_tail);
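/*
 * Interrupt-storm accounting (CONFIG_SOFTLOCKUP_DETECTOR_INTR_STORM): on
 * every sample period update_cpustat() snapshots the kcpustat counters for
 * the tracked categories (system, softirq, hardirq, idle), converts the
 * per-period delta to a percentage using reduced 16-bit precision, and keeps
 * the last NUM_SAMPLE_PERIODS values in a small per-CPU ring.
 * report_cpu_status() dumps that utilization history when a soft lockup is
 * reported.
 */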
static u16 get_16bit_precision(u64 data_ns)
{ … }
static void update_cpustat(void)
{ … }
static void print_cpustat(void)
{ … }
#define HARDIRQ_PERCENT_THRESH …
#define NUM_HARDIRQ_REPORT …
struct irq_counts { … };
static DEFINE_PER_CPU(bool, snapshot_taken);
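/*
 * If the recent hardirq utilization is above HARDIRQ_PERCENT_THRESH while a
 * soft lockup is brewing, start_counting_irqs() snapshots every interrupt's
 * count so that print_irq_counts() can name the NUM_HARDIRQ_REPORT busiest
 * interrupts in the lockup report, pointing at a possible interrupt storm.
 */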
static void tabulate_irq_count(struct irq_counts *irq_counts, int irq, u32 counts, int rank)
{ … }
static bool need_counting_irqs(void)
{ … }
static void start_counting_irqs(void)
{ … }
static void stop_counting_irqs(void)
{ … }
static void print_irq_counts(void)
{ … }
static void report_cpu_status(void)
{ … }
#else
static inline void update_cpustat(void) { }
static inline void report_cpu_status(void) { }
static inline bool need_counting_irqs(void) { return false; }
static inline void start_counting_irqs(void) { }
static inline void stop_counting_irqs(void) { }
#endif
static int get_softlockup_thresh(void)
{ … }
static unsigned long get_timestamp(void)
{ … }
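/*
 * The soft-lockup threshold is 2 * watchdog_thresh seconds and the hrtimer
 * sample period is that threshold split into NUM_SAMPLE_PERIODS slices:
 *
 *   sample_period = 2 * watchdog_thresh * NSEC_PER_SEC / NUM_SAMPLE_PERIODS
 *
 * so with the default 10 s threshold the per-CPU hrtimer fires every 4 s,
 * giving it several chances to advance hrtimer_interrupts before the
 * hard-lockup window expires.
 */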
static void set_sample_period(void)
{ … }
static void update_report_ts(void)
{ … }
static void update_touch_ts(void)
{ … }
notrace void touch_softlockup_watchdog_sched(void)
{ … }
notrace void touch_softlockup_watchdog(void)
{ … }
EXPORT_SYMBOL(…);
void touch_all_softlockup_watchdogs(void)
{ … }
void touch_softlockup_watchdog_sync(void)
{ … }
static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{ … }
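/*
 * Soft-lockup detection works by queueing softlockup_fn() on the per-CPU
 * stopper thread from the watchdog hrtimer.  The stopper runs above every
 * other scheduling class, so as long as it gets CPU time the touch timestamp
 * keeps moving forward.  If the hrtimer finds the timestamp older than the
 * soft-lockup threshold, the CPU has not scheduled the stopper for that long
 * (typically because it is looping in the kernel with preemption disabled)
 * and a soft lockup is reported.
 */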
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
static int softlockup_fn(void *data)
{ … }
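/*
 * Per-CPU hrtimer callback: kicks the hrtimer-interrupt counter for the
 * hard-lockup detector, re-queues softlockup_fn() once the previous run has
 * completed, handles the various "touch" requests, and finally compares the
 * touch/report timestamps against the threshold.  On a lockup it prints the
 * offending CPU's state (including the interrupt-storm statistics above),
 * optionally triggers an all-CPU backtrace, and panics if softlockup_panic
 * is set.  Follow-up reports for the same stall are rate limited rather than
 * repeated every period.
 */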
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{ … }
static void watchdog_enable(unsigned int cpu)
{ … }
static void watchdog_disable(unsigned int cpu)
{ … }
static int softlockup_stop_fn(void *data)
{ … }
static void softlockup_stop_all(void)
{ … }
static int softlockup_start_fn(void *data)
{ … }
static void softlockup_start_all(void)
{ … }
int lockup_detector_online_cpu(unsigned int cpu)
{ … }
int lockup_detector_offline_cpu(unsigned int cpu)
{ … }
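/*
 * Reconfiguration runs with CPU hotplug read-locked: quiesce the hard-lockup
 * detector, stop the soft-lockup watchdog on all CPUs, recompute the sample
 * period and the enable bits, then restart whatever is still enabled.  The
 * perf-event cleanup is done afterwards, outside the cpus-locked section.
 */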
static void __lockup_detector_reconfigure(void)
{ … }
void lockup_detector_reconfigure(void)
{ … }
static __init void lockup_detector_setup(void)
{ … }
#else
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();
	lockup_detector_update_enable();
	watchdog_hardlockup_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif
static void __lockup_detector_cleanup(void)
{ … }
void lockup_detector_cleanup(void)
{ … }
void lockup_detector_soft_poweroff(void)
{ … }
#ifdef CONFIG_SYSCTL
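/*
 * The sysctl handlers below serialize on watchdog_mutex; when a write changes
 * an effective value they call proc_watchdog_update() to push the new
 * configuration to all CPUs via __lockup_detector_reconfigure().
 */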
static void proc_watchdog_update(void)
{ … }
static int proc_watchdog_common(int which, const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{ … }
static int proc_watchdog(const struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{ … }
static int proc_nmi_watchdog(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{ … }
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
static int proc_soft_watchdog(const struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{ … }
#endif
static int proc_watchdog_thresh(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{ … }
static int proc_watchdog_cpumask(const struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{ … }
static const int sixty = …;
static struct ctl_table watchdog_sysctls[] = …;
static struct ctl_table watchdog_hardlockup_sysctl[] = …;
static void __init watchdog_sysctl_init(void)
{ … }
#else
#define watchdog_sysctl_init …
#endif
static void __init lockup_detector_delay_init(struct work_struct *work);
static bool allow_lockup_detector_init_retry __initdata;
static struct work_struct detector_work __initdata = …;
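/*
 * On some systems the hard-lockup detector cannot be probed successfully at
 * lockup_detector_init() time (e.g. the perf based detector before the PMU
 * driver has been initialized).  In that case a retry window is left open:
 * lockup_detector_retry_init() schedules another probe through detector_work,
 * and lockup_detector_check() closes the window late in boot.
 */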
static void __init lockup_detector_delay_init(struct work_struct *work)
{ … }
void __init lockup_detector_retry_init(void)
{ … }
static int __init lockup_detector_check(void)
{ … }
late_initcall_sync(lockup_detector_check);
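/*
 * Boot-time entry point: restricts watchdog_cpumask to the housekeeping CPUs
 * when nohz_full is enabled, probes for a hard-lockup detector (arming the
 * retry path above if the probe fails), and then performs the initial setup
 * and sysctl registration.
 */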
void __init lockup_detector_init(void)
{ … }