#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <asm/irq_regs.h>
#include "tick-internal.h"
#include <trace/events/timer.h>
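/*
 * Per-CPU nohz control structure
 */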
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
struct tick_sched *tick_get_tick_sched(int cpu)
{ … }
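/*
 * The time when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq so that readers get a consistent view of
 * jiffies and last_jiffies_update.
 */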
static ktime_t last_jiffies_update;
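/*
 * Must be called with interrupts disabled!
 */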
static void tick_do_update_jiffies64(ktime_t now)
{ … }
static ktime_t tick_init_jiffy_update(void)
{ … }
static inline int tick_sched_flag_test(struct tick_sched *ts,
unsigned long flag)
{ … }
static inline void tick_sched_flag_set(struct tick_sched *ts,
unsigned long flag)
{ … }
static inline void tick_sched_flag_clear(struct tick_sched *ts,
unsigned long flag)
{ … }
#define MAX_STALLED_JIFFIES …
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{ … }
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{ … }
static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
{ … }
static void tick_sched_timer_cancel(struct tick_sched *ts)
{ … }
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
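/* Global tick dependency mask, checked by every CPU in can_stop_full_tick() */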
static atomic_t tick_dep_mask;
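/*
 * Check whether any enabled dependency prevents stopping the tick and
 * trace the first one found.
 */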
static bool check_tick_dependency(atomic_t *dep)
{
int val = atomic_read(dep);
if (val & TICK_DEP_MASK_POSIX_TIMER) {
trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
return true;
}
if (val & TICK_DEP_MASK_PERF_EVENTS) {
trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
return true;
}
if (val & TICK_DEP_MASK_SCHED) {
trace_tick_stop(0, TICK_DEP_MASK_SCHED);
return true;
}
if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
return true;
}
if (val & TICK_DEP_MASK_RCU) {
trace_tick_stop(0, TICK_DEP_MASK_RCU);
return true;
}
if (val & TICK_DEP_MASK_RCU_EXP) {
trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
return true;
}
return false;
}
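/*
 * The tick can only be stopped on a full dynticks CPU when it is online
 * and no global, per-CPU, per-task or per-process dependency is set.
 */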
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
lockdep_assert_irqs_disabled();
if (unlikely(!cpu_online(cpu)))
return false;
if (check_tick_dependency(&tick_dep_mask))
return false;
if (check_tick_dependency(&ts->tick_dep_mask))
return false;
if (check_tick_dependency(&current->tick_dep_mask))
return false;
if (check_tick_dependency(&current->signal->tick_dep_mask))
return false;
return true;
}
static void nohz_full_kick_func(struct irq_work *work)
{
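/* Empty, the tick restart happens on tick_nohz_irq_exit() */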
}
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
IRQ_WORK_INIT_HARD(nohz_full_kick_func);
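/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */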
static void tick_nohz_full_kick(void)
{
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}
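/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */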
void tick_nohz_full_kick_cpu(int cpu)
{
if (!tick_nohz_full_cpu(cpu))
return;
irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}
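/*
 * Kick the CPU the task is queued on, if any, so that it re-evaluates
 * its tick dependency. If the task is not on a runqueue there is nothing
 * to elapse and the IPI can be spared.
 */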
static void tick_nohz_kick_task(struct task_struct *tsk)
{
int cpu;
if (!sched_task_on_rq(tsk))
return;
cpu = task_cpu(tsk);
preempt_disable();
if (cpu_online(cpu))
tick_nohz_full_kick_cpu(cpu);
preempt_enable();
}
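/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */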
static void tick_nohz_full_kick_all(void)
{
int cpu;
if (!tick_nohz_full_running)
return;
preempt_disable();
for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
tick_nohz_full_kick_cpu(cpu);
preempt_enable();
}
static void tick_nohz_dep_set_all(atomic_t *dep,
enum tick_dep_bits bit)
{
int prev;
prev = atomic_fetch_or(BIT(bit), dep);
if (!prev)
tick_nohz_full_kick_all();
}
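/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * unstable clocks.
 */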
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
tick_nohz_dep_set_all(&tick_dep_mask, bit);
}
void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &tick_dep_mask);
}
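/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage event throttling.
 */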
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
int prev;
struct tick_sched *ts;
ts = per_cpu_ptr(&tick_cpu_sched, cpu);
prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
if (!prev) {
preempt_disable();
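/* Perf needs a local kick that is NMI safe */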
if (cpu == smp_processor_id()) {
tick_nohz_full_kick();
} else {
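/* Remote irq work is not NMI-safe */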
if (!WARN_ON_ONCE(in_nmi()))
tick_nohz_full_kick_cpu(cpu);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
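/*
 * Set a per-task tick dependency. RCU needs this, and so do posix CPU
 * timers in order to elapse per-task timers.
 */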
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);
void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
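/*
 * Set a per-process tick dependency. Posix CPU timers need this in order
 * to elapse per-process timers. Must be called with sighand->siglock held.
 */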
void tick_nohz_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit)
{
int prev;
struct signal_struct *sig = tsk->signal;
prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
if (!prev) {
struct task_struct *t;
lockdep_assert_held(&tsk->sighand->siglock);
__for_each_thread(sig, t)
tick_nohz_kick_task(t);
}
}
void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}
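/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process broadcast management,
 * perf events, posix cpu timers, ...
 */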
void __tick_nohz_task_switch(void)
{
struct tick_sched *ts;
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
ts = this_cpu_ptr(&tick_cpu_sched);
if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
if (atomic_read(&current->tick_dep_mask) ||
atomic_read(&current->signal->tick_dep_mask))
tick_nohz_full_kick();
}
}
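/* Get the boot-time nohz CPU list from the kernel parameters. */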
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
cpumask_copy(tick_nohz_full_mask, cpumask);
tick_nohz_full_running = true;
}
bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
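/*
 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
 * CPUs. It must remain online when nohz full is enabled.
 */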
if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
return false;
return true;
}
static int tick_nohz_cpu_down(unsigned int cpu)
{
return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}
void __init tick_nohz_init(void)
{
int cpu, ret;
if (!tick_nohz_full_running)
return;
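/*
 * Full dynticks uses IRQ work to drive the tick rescheduling on safe
 * locking contexts. But then we need IRQ work to raise its own
 * interrupts to avoid circular dependency on the tick.
 */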
if (!arch_irq_work_has_interrupt()) {
pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n");
cpumask_clear(tick_nohz_full_mask);
tick_nohz_full_running = false;
return;
}
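/*
 * Without PM_SLEEP_SMP_NONZERO_CPU, the boot CPU must stay out of the
 * nohz_full set so it can keep handling timekeeping across suspend/resume.
 */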
if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
!IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
pr_warn("NO_HZ: Clearing %d from nohz_full range "
"for timekeeping\n", cpu);
cpumask_clear_cpu(cpu, tick_nohz_full_mask);
}
}
for_each_cpu(cpu, tick_nohz_full_mask)
ct_cpu_track_user(cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"kernel/nohz:predown", NULL,
tick_nohz_cpu_down);
WARN_ON(ret < 0);
pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
cpumask_pr_args(tick_nohz_full_mask));
}
#endif
#ifdef CONFIG_NO_HZ_COMMON
bool tick_nohz_enabled __read_mostly = …;
unsigned long tick_nohz_active __read_mostly;
static int __init setup_tick_nohz(char *str)
{ … }
__setup(…);
bool tick_nohz_tick_stopped(void)
{ … }
bool tick_nohz_tick_stopped_cpu(int cpu)
{ … }
static void tick_nohz_update_jiffies(ktime_t now)
{ … }
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{ … }
static void tick_nohz_start_idle(struct tick_sched *ts)
{ … }
static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
bool compute_delta, u64 *last_update_time)
{ … }
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{ … }
EXPORT_SYMBOL_GPL(…);
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{ … }
EXPORT_SYMBOL_GPL(…);
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{ … }
static inline bool local_timer_softirq_pending(void)
{ … }
u64 get_jiffies_update(unsigned long *basej)
{ … }
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{ … }
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{ … }
static void tick_nohz_retain_tick(struct tick_sched *ts)
{ … }
#ifdef CONFIG_NO_HZ_FULL
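/* Stop the tick only if a next event could be computed, otherwise retain it. */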
static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu)
{
if (tick_nohz_next_event(ts, cpu))
tick_nohz_stop_tick(ts, cpu);
else
tick_nohz_retain_tick(ts);
}
#endif
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{ … }
static void __tick_nohz_full_update_tick(struct tick_sched *ts,
ktime_t now)
{ … }
static void tick_nohz_full_update_tick(struct tick_sched *ts)
{ … }
static bool report_idle_softirq(void)
{ … }
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{ … }
void tick_nohz_idle_stop_tick(void)
{ … }
void tick_nohz_idle_retain_tick(void)
{ … }
void tick_nohz_idle_enter(void)
{ … }
void tick_nohz_irq_exit(void)
{ … }
bool tick_nohz_idle_got_tick(void)
{ … }
ktime_t tick_nohz_get_next_hrtimer(void)
{ … }
ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{ … }
unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
{ … }
static void tick_nohz_account_idle_time(struct tick_sched *ts,
ktime_t now)
{ … }
void tick_nohz_idle_restart_tick(void)
{ … }
static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
{ … }
void tick_nohz_idle_exit(void)
{ … }
static void tick_nohz_lowres_handler(struct clock_event_device *dev)
{ … }
static inline void tick_nohz_activate(struct tick_sched *ts)
{ … }
static void tick_nohz_switch_to_nohz(void)
{ … }
static inline void tick_nohz_irq_enter(void)
{ … }
#else
static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }
static inline void tick_nohz_activate(struct tick_sched *ts) { }
#endif
void tick_irq_enter(void)
{ … }
static int sched_skew_tick;
static int __init skew_tick(char *str)
{ … }
early_param(…);
void tick_setup_sched_timer(bool hrtimer)
{ … }
void tick_sched_timer_dying(int cpu)
{ … }
void tick_clock_notify(void)
{ … }
void tick_oneshot_notify(void)
{ … }
int tick_check_oneshot_change(int allow_nohz)
{ … }