#define pr_fmt(fmt) …
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <asm/softirq_stack.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif
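
/*
 * The registered softirq handlers, the per CPU ksoftirqd task pointers and
 * the softirq names used by the trace events and /proc/softirqs.
 */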
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
const char * const softirq_to_name[NR_SOFTIRQS] = …;
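
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us.
 */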
static void wakeup_softirqd(void)
{ … }
#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(…);
EXPORT_PER_CPU_SYMBOL_GPL(…);
#endif
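
/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */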
#ifdef CONFIG_PREEMPT_RT
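
/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirqs() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */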
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};
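
/**
 * local_bh_blocked() - Check for idle whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0 otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */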
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
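
/*
 * Drop the BH disable count by @cnt. Once it reaches zero and @unlock is
 * true, drop the RCU read lock and the per CPU local lock acquired in
 * __local_bh_disable_ip().
 */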
static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
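
/*
 * Invoked from run_ksoftirqd() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */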
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}
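
/*
 * On RT the softirq count and softirq_ctrl::lock are already managed by the
 * callers (__local_bh_enable_ip() and ksoftirqd_run_begin()/end()), so there
 * is nothing to do when entering or leaving the softirq handling loop.
 */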
static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }
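
/*
 * Only wake ksoftirqd when no task on this CPU is inside a BH disabled
 * section; otherwise the pending softirqs are handled when that section
 * ends and the local lock is dropped.
 */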
static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}
static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}
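
/*
 * flush_smp_call_function_queue() can raise a soft interrupt from a
 * function call. On RT kernels this is undesired, so warn when softirqs
 * which were not pending before the flush show up, and handle them.
 */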
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}
#else /* CONFIG_PREEMPT_RT */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{ … }
EXPORT_SYMBOL(…);
#endif
static void __local_bh_enable(unsigned int cnt)
{ … }
void _local_bh_enable(void)
{ … }
EXPORT_SYMBOL(…);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{ … }
EXPORT_SYMBOL(…);
static inline void softirq_handle_begin(void)
{ … }
static inline void softirq_handle_end(void)
{ … }
static inline void ksoftirqd_run_begin(void)
{ … }
static inline void ksoftirqd_run_end(void)
{ … }
static inline bool should_wake_ksoftirqd(void)
{ … }
static inline void invoke_softirq(void)
{ … }
asmlinkage __visible void do_softirq(void)
{ … }
#endif /* !CONFIG_PREEMPT_RT */
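
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after MAX_SOFTIRQ_TIME.
 * These limits balance latency against fairness: softirqs should be
 * handled as soon as possible, but must not be able to lock up the box.
 */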
#define MAX_SOFTIRQ_TIME …
#define MAX_SOFTIRQ_RESTART …
#ifdef CONFIG_TRACE_IRQFLAGS
static inline bool lockdep_softirq_start(void)
{ … }
static inline void lockdep_softirq_end(bool in_hardirq)
{ … }
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
static void handle_softirqs(bool ksirqd)
{ … }
asmlinkage __visible void __softirq_entry __do_softirq(void)
{ … }
void irq_enter_rcu(void)
{ … }
void irq_enter(void)
{ … }
static inline void tick_irq_exit(void)
{ … }
static inline void __irq_exit_rcu(void)
{ … }
void irq_exit_rcu(void)
{ … }
void irq_exit(void)
{ … }
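
/*
 * This function must run with irqs disabled!
 */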
inline void raise_softirq_irqoff(unsigned int nr)
{ … }
void raise_softirq(unsigned int nr)
{ … }
void __raise_softirq_irqoff(unsigned int nr)
{ … }
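
/*
 * A minimal usage sketch (mirroring what softirq_init() below does for
 * tasklets): register a handler for a softirq vector once at init time,
 * then raise that vector whenever there is work for it:
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *	...
 *	raise_softirq(TASKLET_SOFTIRQ);
 *
 * The handler then runs from __do_softirq() on the CPU which raised it.
 */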
void open_softirq(int nr, void (*action)(struct softirq_action *))
{ … }
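
/*
 * Tasklets: multiplexed on top of the HI_SOFTIRQ and TASKLET_SOFTIRQ
 * vectors. Each CPU keeps its own singly linked list of scheduled
 * tasklets in tasklet_vec / tasklet_hi_vec.
 */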
struct tasklet_head { … };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{ … }
void __tasklet_schedule(struct tasklet_struct *t)
{ … }
EXPORT_SYMBOL(…);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{ … }
EXPORT_SYMBOL(…);
static bool tasklet_clear_sched(struct tasklet_struct *t)
{ … }
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{ … }
static __latent_entropy void tasklet_action(struct softirq_action *a)
{ … }
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{ … }
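
/*
 * A minimal driver-side sketch (my_tasklet and my_callback are placeholder
 * names, not part of this file):
 *
 *	static void my_callback(struct tasklet_struct *t)
 *	{
 *		... runs in softirq context with bottom halves disabled ...
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_callback);
 *
 *	tasklet_schedule(&my_tasklet);	// typically from an interrupt handler
 *	...
 *	tasklet_kill(&my_tasklet);	// on teardown
 */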
void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{ … }
EXPORT_SYMBOL(…);
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{ … }
EXPORT_SYMBOL(…);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{ … }
EXPORT_SYMBOL(…);
#endif
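
/*
 * Wait for a tasklet to finish and make sure it cannot be scheduled again.
 * May sleep while waiting for a running callback, so this must not be
 * called from interrupt context.
 */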
void tasklet_kill(struct tasklet_struct *t)
{ … }
EXPORT_SYMBOL(…);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{ … }
EXPORT_SYMBOL_GPL(…);
void tasklet_unlock_wait(struct tasklet_struct *t)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
void __init softirq_init(void)
{ … }
static int ksoftirqd_should_run(unsigned int cpu)
{ … }
static void run_ksoftirqd(unsigned int cpu)
{ … }
#ifdef CONFIG_HOTPLUG_CPU
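/*
 * CPU hotplug callback: move the pending tasklets of the dead CPU over to
 * the current CPU so that nothing queued on the dead CPU is lost.
 */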
static int takeover_tasklets(unsigned int cpu)
{ … }
#else
#define takeover_tasklets …
#endif
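
/*
 * ksoftirqd is set up as a per CPU smpboot thread and registered from the
 * early initcall below.
 */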
static struct smp_hotplug_thread softirq_threads = …;
static __init int spawn_ksoftirqd(void)
{ … }
early_initcall(spawn_ksoftirqd);
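
/*
 * Default (weak) implementations of the early irq setup hooks. These can
 * be overridden by the architecture or the generic irq core.
 */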
int __init __weak early_irq_init(void)
{ … }
int __init __weak arch_probe_nr_irqs(void)
{ … }
int __init __weak arch_early_irq_init(void)
{ … }
unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{ … }