/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

/*
 * Hardirq/NMI context entry and exit primitives.
 *
 * NOTE(review): all function and macro bodies in this view are elided
 * ("…"); the comments below describe only what the visible declarations
 * establish, with assumptions marked for confirmation against the full
 * definitions.
 */

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

/*
 * Synchronization against in-flight interrupt handlers for @irq.
 * synchronize_hardirq() returns a bool — presumably whether threaded
 * handlers are still pending; confirm against the definition in
 * kernel/irq/manage.c.
 */
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

/*
 * Tick re-enable check on irq entry: only NO_HZ_FULL kernels need a real
 * out-of-line implementation; otherwise it compiles to a stub.
 */
#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { … }
#endif

/*
 * Wrapper around __rcu_irq_enter_check_tick(); __always_inline so it can
 * be used from low-level entry code — assumes it guards the call with a
 * cheap inline condition (TODO confirm body).
 */
static __always_inline void rcu_irq_enter_check_tick(void) { … }

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter() …

/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw() …

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);

/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit() …

/*
 * Like __irq_exit() without time accounting
 */
#define __irq_exit_raw() …

/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);

/*
 * Architecture hooks run at the outermost edge of NMI entry/exit;
 * default to no-ops when the arch does not provide them.
 */
#ifndef arch_nmi_enter
#define arch_nmi_enter() …
#define arch_nmi_exit() …
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 *
 * The __-prefixed variants are presumably the bare preempt_count
 * manipulation, with nmi_enter()/nmi_exit() layering the arch hooks and
 * instrumentation on top — TODO confirm against the macro bodies.
 */
#define __nmi_enter() …
#define nmi_enter() …
#define __nmi_exit() …
#define nmi_exit() …

#endif /* LINUX_HARDIRQ_H */