/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NOTE:
 *
 * This header has accumulated a lot of mutually unrelated stuff.
 * The process of splitting its content up is in progress, while keeping
 * backward compatibility. That's why it's highly recommended NOT to
 * include this header inside another header file, especially under a
 * generic or architectural include/ directory.
 */
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H

#include <linux/stdarg.h>
#include <linux/align.h>
#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/bitops.h>
#include <linux/hex.h>
#include <linux/kstrtox.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/typecheck.h>
#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
#include <linux/sprintf.h>
#include <linux/static_call_types.h>
#include <linux/instruction_pointer.h>
#include <linux/wordpart.h>

#include <asm/byteorder.h>

#include <uapi/linux/kernel.h>

#define STACK_MAGIC …

/* generic data direction definitions */
#define READ …
#define WRITE …

#define PTR_IF(cond, ptr) …

#define u64_to_user_ptr(x) …

struct completion;
struct user;

#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD

extern int __cond_resched(void);
#define might_resched …

#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

extern int __cond_resched(void);

DECLARE_STATIC_CALL(might_resched, __cond_resched);

static __always_inline void might_resched(void)
{
	…
}

#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)

extern int dynamic_might_resched(void);
#define might_resched …

#else

#define might_resched …

#endif /* CONFIG_PREEMPT_* */

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void __might_resched(const char *file, int line, unsigned int offsets);
extern void __might_sleep(const char *file, int line);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
extern void __cant_migrate(const char *file, int line);

/**
 * might_sleep - annotation for functions that can sleep
 *
 * This macro will print a stack trace if it is executed in an atomic
 * context (spinlock, irq-handler, ...). Additional sections where blocking
 * is not allowed can be annotated with non_block_start() and
 * non_block_end() pairs.
 *
 * This is a useful debugging aid for catching problems early instead of
 * being bitten later, when the calling function happens to sleep where it
 * is not supposed to.
 */
#define might_sleep() …

/**
 * cant_sleep - annotation for functions that cannot sleep
 *
 * This macro will print a stack trace if it is executed with preemption
 * enabled.
 */
#define cant_sleep() …

#define sched_annotate_sleep() …

/**
 * cant_migrate - annotation for functions that cannot migrate
 *
 * Will print a stack trace if executed in code which is migratable.
 */
#define cant_migrate() …

/**
 * non_block_start - annotate the start of a section where sleeping is prohibited
 *
 * This is on behalf of the oom reaper, specifically when it is calling the
 * mmu notifiers. The problem is that if the notifier were to block on, for
 * example, mutex_lock(), and if the process which holds that mutex were to
 * perform a sleeping memory allocation, the oom reaper would then be blocked
 * on completion of that memory allocation. Other blocking calls like
 * wait_event() pose similar issues.
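 *
 * A minimal sketch of the intended pairing (the callback name and its
 * arguments below are hypothetical, purely for illustration):
 *
 *	non_block_start();
 *	ret = nonblocking_invalidate_cb(mn, range);
 *	non_block_end();
 *
 * With CONFIG_DEBUG_ATOMIC_SLEEP, any might_sleep() reached between the two
 * annotations prints the usual stack trace.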
 */
#define non_block_start() …

/**
 * non_block_end - annotate the end of a section where sleeping is prohibited
 *
 * Closes a section opened by non_block_start().
 */
#define non_block_end() …

#else
static inline void __might_resched(const char *file, int line,
				   unsigned int offsets) { }
static inline void __might_sleep(const char *file, int line) { }
#define might_sleep …
#define cant_sleep …
#define cant_migrate …
#define sched_annotate_sleep …
#define non_block_start …
#define non_block_end …
#endif

#define might_sleep_if(cond) …

#if defined(CONFIG_MMU) && \
	(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() …
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif

void do_exit(long error_code) __noreturn;

extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);

extern void bust_spinlocks(int yes);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;

/*
 * Values used for system_state. Ordering of the states must not be changed
 * as code checks for <, <=, >, >= STATE.
 */
extern enum system_states {
	…
} system_state;

/*
 * General tracing-related utility functions - trace_printk(),
 * tracing_on/tracing_off and tracing_start()/tracing_stop
 *
 * Use tracing_on/tracing_off when you want to quickly turn on or off
 * tracing. It simply enables or disables the recording of the trace events.
 * This also corresponds to the user space /sys/kernel/tracing/tracing_on
 * file, which gives a means for the kernel and userspace to interact.
 * Place a tracing_off() in the kernel where you want tracing to end.
 * From user space, examine the trace, and then echo 1 > tracing_on
 * to continue tracing.
 *
 * tracing_stop()/tracing_start() have slightly more overhead. They are used
 * by things like suspend to ram where disabling the recording of the
 * trace is not enough, but tracing must actually stop because things
 * like calling smp_processor_id() may crash the system.
 *
 * Most likely, you want to use tracing_on/tracing_off.
 */

enum ftrace_dump_mode {
	…
};

#ifdef CONFIG_TRACING
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);

extern void tracing_start(void);
extern void tracing_stop(void);

static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
	…
}
#define __trace_printk_check_format(fmt, args...) …

/**
 * trace_printk - printf formatting in the ftrace buffer
 * @fmt: the printf format for printing
 *
 * Note: __trace_printk is an internal function for trace_printk() and
 *       the @ip is passed in via the trace_printk() macro.
 *
 * This function allows a kernel developer to debug fast path sections
 * that printk is not appropriate for. By scattering printk-like tracing
 * calls throughout the code, a developer can quickly see where problems
 * are occurring.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving trace_printk()s scattered around in
 * your code. (Extra memory is used for special buffers that are
 * allocated when trace_printk() is used.)
 *
 * A little optimization trick is done here. If there's only one
 * argument, there's no need to scan the string for printf formats.
 * trace_puts() will suffice. But how can we take advantage of
 * using trace_puts() when trace_printk() has only one argument?
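 *
 * For instance, a bare call with no extra arguments, such as
 *
 *	trace_printk("entering fast path\n");
 *
 * (the message is purely illustrative) should be turned into a trace_puts()
 * call rather than going through the full format handling.
 *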
 * By stringifying the args and checking the size we can tell
 * whether or not there are args. __stringify((__VA_ARGS__)) will
 * turn into "()\0" with a size of 3 when there are no args; anything
 * else will be bigger. All we need to do is define a string to this,
 * and then take its size and compare it to 3. If it's bigger, use
 * do_trace_printk(); otherwise, optimize it to trace_puts(). Then just
 * let gcc optimize the rest.
 */

#define trace_printk(fmt, ...) …

#define do_trace_printk(fmt, args...) …

extern __printf(2, 3)
int __trace_bprintk(unsigned long ip, const char *fmt, ...);

extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);

/**
 * trace_puts - write a string into the ftrace buffer
 * @str: the string to record
 *
 * Note: __trace_bputs is an internal function for trace_puts and
 *       the @ip is passed in via the trace_puts macro.
 *
 * This is similar to trace_printk() but is made for those really fast
 * paths where a developer wants the least amount of "Heisenbug" effect
 * and even the processing of the print format is too much overhead.
 *
 * This function allows a kernel developer to debug fast path sections
 * that printk is not appropriate for. By scattering printk-like tracing
 * calls throughout the code, a developer can quickly see where problems
 * are occurring.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving trace_puts() calls scattered around in
 * your code. (Extra memory is used for special buffers that are
 * allocated when trace_puts() is used.)
 *
 * Returns: 0 if nothing was written, a positive number if the string was
 * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used).
 */
#define trace_puts(str) …
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);

extern void trace_dump_stack(int skip);

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable with fmt when it is not a
 * constant, even with the outer if statement.
 */
#define ftrace_vprintk(fmt, vargs) …

extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);

extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);

extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void trace_dump_stack(int skip) { }

static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }

static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
static __printf(1, 0) inline
int ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */

/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
#endif

/* Permissions on a sysfs file: you didn't miss the 0 prefix, did you? */
#define VERIFY_OCTAL_PERMISSIONS(perms) …

#endif
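
/*
 * Illustrative note on VERIFY_OCTAL_PERMISSIONS(): it exists to catch mode
 * values written without the leading 0, which silently mean something else
 * as decimal literals. With a hypothetical module parameter:
 *
 *	module_param(debug, int, 0644);		octal, i.e. rw-r--r--
 *	module_param(debug, int, 644);		decimal 644 is octal 01204
 *
 * Helpers that take a permission argument, such as module_param(), pass it
 * through VERIFY_OCTAL_PERMISSIONS() so mistakes like the second line fail
 * at compile time.
 */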