#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
#include <uapi/linux/sched.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <linux/thread_info.h>
#include <linux/preempt.h>
#include <linux/cpumask_types.h>
#include <linux/cache.h>
#include <linux/irqflags_types.h>
#include <linux/smp_types.h>
#include <linux/pid_types.h>
#include <linux/sem_types.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
#include <linux/mutex_types.h>
#include <linux/plist_types.h>
#include <linux/hrtimer_types.h>
#include <linux/timer_types.h>
#include <linux/seccomp_types.h>
#include <linux/nodemask_types.h>
#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/netdevice_xmit.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
#include <uapi/linux/rseq.h>
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
#include <asm/kmap_size.h>
struct audit_context;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
struct bpf_net_context;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct user_event_mm;
#include <linux/sched/ext.h>
#define TASK_RUNNING …
#define TASK_INTERRUPTIBLE …
#define TASK_UNINTERRUPTIBLE …
#define __TASK_STOPPED …
#define __TASK_TRACED …
#define EXIT_DEAD …
#define EXIT_ZOMBIE …
#define EXIT_TRACE …
#define TASK_PARKED …
#define TASK_DEAD …
#define TASK_WAKEKILL …
#define TASK_WAKING …
#define TASK_NOLOAD …
#define TASK_NEW …
#define TASK_RTLOCK_WAIT …
#define TASK_FREEZABLE …
#define __TASK_FREEZABLE_UNSAFE …
#define TASK_FROZEN …
#define TASK_STATE_MAX …
#define TASK_ANY …
#define TASK_FREEZABLE_UNSAFE …
#define TASK_KILLABLE …
#define TASK_STOPPED …
#define TASK_TRACED …
#define TASK_IDLE …
#define TASK_NORMAL …
#define TASK_REPORT …
#define task_is_running(task) …
#define task_is_traced(task) …
#define task_is_stopped(task) …
#define task_is_stopped_or_traced(task) …
#define is_special_task_state(state) …
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
#define debug_normal_state_change(state_value) …
#define debug_special_state_change(state_value) …
#define debug_rtlock_wait_set_state() …
#define debug_rtlock_wait_restore_state() …
#else
#define debug_normal_state_change(cond) …
#define debug_special_state_change(cond) …
#define debug_rtlock_wait_set_state() …
#define debug_rtlock_wait_restore_state() …
#endif
#define __set_current_state(state_value) …
#define set_current_state(state_value) …
#define set_special_state(state_value) …
#define current_save_and_set_rtlock_wait_state() …
#define current_restore_rtlock_saved_state() …
#define get_current_state() …
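/*
 * Illustrative sketch (not part of this header): the canonical sleeper uses
 * set_current_state() and re-checks its condition before calling schedule(),
 * so a wakeup that races with the check is not lost:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * CONDITION is a placeholder for whatever event the caller waits on; the
 * waker must set it before calling wake_up_process() (see further down).
 */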
enum { … };
extern void sched_tick(void);
#define MAX_SCHEDULE_TIMEOUT …
extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
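/*
 * Illustrative sketch (assumption, not from this header): schedule_timeout()
 * only sleeps if the caller has already set a non-running task state; the
 * _interruptible/_killable/_uninterruptible/_idle wrappers set it for you.
 * Sleeping for roughly one second uninterruptibly can be written as:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_timeout(HZ);
 *
 * or, equivalently:
 *
 *	schedule_timeout_uninterruptible(HZ);
 */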
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
#ifdef CONFIG_PREEMPT_RT
extern void schedule_rtlock(void);
#endif
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
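/*
 * Illustrative sketch: io_schedule() is the one-shot form; the prepare/finish
 * pair lets a caller mark an arbitrary blocking region as iowait:
 *
 *	int token = io_schedule_prepare();
 *
 *	wait_for_completion(&done);
 *	io_schedule_finish(token);
 *
 * wait_for_completion(&done) stands in for any blocking call made inside the
 * region.
 */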
struct prev_cputime { … };
enum vtime_state { … };
struct vtime { … };
enum uclamp_id { … };
#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif
struct sched_param { … };
struct sched_info { … };
#define SCHED_FIXEDPOINT_SHIFT …
#define SCHED_FIXEDPOINT_SCALE …
#define SCHED_CAPACITY_SHIFT …
#define SCHED_CAPACITY_SCALE …
struct load_weight { … };
struct sched_avg { … } ____cacheline_aligned;
#define UTIL_EST_WEIGHT_SHIFT …
#define UTIL_AVG_UNCHANGED …
struct sched_statistics { … } ____cacheline_aligned;
struct sched_entity { … };
struct sched_rt_entity { … } __randomize_layout;
typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
struct sched_dl_entity { … };
#ifdef CONFIG_UCLAMP_TASK
#define UCLAMP_BUCKETS …
struct uclamp_se { … };
#endif
union rcu_special { … };
enum perf_event_task_context { … };
#define PERF_NR_CONTEXTS …
struct wake_q_node { … };
struct kmap_ctrl { … };
struct task_struct { … };
#define TASK_REPORT_IDLE …
#define TASK_REPORT_MAX …
static inline unsigned int __task_state_index(unsigned int tsk_state,
unsigned int tsk_exit_state)
{ … }
static inline unsigned int task_state_index(struct task_struct *tsk)
{ … }
static inline char task_index_to_char(unsigned int state)
{ … }
static inline char task_state_to_char(struct task_struct *tsk)
{ … }
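/*
 * Illustrative sketch: task_state_to_char() returns the one-letter state used
 * by ps(1) and /proc/<pid>/stat ('R' running, 'S' sleeping, 'D' disk sleep,
 * 'T' stopped, 'Z' zombie, ...), e.g.:
 *
 *	pr_info("%s[%d] is in state %c\n",
 *		tsk->comm, task_pid_nr(tsk), task_state_to_char(tsk));
 */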
extern struct pid *cad_pid;
#define PF_VCPU …
#define PF_IDLE …
#define PF_EXITING …
#define PF_POSTCOREDUMP …
#define PF_IO_WORKER …
#define PF_WQ_WORKER …
#define PF_FORKNOEXEC …
#define PF_MCE_PROCESS …
#define PF_SUPERPRIV …
#define PF_DUMPCORE …
#define PF_SIGNALED …
#define PF_MEMALLOC …
#define PF_NPROC_EXCEEDED …
#define PF_USED_MATH …
#define PF_USER_WORKER …
#define PF_NOFREEZE …
#define PF__HOLE__00010000 …
#define PF_KSWAPD …
#define PF_MEMALLOC_NOFS …
#define PF_MEMALLOC_NOIO …
#define PF_LOCAL_THROTTLE …
#define PF_KTHREAD …
#define PF_RANDOMIZE …
#define PF__HOLE__00800000 …
#define PF__HOLE__01000000 …
#define PF__HOLE__02000000 …
#define PF_NO_SETAFFINITY …
#define PF_MCE_EARLY …
#define PF_MEMALLOC_PIN …
#define PF_BLOCK_TS …
#define PF__HOLE__40000000 …
#define PF_SUSPEND_TASK …
#define clear_stopped_child_used_math(child) …
#define set_stopped_child_used_math(child) …
#define clear_used_math() …
#define set_used_math() …
#define conditional_stopped_child_used_math(condition, child) …
#define conditional_used_math(condition) …
#define copy_to_stopped_child_used_math(child) …
#define tsk_used_math(p) …
#define used_math() …
static __always_inline bool is_percpu_thread(void)
{ … }
#define PFA_NO_NEW_PRIVS …
#define PFA_SPREAD_PAGE …
#define PFA_SPREAD_SLAB …
#define PFA_SPEC_SSB_DISABLE …
#define PFA_SPEC_SSB_FORCE_DISABLE …
#define PFA_SPEC_IB_DISABLE …
#define PFA_SPEC_IB_FORCE_DISABLE …
#define PFA_SPEC_SSB_NOEXEC …
#define TASK_PFA_TEST(name, func) …
#define TASK_PFA_SET(name, func) …
#define TASK_PFA_CLEAR(name, func) …
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{ … }
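/*
 * Illustrative sketch: current_restore_flags() pairs with saving bits of
 * current->flags when a flag is set temporarily, e.g. the pattern behind
 * memalloc_noio_save()/memalloc_noio_restore():
 *
 *	unsigned int noio = current->flags & PF_MEMALLOC_NOIO;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	... allocations in this region behave as if GFP_NOIO ...
 *	current_restore_flags(noio, PF_MEMALLOC_NOIO);
 */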
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
if ((*cpumask_bits(new_mask) & 1) == 0)
return -EINVAL;
return 0;
}
static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
{
if (src->user_cpus_ptr)
return -EINVAL;
return 0;
}
static inline void release_user_cpus_ptr(struct task_struct *p)
{
WARN_ON(p->user_cpus_ptr);
}
static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
return 0;
}
#endif
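/*
 * Illustrative sketch (p and cpu are placeholders): binding a task to a
 * single CPU and later allowing it to run anywhere again:
 *
 *	set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *	...
 *	set_cpus_allowed_ptr(p, cpu_possible_mask);
 */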
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
static inline int task_nice(const struct task_struct *p)
{ … }
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
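/*
 * Illustrative sketch: kernel threads that need real-time priority use the
 * sched_set_fifo*() helpers instead of hard-coding a numeric priority, and
 * drop back with sched_set_normal():
 *
 *	sched_set_fifo(current);
 *	... latency-critical work ...
 *	sched_set_normal(current, 0);
 */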
extern struct task_struct *idle_task(int cpu);
static __always_inline bool is_idle_task(const struct task_struct *p)
{ … }
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union { … };
#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
#define task_thread_info(task) …
#elif !defined(__HAVE_THREAD_FUNCTIONS)
#define task_thread_info(task) …
#endif
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
extern struct task_struct *find_get_task_by_vpid(pid_t nr);
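/*
 * Illustrative sketch: find_task_by_vpid() returns an RCU-protected pointer
 * and must be called under rcu_read_lock(); find_get_task_by_vpid() also
 * takes a reference that the caller drops with put_task_struct():
 *
 *	struct task_struct *p = find_get_task_by_vpid(nr);
 *
 *	if (p) {
 *		... use p ...
 *		put_task_struct(p);
 *	}
 */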
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
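/*
 * Illustrative sketch: the waker side of the sleeper loop sketched near the
 * task-state macros above publishes the condition and then wakes the task;
 * wake_up_process() provides the ordering that pairs with set_current_state():
 *
 *	CONDITION = 1;
 *	wake_up_process(sleeper);
 *
 * CONDITION and sleeper are placeholders.
 */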
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{ … }
extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) …
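/*
 * Illustrative sketch: get_task_comm() copies the task name into a
 * caller-provided buffer of TASK_COMM_LEN bytes under the task lock:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_debug("acting on behalf of %s\n", comm);
 */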
#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{ … }
#else
static inline void scheduler_ipi(void) { }
#endif
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{ … }
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{ … }
static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
bool value)
{ … }
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{ … }
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{ … }
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{ … }
static inline void set_tsk_need_resched(struct task_struct *tsk)
{ … }
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{ … }
static inline int test_tsk_need_resched(struct task_struct *tsk)
{ … }
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
void sched_dynamic_klp_enable(void);
void sched_dynamic_klp_disable(void);
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
{ … }
#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
extern int dynamic_cond_resched(void);
static __always_inline int _cond_resched(void)
{
return dynamic_cond_resched();
}
#else
static inline int _cond_resched(void)
{
klp_sched_try_switch();
return __cond_resched();
}
#endif
#else
static inline int _cond_resched(void)
{
klp_sched_try_switch();
return 0;
}
#endif
#define cond_resched() …
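/*
 * Illustrative sketch: long loops in process context call cond_resched()
 * periodically so other tasks can run on non-preemptible kernels:
 *
 *	list_for_each_entry(obj, &obj_list, node) {
 *		process_one(obj);
 *		cond_resched();
 *	}
 *
 * obj, obj_list and process_one() are placeholders.
 */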
extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);
#define MIGHT_RESCHED_RCU_SHIFT …
#define MIGHT_RESCHED_PREEMPT_MASK …
#ifndef CONFIG_PREEMPT_RT
#define PREEMPT_LOCK_RESCHED_OFFSETS …
#else
#define PREEMPT_LOCK_RESCHED_OFFSETS …
#endif
#define cond_resched_lock(lock) …
#define cond_resched_rwlock_read(lock) …
#define cond_resched_rwlock_write(lock) …
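/*
 * Illustrative sketch: cond_resched_lock() drops the given lock when a
 * reschedule (or lock contention) is pending, reschedules, and re-acquires
 * the lock, so it can be used while iterating under a spinlock:
 *
 *	spin_lock(&q->lock);
 *	while (!list_empty(&q->items)) {
 *		handle_one(q);
 *		cond_resched_lock(&q->lock);
 *	}
 *	spin_unlock(&q->lock);
 *
 * q and handle_one() are placeholders.
 */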
static __always_inline bool need_resched(void)
{ … }
#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{ … }
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
static inline unsigned int task_cpu(const struct task_struct *p)
{
return 0;
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}
#endif
static inline bool task_is_runnable(struct task_struct *p)
{ … }
extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
#include <linux/spinlock.h>
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
return false;
}
#endif
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) …
#endif
#ifdef CONFIG_SMP
static inline bool owner_on_cpu(struct task_struct *owner)
{ … }
unsigned long sched_cpu_util(int cpu);
#endif
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
unsigned long uaddr);
extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
#ifdef CONFIG_MEM_ALLOC_PROFILING
static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
swap(current->alloc_tag, tag);
return tag;
}
static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
#endif
current->alloc_tag = old;
}
#else
#define alloc_tag_save(_tag) …
#define alloc_tag_restore(_tag, _old) …
#endif
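/*
 * Illustrative sketch: memory allocation profiling call sites temporarily
 * redirect accounting by swapping current->alloc_tag around a nested
 * allocation and restoring the previous tag afterwards:
 *
 *	struct alloc_tag *old = alloc_tag_save(tag);
 *
 *	... allocations here are accounted to @tag ...
 *	alloc_tag_restore(tag, old);
 */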
#endif