#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/memblock.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/stackprotector.h>
#include <linux/user_events.h>
#include <linux/iommu.h>
#include <linux/rseq.h>
#include <uapi/linux/pidfd.h>
#include <linux/pidfs.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <trace/events/sched.h>
#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
#include <kunit/visibility.h>
#define MIN_THREADS …
#define MAX_THREADS …
unsigned long total_forks;
int nr_threads;
static int max_threads;
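/*
 * nr_threads is the current system-wide thread count; max_threads bounds
 * it. The bound is derived from available memory in set_max_threads()
 * (clamped to the MIN_THREADS..MAX_THREADS window above) and can be
 * retuned at runtime via the kernel.threads-max sysctl, handled by
 * sysctl_max_threads() at the bottom of this file.
 */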
#define NAMED_ARRAY_INDEX(x) …
static const char * const resident_page_types[] = …;
DEFINE_PER_CPU(unsigned long, process_counts) = …;
__cacheline_aligned DEFINE_RWLOCK(…);
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
int nr_processes(void)
{ … }
void __weak arch_release_task_struct(struct task_struct *tsk)
{ … }
static struct kmem_cache *task_struct_cachep;
static inline struct task_struct *alloc_task_struct_node(int node)
{ … }
static inline void free_task_struct(struct task_struct *tsk)
{ … }
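/*
 * Thread stack allocation below comes in three flavours:
 *
 *  - CONFIG_VMAP_STACK: stacks live in vmalloc space (with guard pages),
 *    and a small per-CPU cache of NR_CACHED_STACKS vm_structs avoids
 *    repeated vmalloc/vfree on fork-heavy workloads;
 *  - !CONFIG_VMAP_STACK with THREAD_SIZE >= PAGE_SIZE: plain page-order
 *    allocations straight from the page allocator;
 *  - THREAD_SIZE < PAGE_SIZE: a dedicated kmem_cache carves stacks out
 *    of shared pages.
 *
 * In every flavour the actual free is deferred through call_rcu(), so
 * lockless walkers of the task list never see a dangling tsk->stack.
 */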
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
# ifdef CONFIG_VMAP_STACK
#define NR_CACHED_STACKS …
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
struct vm_stack { … };
static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{ … }
static void thread_stack_free_rcu(struct rcu_head *rh)
{ … }
static void thread_stack_delayed_free(struct task_struct *tsk)
{ … }
static int free_vm_stack_cache(unsigned int cpu)
{ … }
static int memcg_charge_kernel_stack(struct vm_struct *vm)
{ … }
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{ … }
static void free_thread_stack(struct task_struct *tsk)
{ … }
# else
static void thread_stack_free_rcu(struct rcu_head *rh)
{
        /* The rcu_head lives at the base of the dead stack itself. */
        __free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
        struct rcu_head *rh = tsk->stack;

        /* Reuse the stack's own memory to carry the RCU callback. */
        call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                             THREAD_SIZE_ORDER);

        if (likely(page)) {
                tsk->stack = kasan_reset_tag(page_address(page));
                return 0;
        }
        return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
        /* RCU-delayed: lockless walkers may still dereference tsk->stack. */
        thread_stack_delayed_free(tsk);
        tsk->stack = NULL;
}
# endif
# else
static struct kmem_cache *thread_stack_cache;
static void thread_stack_free_rcu(struct rcu_head *rh)
{
        kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
        struct rcu_head *rh = tsk->stack;

        call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
        unsigned long *stack;

        stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
        /* Strip any KASAN memory tag; the stack is used via untagged pointers. */
        stack = kasan_reset_tag(stack);
        tsk->stack = stack;
        return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
        thread_stack_delayed_free(tsk);
        tsk->stack = NULL;
}
void thread_stack_cache_init(void)
{
        thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
                                        THREAD_SIZE, THREAD_SIZE, 0, 0,
                                        THREAD_SIZE, NULL);
        BUG_ON(thread_stack_cache == NULL);
}
# endif
static struct kmem_cache *signal_cachep;
struct kmem_cache *sighand_cachep;
struct kmem_cache *files_cachep;
struct kmem_cache *fs_cachep;
static struct kmem_cache *vm_area_cachep;
static struct kmem_cache *mm_cachep;
#ifdef CONFIG_PER_VMA_LOCK
static struct kmem_cache *vma_lock_cachep;
static bool vma_lock_alloc(struct vm_area_struct *vma)
{ … }
static inline void vma_lock_free(struct vm_area_struct *vma)
{ … }
#else
static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
static inline void vma_lock_free(struct vm_area_struct *vma) {}
#endif
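/*
 * VMA lifecycle: vm_area_alloc() and vm_area_dup() hand out
 * vm_area_structs from vm_area_cachep, and vm_area_free() is the
 * destructor. With CONFIG_PER_VMA_LOCK, a reader may still hold the
 * per-VMA lock, so the final __vm_area_free() is deferred through
 * vm_area_free_rcu_cb(); otherwise it runs immediately.
 */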
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{ … }
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{ … }
void __vm_area_free(struct vm_area_struct *vma)
{ … }
#ifdef CONFIG_PER_VMA_LOCK
static void vm_area_free_rcu_cb(struct rcu_head *head)
{ … }
#endif
void vm_area_free(struct vm_area_struct *vma)
{ … }
static void account_kernel_stack(struct task_struct *tsk, int account)
{ … }
void exit_task_stack_account(struct task_struct *tsk)
{ … }
static void release_task_stack(struct task_struct *tsk)
{ … }
#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{ … }
#endif
void free_task(struct task_struct *tsk)
{ … }
EXPORT_SYMBOL(…);
static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{ … }
#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
                                     struct mm_struct *oldmm)
{ … }
static inline int mm_alloc_pgd(struct mm_struct *mm)
{ … }
static inline void mm_free_pgd(struct mm_struct *mm)
{ … }
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        /*
         * Without an MMU there are no mappings to duplicate; fork only
         * carries the exe_file reference over to the new mm.
         */
        mmap_write_lock(oldmm);
        dup_mm_exe_file(mm, oldmm);
        mmap_write_unlock(oldmm);
        return 0;
}
#define mm_alloc_pgd …
#define mm_free_pgd …
#endif
static void check_mm(struct mm_struct *mm)
{ … }
#define allocate_mm() …
#define free_mm(mm) …
static void do_check_lazy_tlb(void *arg)
{ … }
static void do_shoot_lazy_tlb(void *arg)
{ … }
static void cleanup_lazy_tlbs(struct mm_struct *mm)
{ … }
void __mmdrop(struct mm_struct *mm)
{ … }
EXPORT_SYMBOL_GPL(…);
static void mmdrop_async_fn(struct work_struct *work)
{ … }
static void mmdrop_async(struct mm_struct *mm)
{ … }
static inline void free_signal_struct(struct signal_struct *sig)
{ … }
static inline void put_signal_struct(struct signal_struct *sig)
{ … }
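/*
 * task_struct reference counting, in brief: get_task_struct() pins a
 * task and put_task_struct() drops it, with the final put landing in
 * __put_task_struct(). Illustrative pattern (not fork.c code):
 *
 *        get_task_struct(p);
 *        ...use p without tasklist_lock or RCU...
 *        put_task_struct(p);
 *
 * __put_task_struct_rcu_cb() is the call_rcu() flavour, for last puts
 * that happen in contexts that must not free synchronously.
 */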
void __put_task_struct(struct task_struct *tsk)
{ … }
EXPORT_SYMBOL_GPL(…);
void __put_task_struct_rcu_cb(struct rcu_head *rhp)
{ … }
EXPORT_SYMBOL_GPL(…);
void __init __weak arch_task_cache_init(void) { … }
static void __init set_max_threads(unsigned int max_threads_suggested)
{ … }
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
int arch_task_struct_size __read_mostly;
#endif
static void __init task_struct_whitelist(unsigned long *offset, unsigned long *size)
{ … }
void __init fork_init(void)
{ … }
int __weak arch_dup_task_struct(struct task_struct *dst,
                                struct task_struct *src)
{ … }
void set_task_stack_end_magic(struct task_struct *tsk)
{ … }
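/*
 * dup_task_struct() makes the raw copy for fork: a fresh task_struct
 * plus kernel stack on the requested NUMA node, an arch_dup_task_struct()
 * copy of the parent, and a fresh STACK_END_MAGIC canary written by
 * set_task_stack_end_magic() so stack overruns can be caught.
 */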
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{ … }
__cacheline_aligned_in_smp DEFINE_SPINLOCK(…);
static unsigned long default_dump_filter = …;
static int __init coredump_filter_setup(char *s)
{ … }
__setup(…);
#include <linux/init_task.h>
static void mm_init_aio(struct mm_struct *mm)
{ … }
static __always_inline void mm_clear_owner(struct mm_struct *mm,
                                           struct task_struct *p)
{ … }
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{ … }
static void mm_init_uprobes_state(struct mm_struct *mm)
{ … }
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
                                 struct user_namespace *user_ns)
{ … }
struct mm_struct *mm_alloc(void)
{ … }
EXPORT_SYMBOL_IF_KUNIT(…);
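/*
 * Two counters guard an mm: mm_users counts users of the address space
 * (mmget()/mmput(); __mmput() below tears the address space down on the
 * last user), while mm_count pins the mm_struct itself
 * (mmgrab()/mmdrop(); __mmdrop() above frees it). Illustrative pattern
 * for touching another task's memory:
 *
 *        mm = get_task_mm(task);
 *        if (mm) {
 *                ...
 *                mmput(mm);
 *        }
 */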
static inline void __mmput(struct mm_struct *mm)
{ … }
void mmput(struct mm_struct *mm)
{ … }
EXPORT_SYMBOL_GPL(…);
#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{ … }
void mmput_async(struct mm_struct *mm)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{ … }
int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{ … }
struct file *get_mm_exe_file(struct mm_struct *mm)
{ … }
struct file *get_task_exe_file(struct task_struct *task)
{ … }
struct mm_struct *get_task_mm(struct task_struct *task)
{ … }
EXPORT_SYMBOL_GPL(…);
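/*
 * mm_access() is the permission-checked variant used by /proc and
 * friends: the caller names a ptrace access mode and only receives the
 * mm if that check passes. Typical idiom (sketch):
 *
 *        mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 *        if (IS_ERR_OR_NULL(mm))
 *                return mm ? PTR_ERR(mm) : -ESRCH;
 */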
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{ … }
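/*
 * vfork() support: the parent sleeps in wait_for_vfork_done() on the
 * child's vfork completion; complete_vfork_done(), reached from
 * mm_release() when the child execs or exits, lets it run again. This
 * handshake is what makes it safe for the child to borrow the parent's
 * mm.
 */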
static void complete_vfork_done(struct task_struct *tsk)
{ … }
static int wait_for_vfork_done(struct task_struct *child,
                               struct completion *vfork)
{ … }
static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{ … }
void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{ … }
void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{ … }
static struct mm_struct *dup_mm(struct task_struct *tsk,
                                struct mm_struct *oldmm)
{ … }
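/*
 * copy_mm(): with CLONE_VM the child shares the parent's mm (that is
 * what makes a thread); otherwise dup_mm() builds a copy-on-write
 * duplicate via dup_mmap(). Kernel threads have no mm at all and skip
 * both paths.
 */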
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{ … }
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{ … }
static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
                      int no_files)
{ … }
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{ … }
void __cleanup_sighand(struct sighand_struct *sighand)
{ … }
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{ … }
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{ … }
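/*
 * copy_seccomp() runs with the parent's sighand lock held, so the
 * seccomp filter chain cannot change between the flag check and the
 * copy; the child inherits the parent's filters.
 */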
static void copy_seccomp(struct task_struct *p)
{ … }
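/*
 * set_tid_address(2) records where the kernel should store a zero and
 * issue a futex wakeup when this thread exits (clear_child_tid); C
 * libraries point it at the TID field that pthread_join() waits on.
 * Returns the caller's thread ID.
 */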
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{ … }
static void rt_mutex_init_task(struct task_struct *p)
{ … }
static inline void init_task_pid_links(struct task_struct *task)
{ … }
static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{ … }
static inline void rcu_copy_process(struct task_struct *p)
{ … }
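/*
 * __pidfd_prepare()/pidfd_prepare() build the file backing a pidfd, as
 * handed out by CLONE_PIDFD and pidfd_open(2). The file is returned in
 * *ret but not yet installed in any descriptor table; the caller
 * installs it only once the new task is committed, so a late failure
 * can still back out cleanly.
 */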
static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{ … }
int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{ … }
static void __delayed_free_task(struct rcu_head *rhp)
{ … }
static __always_inline void delayed_free_task(struct task_struct *tsk)
{ … }
static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
{ … }
#ifdef CONFIG_RV
static void rv_task_fork(struct task_struct *p)
{ … }
#else
#define rv_task_fork …
#endif
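/*
 * This creates a new process as a copy of the old one, but does not
 * actually start it yet. It copies the registers and all the
 * appropriate parts of the process environment (as per the clone
 * flags); the actual kick-off is left to the caller.
 */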
__latent_entropy struct task_struct *copy_process(
                                        struct pid *pid,
                                        int trace,
                                        int node,
                                        struct kernel_clone_args *args)
{ … }
static inline void init_idle_pids(struct task_struct *idle)
{ … }
static int idle_dummy(void *dummy)
{ … }
struct task_struct * __init fork_idle(int cpu)
{ … }
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
{ … }
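/*
 * kernel_clone() is the main fork routine: fork(), vfork(), clone(),
 * clone3(), kernel_thread() and user_mode_thread() all fill in a
 * kernel_clone_args and funnel through here. (create_io_thread() and
 * fork_idle() above call copy_process() directly.)
 */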
pid_t kernel_clone(struct kernel_clone_args *args)
{ … }
pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
                    unsigned long flags)
{ … }
pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
{ … }
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{ … }
#endif
#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{ … }
#endif
#ifdef __ARCH_WANT_SYS_CLONE
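/*
 * The clone(2) register argument order was frozen per architecture long
 * ago, so the CONFIG_CLONE_BACKWARDS* variants below differ only in
 * where newsp, tls and the two TID pointers sit in the argument list.
 */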
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                int __user *, parent_tidptr,
                unsigned long, tls,
                int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
                int __user *, parent_tidptr,
                int __user *, child_tidptr,
                unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
                int, stack_size,
                int __user *, parent_tidptr,
                int __user *, child_tidptr,
                unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                int __user *, parent_tidptr,
                int __user *, child_tidptr,
                unsigned long, tls)
#endif
{ … }
#endif
static noinline int copy_clone_args_from_user(struct kernel_clone_args *kargs,
                                              struct clone_args __user *uargs,
                                              size_t usize)
{ … }
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
{ … }
static bool clone3_args_valid(struct kernel_clone_args *kargs)
{ … }
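/*
 * Userspace usage sketch for clone3(2), illustrative only (raw syscall,
 * since libc wrappers are typically absent; error handling elided):
 *
 *        int pidfd = -1;
 *        struct clone_args args = {
 *                .flags       = CLONE_PIDFD,
 *                .pidfd       = (__u64)(uintptr_t)&pidfd,
 *                .exit_signal = SIGCHLD,
 *        };
 *        pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *
 * The return is 0 in the child, the child's PID in the parent, and -1
 * with errno set on failure. copy_clone_args_from_user() above is what
 * keeps the struct extensible: a larger clone_args from newer userspace
 * is accepted as long as the tail the kernel does not know about is
 * zero.
 */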
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{ … }
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{ … }
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN …
#endif
static void sighand_ctor(void *data)
{ … }
void __init mm_cache_init(void)
{ … }
void __init proc_caches_init(void)
{ … }
static int check_unshare_flags(unsigned long unshare_flags)
{ … }
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{ … }
int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
               struct files_struct **new_fdp)
{ … }
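/*
 * unshare(2) reverses sharing that clone() set up, for the calling task
 * only. Illustrative use: a process that wants a private mount
 * namespace without forking can do
 *
 *        if (unshare(CLONE_NEWNS) == -1)
 *                perror("unshare");
 *
 * check_unshare_flags() above rejects requests that cannot be honoured,
 * such as splitting signal handlers off a still-multithreaded process.
 */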
int ksys_unshare(unsigned long unshare_flags)
{ … }
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{ … }
int unshare_files(void)
{ … }
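/*
 * Handler for the kernel.threads-max sysctl. Writes are clamped to the
 * MIN_THREADS..MAX_THREADS window defined at the top of this file, e.g.
 * (shell, illustrative):
 *
 *        # sysctl kernel.threads-max=65536
 */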
int sysctl_max_threads(const struct ctl_table *table, int write,
                       void *buffer, size_t *lenp, loff_t *ppos)
{ … }