// SPDX-License-Identifier: GPL-2.0-only
// linux/kernel/cgroup/cgroup-v1.c

#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is to avoid frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;
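
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * lingering described above is typically implemented by (re)arming a
 * delayed work item on the flush-domain workqueue whenever a pidlist is
 * released, rather than freeing it immediately.  '@destroy_dwork' is a
 * hypothetical stand-in for a pidlist's delayed-destruction work item.
 */
static inline void cgroup_pidlist_arm_destroy_sketch(struct delayed_work *destroy_dwork)
{
	/* push destruction out by the full linger period on every release */
	mod_delayed_work(cgroup_pidlist_destroy_wq, destroy_dwork,
			 CGROUP_PIDLIST_DESTROY_DELAY);
}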

/* protects cgroup_root->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: the task whose cgroups @tsk will be attached to
 * @tsk: the task to be attached
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading these files can return large amounts of data if a cgroup has
 * *lots* of attached tasks, so it may take several read() calls.  But we
 * cannot guarantee that the information we produce is correct unless we
 * produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype { CGROUP_FILE_PROCS, CGROUP_FILE_TASKS };

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {};
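
/*
 * Illustrative sketch (assumption; the real layout above is elided in
 * this listing): the fields a pidlist minimally needs in order to live
 * in the per-cgroup pool described above.  All names are hypothetical.
 */
struct cgroup_pidlist_sketch {
	struct {
		enum cgroup_filetype type;	/* procs or tasks */
		struct pid_namespace *ns;	/* namespace the pids were read in */
	} key;					/* lookup key within the pool */
	pid_t *list;				/* sorted, de-duplicated pid array */
	int length;				/* number of entries in @list */
	struct list_head links;			/* membership in the owning cgroup's pool */
	struct delayed_work destroy_dwork;	/* arms the lingering destruction above */
};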

/*
 * Used to destroy all pidlists still lingering on their destroy timer.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{}

/*
 * pidlist_uniq - given a kmalloc()ed, sorted list, strip out all
 * duplicate entries.  Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{}
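
/*
 * Illustrative sketch (assumption): because the array handed to
 * pidlist_uniq() has already been sorted, stripping duplicates is a
 * single linear pass compacting unique values toward the front.
 */
static inline int pidlist_uniq_sketch(pid_t *list, int length)
{
	int src, dest = 1;

	if (length <= 1)
		return length;
	for (src = 1; src < length; src++)
		if (list[src] != list[src - 1])
			list[dest++] = list[src];
	return dest;
}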

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member
 * tasks sorted by task pointer.  As pidlists can be fairly large,
 * allocating one per open file is dangerous, so cgroup had to implement a
 * shared pool of pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
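
/*
 * Illustrative sketch (assumption): with cmppid() above, a freshly
 * loaded pid array would be sorted via sort() from <linux/sort.h> and
 * then de-duplicated, yielding the list's new length.
 */
static inline int pidlist_sort_uniq_sketch(pid_t *list, int length)
{
	sort(list, length, sizeof(pid_t), cmppid, NULL);
	return pidlist_uniq_sketch(list, length);
}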

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks).
 * Returns with the lock on that pidlist already held and takes care of
 * the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{}

/*
 * Load a cgroup's pid array with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid in
 * the pidlist's ->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{}
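
/*
 * Illustrative sketch (assumption): how the four callbacks above fit
 * the seq_file iterator contract.  The real file is believed to wire
 * them up through its cftype table (.seq_start/.seq_next/.seq_stop/
 * .seq_show) rather than through a standalone seq_operations instance.
 */
static const struct seq_operations cgroup_pidlist_seq_ops_sketch = {
	.start	= cgroup_pidlist_start,
	.next	= cgroup_pidlist_next,
	.stop	= cgroup_pidlist_stop,
	.show	= cgroup_pidlist_show,
};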

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{}
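
/*
 * Illustrative sketch (assumption): the two writers above are thin
 * wrappers differing only in the threadgroup flag passed down to
 * __cgroup1_procs_write(); "cgroup.procs" moves whole thread groups
 * while "tasks" moves individual threads.
 */
static ssize_t cgroup1_procs_write_sketch(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	/* true: migrate every thread in the target's thread group */
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}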

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{}
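
/*
 * Illustrative sketch (assumption): updates to root->release_agent_path
 * are serialized with release_agent_path_lock, roughly as below.
 * 'root' stands in for the cgroup_root involved.
 */
static void cgroup1_set_release_agent_sketch(struct cgroup_root *root,
					     const char *path)
{
	spin_lock(&release_agent_path_lock);
	strscpy(root->release_agent_path, path,
		sizeof(root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
}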

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {};
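
/*
 * Illustrative sketch (assumption): the shape of one entry in the
 * (elided) table above, binding the pidlist seq_file callbacks and the
 * procs writer to the "cgroup.procs" file.
 */
static struct cftype cgroup1_procs_cft_sketch = {
	.name		= "cgroup.procs",
	.seq_start	= cgroup_pidlist_start,
	.seq_next	= cgroup_pidlist_next,
	.seq_stop	= cgroup_pidlist_stop,
	.seq_show	= cgroup_pidlist_show,
	.write		= cgroup1_procs_write,
};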

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: a dentry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{}
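
/*
 * Illustrative sketch (assumption): the counting described above is a
 * css_task_iter walk over the cgroup's tasks, bucketing each task into
 * a cgroupstats field by scheduler state ('cgrp' is hypothetical).
 */
static void cgroupstats_count_sketch(struct cgroup *cgrp,
				     struct cgroupstats *stats)
{
	struct css_task_iter it;
	struct task_struct *tsk;

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (READ_ONCE(tsk->__state)) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			/* I/O wait is accounted via delayacct */
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);
}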

void cgroup1_check_for_release(struct cgroup *cgrp)
{}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{}
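
/*
 * Illustrative sketch (assumption): the spawn described above reduces
 * to a call_usermodehelper() with UMH_WAIT_EXEC, so the caller waits
 * only for the execve() of the agent, never for its exit.  'agent' and
 * 'path' stand in for the configured release agent binary and the
 * released cgroup's path.
 */
static void cgroup1_spawn_release_agent_sketch(char *agent, char *path)
{
	char *argv[] = { agent, path, NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}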

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{}

enum cgroup1_param {};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {};

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{}

static int check_cgroupfs_options(struct fs_context *fc)
{}

int cgroup1_reconfigure(struct fs_context *fc)
{}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {};

/*
 * The guts of cgroup1 mount - find or create the cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on error,
 * and a positive value when the candidate root is busy dying.  On
 * success it stashes a reference to the cgroup_root into the given
 * cgroup_fs_context; that reference is *NOT* counted towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{}

int cgroup1_get_tree(struct fs_context *fc)
{}

/**
 * task_get_cgroup1 - acquire the cgroup of a task in a cgroup1 hierarchy
 * @tsk: The target task
 * @hierarchy_id: The ID of a cgroup1 hierarchy
 *
 * The cgroup1 hierarchy is identified by its hierarchy ID.  On success,
 * the cgroup is returned; on failure, an ERR_PTR is returned.  We limit
 * it to cgroup1 only.
 */
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id)
{}

static int __init cgroup1_wq_init(void)
{}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{}
__setup("cgroup_no_v1=", cgroup_no_v1);
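
/*
 * Illustrative sketch (assumption): the parser above walks the
 * comma-separated "cgroup_no_v1=" value; "all" blocks every v1
 * controller, "named" disables named v1 hierarchies, and any other
 * token would be matched against the registered subsystem names.
 */
static int __init cgroup_no_v1_sketch(char *str)
{
	char *token;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;	/* every ssid bit */
			continue;
		}
		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}
		/* the real parser also matches individual controller names */
	}
	return 1;
}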