linux/mm/memcontrol-v1.c

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/memcontrol.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/pagewalk.h>
#include <linux/backing-dev.h>
#include <linux/swap_cgroup.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/seq_buf.h>

#include "internal.h"
#include "swap.h"
#include "memcontrol-v1.h"

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_node {};

struct mem_cgroup_tree {};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;
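
/*
 * Illustrative sketch, not from this file: one plausible layout for the
 * soft-limit tree types whose bodies are elided above.  Each per-node tree
 * keeps the cgroups exceeding their soft limit ordered by excess and caches
 * the rightmost (largest-excess) entry; the field names are assumptions.
 */
#if 0
struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};
#endif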

/*
 * Maximum number of loops in mem_cgroup_soft_reclaim(), used to keep
 * soft limit reclaim from looping forever.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS
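
/*
 * Illustrative values only; the in-tree limits are elided above.
 */
#if 0
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
#endif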

/* State for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON
#define MOVE_FILE
#define MOVE_MASK
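
/*
 * Plausible encoding (the in-tree values are elided above): one bit per
 * charge type, combined into a mask of everything that may be moved.
 */
#if 0
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
#endif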

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {} mc =;

/* for OOM */
struct mem_cgroup_eventfd_list {};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {};
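
/*
 * Illustrative sketch of the event plumbing described above; the real
 * struct body is elided.  Apart from the register/unregister callbacks,
 * whose signatures mirror the handlers further down in this file, the
 * field names here are assumptions.
 */
#if 0
struct mem_cgroup_event {
	/* memcg which the event belongs to */
	struct mem_cgroup *memcg;
	/* eventfd used to signal userspace */
	struct eventfd_ctx *eventfd;
	/* linked into the memcg's list of events */
	struct list_head list;
	/* bind/unbind the eventfd to a control file; see
	 * memcg_write_event_control() */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/* polling machinery for detecting eventfd close (EPOLLHUP) */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};
#endif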

#define MEMFILE_PRIVATE(x, val)
#define MEMFILE_TYPE(val)
#define MEMFILE_ATTR(val)
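
/*
 * A plausible packing scheme for the per-file .private value (the in-tree
 * macro bodies are elided above): resource type in the high 16 bits,
 * attribute in the low 16.  A cftype entry could then use something like
 * .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT); _MEM and RES_LIMIT are
 * assumed names here.
 */
#if 0
#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
#endif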

enum {};

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map =;
#endif

DEFINE_SPINLOCK(memcg_oom_lock);

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{}

static void memcg1_update_tree(struct mem_cgroup *memcg, int nid)
{}

void memcg1_remove_from_trees(struct mem_cgroup *memcg)
{}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{}

unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of a
 * moving cgroup.  This is used for waiting out high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{}
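
/*
 * Condensed sketch of the check described above; mc.lock and the exact
 * layout of "mc" are assumptions since the struct body is elided.
 */
#if 0
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from, *to;
	bool ret = false;

	/* mc.from and mc.to are only stable under mc.lock. */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(memcg, from) ||
	      mem_cgroup_is_descendant(memcg, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}
#endif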

bool memcg1_wait_acct_move(struct mem_cgroup *memcg)
{}

/**
 * folio_memcg_lock - Bind a folio to its memcg.
 * @folio: The folio.
 *
 * This function prevents unlocked LRU folios from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the bound memcg.  The caller is responsible
 * for the lifetime of the folio.
 */
void folio_memcg_lock(struct folio *folio)
{}
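
/*
 * Condensed sketch of the binding described above, assuming the v1
 * moving_account counter and move_lock fields in struct mem_cgroup; the
 * lock is intentionally left held so that folio_memcg_unlock() can drop it.
 */
#if 0
void folio_memcg_lock(struct folio *folio)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

again:
	memcg = folio_memcg(folio);
	if (unlikely(!memcg))
		return;

	/* Fast path: no charge moving is in flight anywhere. */
	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != folio_memcg(folio)) {
		/* The folio was moved under us; retry against its new memcg. */
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/* Remember the holder so __folio_memcg_unlock() can release it. */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;
}
#endif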

static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{}

/**
 * folio_memcg_unlock - Release the binding between a folio and its memcg.
 * @folio: The folio.
 *
 * This releases the binding created by folio_memcg_lock().  This does
 * not change the accounting of this folio to its memcg, but it does
 * permit others to change it.
 */
void folio_memcg_unlock(struct folio *folio)
{}

#ifdef CONFIG_SWAP
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge()
 * for both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{}
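
/*
 * Condensed sketch of the record exchange described above.  It assumes the
 * swap_cgroup_cmpxchg() helper from <linux/swap_cgroup.h> and the MEMCG_SWAP
 * statistics item; anything beyond the cmpxchg race is omitted.
 */
#if 0
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id = mem_cgroup_id(from);
	unsigned short new_id = mem_cgroup_id(to);

	/* Swap the owner recorded for @entry only if it still belongs to @from. */
	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
		return 0;
	}
	return -EINVAL;
}
#endif
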
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
				 struct cftype *cft, u64 val)
{}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
				 struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
static int mem_cgroup_do_precharge(unsigned long count)
{}

union mc_target {};

enum mc_target_type {};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{}

#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent)
{}

/**
 * mem_cgroup_move_account - move account of the folio
 * @folio: The folio.
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the folio is moved from.
 * @to:	mem_cgroup which the folio is moved to. @from != @to.
 *
 * The folio must be locked and not on the LRU.
 *
 * This function does not charge the new cgroup and does not uncharge the
 * old cgroup; it only moves the existing charge's accounting from @from to @to.
 */
static int mem_cgroup_move_account(struct folio *folio,
				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{}
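
/*
 * Condensed sketch of the move described above: verify ownership, then flip
 * the folio's memcg pointer and the css references.  The per-folio statistics
 * transfer that makes up most of the real function is elided here.
 */
#if 0
static int mem_cgroup_move_account(struct folio *folio,
				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	int ret = -EBUSY;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON(compound && !folio_test_large(folio));

	if (folio_memcg(folio) != from)
		goto out;

	/* ... move anon/file/dirty/writeback counters from @from to @to ... */

	css_get(&to->css);
	css_put(&from->css);

	/* The folio now reports @to as its memcg. */
	folio->memcg_data = (unsigned long)to;
	ret = 0;
out:
	return ret;
}
#endif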

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma to which the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target folio or swap entry will be stored (can be NULL)
 *
 * Context: Called with pte lock held.
 * Return:
 * * MC_TARGET_NONE - If the pte is not a target for move charge.
 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
 *   move charge. If @target is not NULL, the folio is stored in target->folio
 *   with extra refcnt taken (Caller should release it).
 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
 *   target for charge migration.  If @target is not NULL, the entry is
 *   stored in target->ent.
 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but the page is device memory and
 *   thus not on the LRU.  For now such a page is charged like a regular page
 *   would be, as it is just special memory taking the place of a regular page.
 *   See Documentation/mm/hmm.rst and include/linux/hmm.h
 */
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{}
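
/*
 * Illustrative definitions matching the kernel-doc above; the in-tree
 * union and enum bodies are elided earlier in this file.
 */
#if 0
union mc_target {
	struct folio	*folio;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
	MC_TARGET_DEVICE,
};
#endif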

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider PMD-mapped swapping or file-mapped pages because THP
 * does not support them for now.
 * The caller must make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{}

static const struct mm_walk_ops precharge_walk_ops =;

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{}

static void mem_cgroup_clear_mc(void)
{}

int memcg1_can_attach(struct cgroup_taskset *tset)
{}

void memcg1_cancel_attach(struct cgroup_taskset *tset)
{}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{}

static const struct mm_walk_ops charge_walk_ops =;

static void mem_cgroup_move_charge(void)
{}

void memcg1_move_task(void)
{}

#else	/* !CONFIG_MMU */
int memcg1_can_attach(struct cgroup_taskset *tset)
{
	return 0;
}
void memcg1_cancel_attach(struct cgroup_taskset *tset)
{
}
void memcg1_move_task(void)
{
}
#endif

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{}

/*
 * Check events in order.
 */
void memcg1_check_events(struct mem_cgroup *memcg, int nid)
{}

static int compare_thresholds(const void *a, const void *b)
{}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{}

static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{}

static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{}

static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{}

static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{}

static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{}

static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{}

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void memcg_event_remove(struct work_struct *work)
{}

/*
 * Gets called on EPOLLHUP on the eventfd when the user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
			    int sync, void *key)
{}

static void memcg_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{}

/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
{}
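
/*
 * Illustrative parse of the documented "<event_fd> <control_fd> <args>"
 * format.  memcg_parse_event_control() is a hypothetical helper, not the
 * in-tree parser; the real handler goes on to look up both fds and bind the
 * eventfd to the control file's register_event()/unregister_event() pair.
 */
#if 0
static int memcg_parse_event_control(char *buf, unsigned int *efd,
				     unsigned int *cfd, char **args)
{
	char *endp;

	*efd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buf = endp + 1;

	*cfd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ' && *endp != '\0')
		return -EINVAL;
	*args = endp + 1;

	return 0;
}
#endif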

void memcg1_memcg_init(struct mem_cgroup *memcg)
{}

void memcg1_css_offline(struct mem_cgroup *memcg)
{}

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone else is already running it, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{}
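
/*
 * Condensed sketch of the trylock described above.  It assumes the usual
 * for_each_mem_cgroup_tree() iterator, a per-memcg oom_lock flag and the
 * memcg_oom_lock spinlock; the in-tree version also unwinds the flags it
 * already set when it hits a locked subtree.
 */
#if 0
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;
	bool locked = true;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/* Someone already holds the OOM lock in this subtree. */
			locked = false;
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
		iter->oom_lock = true;
	}

	/* (Unwinding of partially taken locks omitted.) */

	spin_unlock(&memcg_oom_lock);
	return locked;
}
#endif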

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{}

void memcg1_oom_recover(struct mem_cgroup *memcg)
{}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{}
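
/*
 * Approximate call pattern from the fault path (illustrative, not part of
 * this file): clean up quietly when the fault succeeded anyway, and
 * kill/wait from the pagefault OOM path.
 */
#if 0
	/* At the end of handle_mm_fault(), for a user fault: */
	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
		mem_cgroup_oom_synchronize(false);

	/* In pagefault_out_of_memory(): */
	if (mem_cgroup_oom_synchronize(true))
		return;
#endif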


bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked)
{}

void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked)
{}

static DEFINE_MUTEX(memcg_max_mutex);

static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
				 unsigned long max, bool memsw)
{}

/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * The caller is responsible for holding a css reference on the memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{}
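
/*
 * Condensed sketch of the force-empty loop described above: keep asking
 * reclaim for progress until the charge counter drains or the retries run
 * out.  MAX_RECLAIM_RETRIES and the exact reclaim call are assumed/elided.
 */
#if 0
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MAX_RECLAIM_RETRIES;

	/* Flush per-CPU LRU caches so reclaim can see every page. */
	lru_add_drain_all();

	while (nr_retries && page_counter_read(&memcg->memory)) {
		if (signal_pending(current))
			return -EINTR;

		/*
		 * Reclaim call elided here; the real loop only burns a
		 * retry when reclaim makes no progress.
		 */
		nr_retries--;
	}

	return 0;
}
#endif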

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{}

static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{}

/*
 * This function doesn't do anything useful. Its only job is to provide a read
 * handler for a file so that cgroup_file_mode() will add read permissions.
 */
static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
				     __always_unused void *v)
{}

static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{}

/*
 * The user of this function is...
 * RES_LIMIT.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{}

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{}

#ifdef CONFIG_NUMA

#define LRU_ALL_FILE
#define LRU_ALL_ANON
#define LRU_ALL

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
				int nid, unsigned int lru_mask, bool tree)
{}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask,
					     bool tree)
{}

static int memcg_numa_stat_show(struct seq_file *m, void *v)
{}
#endif /* CONFIG_NUMA */

static const unsigned int memcg1_stats[] =;

static const char *const memcg1_stat_names[] =;

/* Universal VM events that cgroup1 shows, in the original sort order */
static const unsigned int memcg1_events[] =;

void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{}

static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{}

#ifdef CONFIG_SLUB_DEBUG
static int mem_cgroup_slab_show(struct seq_file *m, void *p)
{
	/*
	 * Deprecated.
	 * Please take a look at tools/cgroup/memcg_slabinfo.py.
	 */
	return 0;
}
#endif

struct cftype mem_cgroup_legacy_files[] =;

struct cftype memsw_files[] =;

void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
{}

bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			 gfp_t gfp_mask)
{}

static int __init memcg1_init(void)
{}
subsys_initcall(memcg1_init);