#include <linux/memcontrol.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/pagewalk.h>
#include <linux/backing-dev.h>
#include <linux/swap_cgroup.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include "swap.h"
#include "memcontrol-v1.h"
struct mem_cgroup_tree_per_node { … };
struct mem_cgroup_tree { … };
static struct mem_cgroup_tree soft_limit_tree __read_mostly;
#define MEM_CGROUP_MAX_RECLAIM_LOOPS …
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS …
#define MOVE_ANON …
#define MOVE_FILE …
#define MOVE_MASK …
static struct move_charge_struct { … } mc = …;
struct mem_cgroup_eventfd_list { … };
struct mem_cgroup_event { … };
#define MEMFILE_PRIVATE(x, val) …
#define MEMFILE_TYPE(val) …
#define MEMFILE_ATTR(val) …
enum { … };
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = …;
#endif
DEFINE_SPINLOCK(…);
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
struct mem_cgroup_tree_per_node *mctz,
unsigned long new_usage_in_excess)
{ … }
static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
struct mem_cgroup_tree_per_node *mctz)
{ … }
static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
struct mem_cgroup_tree_per_node *mctz)
{ … }
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{ … }
static void memcg1_update_tree(struct mem_cgroup *memcg, int nid)
{ … }
void memcg1_remove_from_trees(struct mem_cgroup *memcg)
{ … }
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{ … }
static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{ … }
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
pg_data_t *pgdat,
gfp_t gfp_mask,
unsigned long *total_scanned)
{ … }
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned)
{ … }
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{ … }
bool memcg1_wait_acct_move(struct mem_cgroup *memcg)
{ … }
void folio_memcg_lock(struct folio *folio)
{ … }
static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{ … }
void folio_memcg_unlock(struct folio *folio)
{ … }
#ifdef CONFIG_SWAP
static int mem_cgroup_move_swap_account(swp_entry_t entry,
struct mem_cgroup *from, struct mem_cgroup *to)
{ … }
#else
/*
 * !CONFIG_SWAP stub: with no swap support there is no swap accounting
 * record to transfer between cgroups, so the operation always fails
 * with -EINVAL.
 */
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif
static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{ … }
#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{ … }
#else
/*
 * !CONFIG_MMU stub: charge moving is unavailable without an MMU, so a
 * write to the move_charge_at_immigrate control file is reported as
 * not implemented (-ENOSYS).
 */
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
#ifdef CONFIG_MMU
static int mem_cgroup_do_precharge(unsigned long count)
{ … }
mc_target;
enum mc_target_type { … };
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent)
{ … }
#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
pte_t ptent, swp_entry_t *entry)
{ … }
#else
/*
 * Stub for kernels with neither CONFIG_SWAP nor CONFIG_DEVICE_PRIVATE:
 * a swap pte can never resolve to a page we could move, so report none.
 */
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent)
{ … }
static int mem_cgroup_move_account(struct folio *folio,
bool compound,
struct mem_cgroup *from,
struct mem_cgroup *to)
{ … }
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, union mc_target *target)
{ … }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, union mc_target *target)
{ … }
#else
/*
 * !CONFIG_TRANSPARENT_HUGEPAGE stub: PMD-mapped huge pages cannot exist,
 * so a huge pmd is never a move-charge target.
 */
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{ … }
static const struct mm_walk_ops precharge_walk_ops = …;
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{ … }
static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{ … }
static void __mem_cgroup_clear_mc(void)
{ … }
static void mem_cgroup_clear_mc(void)
{ … }
int memcg1_can_attach(struct cgroup_taskset *tset)
{ … }
void memcg1_cancel_attach(struct cgroup_taskset *tset)
{ … }
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{ … }
static const struct mm_walk_ops charge_walk_ops = …;
static void mem_cgroup_move_charge(void)
{ … }
void memcg1_move_task(void)
{ … }
#else
/*
 * !CONFIG_MMU stub: charge moving is unavailable, so there is nothing to
 * set up for the migrating tasks — always allow the attach (return 0).
 */
int memcg1_can_attach(struct cgroup_taskset *tset)
{
	return 0;
}
/*
 * !CONFIG_MMU stub: can_attach set up no move state, so a cancelled
 * attach has nothing to roll back.
 */
void memcg1_cancel_attach(struct cgroup_taskset *tset)
{
}
/*
 * !CONFIG_MMU stub: no charges can be moved after task migration, so
 * this is a no-op.
 */
void memcg1_move_task(void)
{
}
#endif
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{ … }
static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{ … }
void memcg1_check_events(struct mem_cgroup *memcg, int nid)
{ … }
static int compare_thresholds(const void *a, const void *b)
{ … }
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{ … }
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{ … }
static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{ … }
static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, const char *args)
{ … }
static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, const char *args)
{ … }
static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, enum res_type type)
{ … }
static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd)
{ … }
static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd)
{ … }
static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd, const char *args)
{ … }
static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
struct eventfd_ctx *eventfd)
{ … }
static void memcg_event_remove(struct work_struct *work)
{ … }
static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
int sync, void *key)
{ … }
static void memcg_event_ptable_queue_proc(struct file *file,
wait_queue_head_t *wqh, poll_table *pt)
{ … }
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{ … }
void memcg1_memcg_init(struct mem_cgroup *memcg)
{ … }
void memcg1_css_offline(struct mem_cgroup *memcg)
{ … }
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{ … }
static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{ … }
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{ … }
static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{ … }
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
struct oom_wait_info { … };
static int memcg_oom_wake_function(wait_queue_entry_t *wait,
unsigned mode, int sync, void *arg)
{ … }
void memcg1_oom_recover(struct mem_cgroup *memcg)
{ … }
bool mem_cgroup_oom_synchronize(bool handle)
{ … }
bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked)
{ … }
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked)
{ … }
static DEFINE_MUTEX(memcg_max_mutex);
static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
unsigned long max, bool memsw)
{ … }
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{ … }
static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{ … }
static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{ … }
static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{ … }
static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{ … }
static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
__always_unused void *v)
{ … }
static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{ … }
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{ … }
static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{ … }
#ifdef CONFIG_NUMA
#define LRU_ALL_FILE …
#define LRU_ALL_ANON …
#define LRU_ALL …
static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask, bool tree)
{ … }
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
unsigned int lru_mask,
bool tree)
{ … }
static int memcg_numa_stat_show(struct seq_file *m, void *v)
{ … }
#endif
static const unsigned int memcg1_stats[] = …;
static const char *const memcg1_stat_names[] = …;
static const unsigned int memcg1_events[] = …;
void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{ … }
static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{ … }
static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{ … }
static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{ … }
static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{ … }
#ifdef CONFIG_SLUB_DEBUG
/*
 * Empty seq_file show callback: emits nothing and reports success.
 * NOTE(review): presumably kept so the legacy memory.kmem.slabinfo file
 * continues to exist for old userspace even though per-memcg slab data
 * is no longer reported — confirm against the cftype table below.
 */
static int mem_cgroup_slab_show(struct seq_file *m, void *p)
{
	return 0;
}
#endif
struct cftype mem_cgroup_legacy_files[] = …;
struct cftype memsw_files[] = …;
void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages)
{ … }
bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
gfp_t gfp_mask)
{ … }
static int __init memcg1_init(void)
{ … }
subsys_initcall(memcg1_init);