/* linux/mm/hugetlb_cgroup.c */

/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <[email protected]>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val)
#define MEMFILE_IDX(val)
#define MEMFILE_ATTR(val)

/* Encode the offset and size of field t->m[0] into a single value */
#define MEMFILE_OFFSET(t, m0)
#define MEMFILE_OFFSET0(val)
#define MEMFILE_FIELD_SIZE(val)

#define DFL_TMPL_SIZE
#define LEGACY_TMPL_SIZE
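
/*
 * The macro bodies above are elided.  A minimal sketch of the packing
 * scheme their names imply (an assumption, not the verbatim definitions):
 * a single value carries two 16-bit halves, e.g. an hstate index or a
 * structure offset in the high half and an attribute or field size in
 * the low half.  The hypothetical *_sketch() helpers below only
 * illustrate that encoding.
 */
static inline unsigned long memfile_pack_sketch(unsigned int hi,
						unsigned int lo)
{
	/* high 16 bits: index/offset, low 16 bits: attribute/size */
	return ((unsigned long)hi << 16) | (lo & 0xffff);
}

static inline unsigned int memfile_unpack_hi_sketch(unsigned long val)
{
	return (val >> 16) & 0xffff;
}

static inline unsigned int memfile_unpack_lo_sketch(unsigned long val)
{
	return val & 0xffff;
}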

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static struct cftype *dfl_files;
static struct cftype *legacy_files;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{}

static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference or test whether the page is active here.
 * This function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{}
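
/*
 * A minimal sketch of the reparenting step above (an assumption about
 * the elided body, not the actual implementation): the page's charge is
 * absorbed by the parent's counter and cancelled on the child's, so the
 * usage moves up one level without ever failing.  The hypothetical
 * helper below shows only the page_counter hand-off; repointing the
 * page at the parent cgroup is omitted.
 */
static inline void reparent_counter_sketch(struct page_counter *child,
					   struct page_counter *parent,
					   unsigned long nr_pages)
{
	/* page_counter_charge() cannot fail, matching the comment above. */
	page_counter_charge(parent, nr_pages);
	/* Drop the same amount from the child counter only. */
	page_counter_cancel(child, nr_pages);
}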

/*
 * Force the hugetlb cgroup to empty its hugetlb resources by moving
 * them to the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{}
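
/*
 * A rough sketch of the drain loop implied by the comment above
 * (assumed shape, not the elided body): walk every hstate's active list
 * under hugetlb_lock, reparent each page, and repeat until this cgroup
 * no longer shows any usage.  The iteration details are assumptions.
 */
static void hugetlb_cgroup_drain_sketch(struct hugetlb_cgroup *h_cg)
{
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h),
							   h_cg, page);
			spin_unlock_irq(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}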

static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{}

static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct folio *folio, bool rsvd)
{}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct folio *folio)
{}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct folio *folio)
{}
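
/*
 * A hedged sketch of how a caller is expected to pair the charge and
 * commit steps (a hypothetical caller, not code from this file): charge
 * against the current task's cgroup first, then bind the charge to the
 * freshly allocated folio while holding hugetlb_lock.
 */
static int charge_and_commit_sketch(struct hstate *h, struct folio *folio)
{
	struct hugetlb_cgroup *h_cg;
	int idx = hstate_index(h);
	int ret;

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		return ret;

	spin_lock_irq(&hugetlb_lock);
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	spin_unlock_irq(&hugetlb_lock);
	return 0;
}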

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					   struct folio *folio, bool rsvd)
{}

void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
				  struct folio *folio)
{}

void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
				       struct folio *folio)
{}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{}

enum {};

static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{}

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{}

static struct cftype hugetlb_dfl_tmpl[] =;

static struct cftype hugetlb_legacy_tmpl[] =;

static void __init
hugetlb_cgroup_cfttypes_init(struct hstate *h, struct cftype *cft,
			     struct cftype *tmpl, int tmpl_size)
{}

static void __init __hugetlb_cgroup_file_dfl_init(struct hstate *h)
{}

static void __init __hugetlb_cgroup_file_legacy_init(struct hstate *h)
{}

static void __init __hugetlb_cgroup_file_init(struct hstate *h)
{}

static void __init __hugetlb_cgroup_file_pre_init(void)
{}

static void __init __hugetlb_cgroup_file_post_init(void)
{}

void __init hugetlb_cgroup_file_init(void)
{}

/*
 * Holding hugetlb_lock ensures that a parallel cgroup rmdir cannot
 * race with hugepage migration.
 */
void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{}
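
/*
 * A minimal sketch of the transfer described above (assumed shape; it
 * presumes the folio-based hugetlb_cgroup_from_folio()/set_hugetlb_cgroup()
 * helpers from <linux/hugetlb_cgroup.h>): under hugetlb_lock the cgroup
 * pointer is read from the old folio, cleared there, installed on the new
 * folio, and the new folio is placed on the hstate's active list.
 */
static void hugetlb_cgroup_migrate_sketch(struct folio *old_folio,
					  struct folio *new_folio)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = folio_hstate(old_folio);

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	set_hugetlb_cgroup(old_folio, NULL);
	set_hugetlb_cgroup(new_folio, h_cg);
	list_move(&new_folio->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
}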

static struct cftype hugetlb_files[] =;

struct cgroup_subsys hugetlb_cgrp_subsys =;