#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/shrinker.h>
struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
enum memcg_stat_item { … };
enum memcg_memory_event { … };
struct mem_cgroup_reclaim_cookie { … };
#ifdef CONFIG_MEMCG
#define MEM_CGROUP_ID_SHIFT …
struct mem_cgroup_id { … };
struct memcg_vmstats_percpu;
struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;
struct mem_cgroup_reclaim_iter { … };
struct mem_cgroup_per_node { … };
struct mem_cgroup_threshold { … };
struct mem_cgroup_threshold_ary { … };
struct mem_cgroup_thresholds { … };
#define MEMCG_CGWB_FRN_CNT …
struct memcg_cgwb_frn { … };
struct obj_cgroup { … };
struct mem_cgroup { … };
#define MEMCG_CHARGE_BATCH …
extern struct mem_cgroup *root_mem_cgroup;
enum page_memcg_data_flags { … };
#define __FIRST_OBJEXT_FLAG …
#else
#define __FIRST_OBJEXT_FLAG …
#endif
enum objext_flags { … };
#define OBJEXTS_FLAGS_MASK …
#ifdef CONFIG_MEMCG
static inline bool folio_memcg_kmem(struct folio *folio);
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{ … }
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{ … }
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{ … }
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{ … }
static inline bool folio_memcg_charged(struct folio *folio)
{ … }
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{ … }
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{ … }
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{ … }
static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{ … }
static inline bool folio_memcg_kmem(struct folio *folio)
{ … }
static inline bool PageMemcgKmem(struct page *page)
{ … }
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{ … }
static inline bool mem_cgroup_disabled(void)
{ … }
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
unsigned long *low)
{ … }
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg);
static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
struct mem_cgroup *memcg)
{ … }
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
struct mem_cgroup *memcg)
{ … }
static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
struct mem_cgroup *memcg)
{ … }
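/*
 * Reclaim-side sketch (assumed caller-side usage, not an API contract of
 * this header): compute effective protection once per memcg, then consult
 * the helpers above to decide whether the group may be reclaimed from.
 *
 *	mem_cgroup_calculate_protection(root, memcg);
 *	if (mem_cgroup_below_min(root, memcg))
 *		continue;	// hard protection: skip this memcg entirely
 *	else if (mem_cgroup_below_low(root, memcg))
 *		continue;	// soft protection: skip unless low is being ignored
 */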
void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
gfp_t gfp)
{ … }
int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
long nr_pages);
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
void __mem_cgroup_uncharge(struct folio *folio);
static inline void mem_cgroup_uncharge(struct folio *folio)
{ … }
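/*
 * Charging sketch (assumed usage for a caller that just allocated @folio on
 * behalf of @mm): charge before publishing the folio and hand the charge
 * back if a later step fails. mem_cgroup_charge() returns 0 on success.
 *
 *	int err = mem_cgroup_charge(folio, mm, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	...
 *	if (insertion_failed)	// hypothetical caller-side condition
 *		mem_cgroup_uncharge(folio);
 */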
void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{ … }
void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
struct pglist_data *pgdat)
{ … }
static inline struct lruvec *folio_lruvec(struct folio *folio)
{ … }
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
struct mem_cgroup *get_mem_cgroup_from_current(void);
struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flags);
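/*
 * Locking sketch (assumed usage): the _irqsave variant pairs with
 * unlock_page_lruvec_irqrestore(), defined later in this header.
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	... touch the folio's LRU state under lruvec->lru_lock ...
 *
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 */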
#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{ … }
static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{ … }
static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{ … }
static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
unsigned long nr)
{ … }
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{ … }
static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{ … }
static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{ … }
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{ … }
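/*
 * Reference sketch (assumed usage): the get_mem_cgroup_from_*() helpers
 * return a memcg with a reference held; drop it with mem_cgroup_put(),
 * which tolerates NULL.
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
 *
 *	... memcg (possibly the root group) cannot go away here ...
 *
 *	mem_cgroup_put(memcg);
 */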
#define mem_cgroup_from_counter(counter, member) …
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
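/*
 * Hierarchy-walk sketch (assumed usage): start with prev == NULL, feed each
 * result back in as prev, and use mem_cgroup_iter_break() when abandoning
 * the walk early so the reference on the last returned memcg is dropped.
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		... work on memcg ...
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	} while (memcg);
 */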
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*)(struct task_struct *, void *), void *arg);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{ … }
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{ … }
struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{ … }
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{ … }
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{ … }
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
struct mem_cgroup *root)
{ … }
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
{ … }
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{ … }
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages);
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
{ … }
void mem_cgroup_handle_over_high(gfp_t gfp_mask);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
int val);
static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx, int val)
{ … }
static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val)
{ … }
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx);
void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{ … }
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
unsigned long count);
static inline void count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{ … }
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{ … }
static inline void count_memcg_events_mm(struct mm_struct *mm,
enum vm_event_item idx, unsigned long count)
{ … }
static inline void count_memcg_event_mm(struct mm_struct *mm,
enum vm_event_item idx)
{ … }
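/*
 * Event sketch (assumed usage): the *_mm() wrappers resolve the memcg from
 * a task's mm, e.g. charging a major fault to the faulting task:
 *
 *	count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
 */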
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{ … }
static inline void memcg_memory_event_mm(struct mm_struct *mm,
enum memcg_memory_event event)
{ … }
void split_page_memcg(struct page *head, int old_order, int new_order);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT …
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
return NULL;
}
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
WARN_ON_ONCE(!rcu_read_lock_held());
return NULL;
}
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
return NULL;
}
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
return NULL;
}
static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
return NULL;
}
static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
}
static inline bool PageMemcgKmem(struct page *page)
{
return false;
}
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return true;
}
static inline bool mem_cgroup_disabled(void)
{
return true;
}
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
}
static inline void memcg_memory_event_mm(struct mm_struct *mm,
enum memcg_memory_event event)
{
}
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
unsigned long *min,
unsigned long *low)
{
*min = *low = 0;
}
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg)
{
}
static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
struct mem_cgroup *memcg)
{
return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
struct mem_cgroup *memcg)
{
return false;
}
static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
struct mem_cgroup *memcg)
{
return false;
}
static inline void mem_cgroup_commit_charge(struct folio *folio,
struct mem_cgroup *memcg)
{
}
static inline int mem_cgroup_charge(struct folio *folio,
struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
gfp_t gfp, long nr_pages)
{
return 0;
}
static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
return 0;
}
static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
{
}
static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}
static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
unsigned int nr_pages)
{
}
static inline void mem_cgroup_replace_folio(struct folio *old,
struct folio *new)
{
}
static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
struct pglist_data *pgdat)
{
return &pgdat->__lruvec;
}
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
struct pglist_data *pgdat = folio_pgdat(folio);
return &pgdat->__lruvec;
}
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return NULL;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
struct mem_cgroup *memcg)
{
return true;
}
static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
return NULL;
}
static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
return NULL;
}
static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
return NULL;
}
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}
static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
return true;
}
static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
return true;
}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}
static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock_irq(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}
static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flagsp)
{
struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
return &pgdat->__lruvec;
}
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
struct mem_cgroup_reclaim_cookie *reclaim)
{
return NULL;
}
static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
struct mem_cgroup *prev)
{
}
static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*fn)(struct task_struct *, void *), void *arg)
{
}
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
return 0;
}
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
WARN_ON_ONCE(id);
return NULL;
}
#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
return 0;
}
static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
return NULL;
}
#endif
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return NULL;
}
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
return NULL;
}
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
return true;
}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
{
return 0;
}
static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
return 0;
}
static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
return 0;
}
static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}
static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
static inline struct mem_cgroup *mem_cgroup_get_oom_group(
struct task_struct *victim, struct mem_cgroup *oom_domain)
{
return NULL;
}
static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx,
int nr)
{
}
static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx,
int nr)
{
}
static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val)
{
}
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
return 0;
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
return node_page_state(lruvec_pgdat(lruvec), idx);
}
static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
enum node_stat_item idx)
{
return node_page_state(lruvec_pgdat(lruvec), idx);
}
static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}
static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}
static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);
__mod_node_page_state(page_pgdat(page), idx, val);
}
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
struct page *page = virt_to_head_page(p);
mod_node_page_state(page_pgdat(page), idx, val);
}
static inline void count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
}
static inline void __count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
}
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{
}
static inline void count_memcg_events_mm(struct mm_struct *mm,
enum vm_event_item idx, unsigned long count)
{
}
static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void split_page_memcg(struct page *head, int old_order, int new_order)
{
}
#endif /* CONFIG_MEMCG */
struct slabobj_ext { … } __aligned(…);
static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{ … }
static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{ … }
static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{ … }
static inline void unlock_page_lruvec(struct lruvec *lruvec)
{ … }
static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{ … }
static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
unsigned long flags)
{ … }
static inline bool folio_matches_lruvec(struct folio *folio,
struct lruvec *lruvec)
{ … }
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
struct lruvec *locked_lruvec)
{ … }
static inline void folio_lruvec_relock_irqsave(struct folio *folio,
struct lruvec **lruvecp, unsigned long *flags)
{ … }
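/*
 * Batch relock sketch (assumed usage): when walking a folio_batch, keep the
 * current lruvec locked across folios that share it and only switch locks
 * when a folio belongs to a different lruvec.
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	unsigned int i;
 *
 *	for (i = 0; i < folio_batch_count(fbatch); i++) {
 *		struct folio *folio = fbatch->folios[i];
 *
 *		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *		... operate on folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */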
#ifdef CONFIG_CGROUP_WRITEBACK
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
unsigned long *pwriteback);
void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
struct bdi_writeback *wb);
static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{ … }
void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
#else /* CONFIG_CGROUP_WRITEBACK */
static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
return NULL;
}
static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
unsigned long *pfilepages,
unsigned long *pheadroom,
unsigned long *pdirty,
unsigned long *pwriteback)
{
}
static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
}
static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
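/*
 * Socket-memory sketch (assumed usage and return convention):
 * mem_cgroup_charge_skmem() returns true when the charge fit within the
 * limit; a successful charge is later undone with
 * mem_cgroup_uncharge_skmem() for the same number of pages.
 *
 *	if (!mem_cgroup_charge_skmem(memcg, nr_pages, gfp_mask))
 *		return -ENOMEM;
 *	...
 *	mem_cgroup_uncharge_skmem(memcg, nr_pages);
 */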
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled …
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{ … }
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else /* CONFIG_MEMCG */
#define mem_cgroup_sockets_enabled …
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
}
static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{
}
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MEMCG
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
struct obj_cgroup *current_obj_cgroup(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{ … }
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
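/*
 * Byte-granular sketch (assumed usage): object cgroups allow sub-page
 * amounts to be charged, as the slab allocator does internally; a 0 return
 * from obj_cgroup_charge() means the bytes were accounted.
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, gfp, size)) {
 *			... size bytes are now charged to objcg ...
 *		}
 *		obj_cgroup_put(objcg);
 *	}
 */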
extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{ … }
extern struct static_key_false memcg_kmem_online_key;
static inline bool memcg_kmem_online(void)
{ … }
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{ … }
static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{ … }
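/*
 * Note (assumed typical usage): most callers never invoke the kmem charge
 * helpers directly; passing __GFP_ACCOUNT makes the page or slab allocator
 * charge the current memcg itself, e.g.:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 */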
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{ … }
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
static inline void count_objcg_event(struct obj_cgroup *objcg,
enum vm_event_item idx)
{ … }
#else /* CONFIG_MEMCG */
static inline bool mem_cgroup_kmem_disabled(void)
{
return true;
}
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{
return 0;
}
static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}
static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{
return 0;
}
static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}
static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
static inline bool memcg_bpf_enabled(void)
{
return false;
}
static inline bool memcg_kmem_online(void)
{
return false;
}
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return -1;
}
static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
return NULL;
}
static inline void count_objcg_event(struct obj_cgroup *objcg,
enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */
#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else /* CONFIG_MEMCG && CONFIG_ZSWAP */
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
size_t size)
{
}
static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
return true;
}
#endif /* CONFIG_MEMCG && CONFIG_ZSWAP */
#ifdef CONFIG_MEMCG_V1
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
bool mem_cgroup_oom_synchronize(bool wait);
static inline bool task_in_memcg_oom(struct task_struct *p)
{ … }
void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{ … }
static inline void mem_cgroup_unlock_pages(void)
{ … }
static inline void mem_cgroup_enter_user_fault(void)
{ … }
static inline void mem_cgroup_exit_user_fault(void)
{ … }
#else /* CONFIG_MEMCG_V1 */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned)
{
return 0;
}
static inline void folio_memcg_lock(struct folio *folio)
{
}
static inline void folio_memcg_unlock(struct folio *folio)
{
}
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
rcu_read_lock();
return true;
}
static inline void mem_cgroup_unlock_pages(void)
{
rcu_read_unlock();
}
static inline bool task_in_memcg_oom(struct task_struct *p)
{
return false;
}
static inline bool mem_cgroup_oom_synchronize(bool wait)
{
return false;
}
static inline void mem_cgroup_enter_user_fault(void)
{
}
static inline void mem_cgroup_exit_user_fault(void)
{
}
#endif /* CONFIG_MEMCG_V1 */
#endif /* _LINUX_MEMCONTROL_H */