#ifndef _LINUX_PAGE_COUNTER_H
#define _LINUX_PAGE_COUNTER_H
#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/limits.h>
#include <asm/page.h>
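
/*
 * A page_counter tracks a number of pages against configurable limits and,
 * via its parent pointer, against every ancestor in a cgroup-style
 * hierarchy.  It is the accounting primitive behind the memory controller's
 * memory and swap limits and is also used by the hugetlb controller.  The
 * structure is aligned to the internode cacheline size on SMP to avoid
 * false sharing on hot charge/uncharge paths.
 */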
struct page_counter { … } ____cacheline_internodealigned_in_smp;
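
/*
 * PAGE_COUNTER_MAX is the largest limit a counter accepts, in pages; it is
 * what "no limit" is stored as, and page_counter_memparse() below maps the
 * "max" keyword to it.  The exact values are elided here and differ between
 * 32-bit and 64-bit builds.
 */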
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX …
#else
#define PAGE_COUNTER_MAX …
#endif
static inline void page_counter_init(struct page_counter *counter,
				     struct page_counter *parent,
				     bool protection_support)
{ … }
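
/*
 * Minimal initialization sketch (illustrative only; the variable names are
 * hypothetical).  The parent pointer links the counter into a hierarchy so
 * charges propagate upward; protection_support, in recent mainline kernels,
 * tells the counter whether min/low protection tracking will be used:
 *
 *	struct page_counter parent_ctr, child_ctr;
 *
 *	page_counter_init(&parent_ctr, NULL, true);
 *	page_counter_init(&child_ctr, &parent_ctr, true);
 */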
static inline unsigned long page_counter_read(struct page_counter *counter)
{ … }
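
/*
 * page_counter_read() returns the current usage in pages.  The counter also
 * records the highest usage it has seen; page_counter_reset_watermark()
 * below resets that watermark to the current usage.
 */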
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
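
/*
 * Typical charge/uncharge pattern (a sketch, not part of this header; the
 * names "counter" and "nr_pages" and the error handling are illustrative):
 *
 *	struct page_counter *fail;
 *
 *	if (!page_counter_try_charge(counter, nr_pages, &fail))
 *		return -ENOMEM;
 *	...
 *	page_counter_uncharge(counter, nr_pages);
 *
 * On failure nothing is left charged and *fail points to the counter in the
 * hierarchy whose maximum was hit.  page_counter_charge() charges
 * unconditionally, even beyond the limit, and page_counter_cancel() removes
 * a charge from a single counter rather than the whole hierarchy.
 */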
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
static inline void page_counter_set_high(struct page_counter *counter,
					 unsigned long nr_pages)
{ … }
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages);
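
/*
 * The setters above follow the cgroup v2 limit semantics: min and low are
 * reclaim protection, high is a soft (throttling) limit that the counter
 * itself does not enforce, and max is the hard limit.  As implemented in
 * mm/page_counter.c, page_counter_set_max() fails with -EBUSY if the
 * current usage already exceeds the new limit, and page_counter_memparse()
 * turns a string such as "4M" (or the keyword passed as @max, usually
 * "max") into a page count.  A sketch of a typical interface-file write
 * handler (names illustrative):
 *
 *	unsigned long nr_pages;
 *	int err;
 *
 *	err = page_counter_memparse(buf, "max", &nr_pages);
 *	if (err)
 *		return err;
 *	return page_counter_set_max(counter, nr_pages);
 */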
static inline void page_counter_reset_watermark(struct page_counter *counter)
{ … }
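
/*
 * page_counter_calculate_protection() computes the effective min/low
 * protection of @counter within the reclaim subtree rooted at @root,
 * combining the counter's own min/low settings with those of its ancestors.
 * recursive_protection appears to correspond to cgroup2's
 * "memory_recursiveprot" mount option, which distributes unused parental
 * protection to children that set none themselves.  Without CONFIG_MEMCG
 * the stub below makes it a no-op.
 */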
#ifdef CONFIG_MEMCG
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection);
#else
static inline void page_counter_calculate_protection(struct page_counter *root,
						     struct page_counter *counter,
						     bool recursive_protection) {}
#endif
#endif /* _LINUX_PAGE_COUNTER_H */