#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <linux/errno.h>
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/pgalloc_tag.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/memremap.h>
#include <linux/slab.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct user_struct;
struct pt_regs;
struct folio_batch;
extern int sysctl_page_lock_unfairness;
void mm_core_init(void);
void init_mm_internals(void);
#ifndef CONFIG_NUMA
extern unsigned long max_mapnr;
static inline void set_max_mapnr(unsigned long limit)
{
max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { … }
#endif
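/*
 * _totalram_pages is the count of managed RAM pages; always go through
 * totalram_pages() and the inc/dec/add helpers below so reads and
 * updates stay atomic.
 */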
extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{ … }
static inline void totalram_pages_inc(void)
{ … }
static inline void totalram_pages_dec(void)
{ … }
static inline void totalram_pages_add(long count)
{ … }
extern void * high_memory;
extern int page_cluster;
extern const int page_cluster_max;
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout …
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern int mmap_rnd_bits_max __ro_after_init;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif
#ifndef PHYSMEM_END
# ifdef MAX_PHYSMEM_BITS
#define PHYSMEM_END …
# else
#define PHYSMEM_END …
# endif
#endif
#include <asm/page.h>
#include <asm/processor.h>
#ifndef __pa_symbol
#define __pa_symbol …
#endif
#ifndef page_to_virt
#define page_to_virt(x) …
#endif
#ifndef lm_alias
#define lm_alias(x) …
#endif
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X) …
#endif
#if BITS_PER_LONG == 64
#define mm_zero_struct_page(pp) …
static inline void __mm_zero_struct_page(struct page *page)
{ … }
#else
#define mm_zero_struct_page(pp) …
#endif
#define MAPCOUNT_ELF_CORE_MARGIN …
#define DEFAULT_MAX_MAP_COUNT …
extern int sysctl_max_map_count;
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
int overcommit_ratio_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
int overcommit_kbytes_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
int overcommit_policy_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) …
#define folio_page_idx(folio, p) …
#else
#define nth_page(page,n) …
#define folio_page_idx(folio, p) …
#endif
#define PAGE_ALIGN(addr) …
#define PAGE_ALIGN_DOWN(addr) …
#define PAGE_ALIGNED(addr) …
static inline struct folio *lru_to_folio(struct list_head *head)
{ … }
void setup_initial_init_mm(void *start_code, void *end_code,
void *end_data, void *brk);
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
void __vm_area_free(struct vm_area_struct *vma);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;
extern unsigned int kobjsize(const void *objp);
#endif
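/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */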
#define VM_NONE …
#define VM_READ …
#define VM_WRITE …
#define VM_EXEC …
#define VM_SHARED …
#define VM_MAYREAD …
#define VM_MAYWRITE …
#define VM_MAYEXEC …
#define VM_MAYSHARE …
#define VM_GROWSDOWN …
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING …
#else
#define VM_MAYOVERLAY …
#define VM_UFFD_MISSING …
#endif
#define VM_PFNMAP …
#define VM_UFFD_WP …
#define VM_LOCKED …
#define VM_IO …
#define VM_SEQ_READ …
#define VM_RAND_READ …
#define VM_DONTCOPY …
#define VM_DONTEXPAND …
#define VM_LOCKONFAULT …
#define VM_ACCOUNT …
#define VM_NORESERVE …
#define VM_HUGETLB …
#define VM_SYNC …
#define VM_ARCH_1 …
#define VM_WIPEONFORK …
#define VM_DONTDUMP …
#ifdef CONFIG_MEM_SOFT_DIRTY
#define VM_SOFTDIRTY …
#else
#define VM_SOFTDIRTY …
#endif
#define VM_MIXEDMAP …
#define VM_HUGEPAGE …
#define VM_NOHUGEPAGE …
#define VM_MERGEABLE …
#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0 …
#define VM_HIGH_ARCH_BIT_1 …
#define VM_HIGH_ARCH_BIT_2 …
#define VM_HIGH_ARCH_BIT_3 …
#define VM_HIGH_ARCH_BIT_4 …
#define VM_HIGH_ARCH_BIT_5 …
#define VM_HIGH_ARCH_0 …
#define VM_HIGH_ARCH_1 …
#define VM_HIGH_ARCH_2 …
#define VM_HIGH_ARCH_3 …
#define VM_HIGH_ARCH_4 …
#define VM_HIGH_ARCH_5 …
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
#define VM_PKEY_SHIFT …
#define VM_PKEY_BIT0 …
#define VM_PKEY_BIT1 …
#define VM_PKEY_BIT2 …
#if CONFIG_ARCH_PKEY_BITS > 3
#define VM_PKEY_BIT3 …
#else
#define VM_PKEY_BIT3 …
#endif
#if CONFIG_ARCH_PKEY_BITS > 4
#define VM_PKEY_BIT4 …
#else
#define VM_PKEY_BIT4 …
#endif
#endif
#ifdef CONFIG_X86_USER_SHADOW_STACK
#define VM_SHADOW_STACK …
#else
#define VM_SHADOW_STACK …
#endif
#if defined(CONFIG_X86)
#define VM_PAT …
#elif defined(CONFIG_PPC64)
#define VM_SAO …
#elif defined(CONFIG_PARISC)
#define VM_GROWSUP …
#elif defined(CONFIG_SPARC64)
#define VM_SPARC_ADI …
#define VM_ARCH_CLEAR …
#elif defined(CONFIG_ARM64)
#define VM_ARM64_BTI …
#define VM_ARCH_CLEAR …
#elif !defined(CONFIG_MMU)
#define VM_MAPPED_COPY …
#endif
#if defined(CONFIG_ARM64_MTE)
#define VM_MTE …
#define VM_MTE_ALLOWED …
#else
#define VM_MTE …
#define VM_MTE_ALLOWED …
#endif
#ifndef VM_GROWSUP
#define VM_GROWSUP …
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
#define VM_UFFD_MINOR_BIT …
#define VM_UFFD_MINOR …
#else
#define VM_UFFD_MINOR …
#endif
#ifdef CONFIG_64BIT
#define VM_ALLOW_ANY_UNCACHED_BIT …
#define VM_ALLOW_ANY_UNCACHED …
#else
#define VM_ALLOW_ANY_UNCACHED …
#endif
#ifdef CONFIG_64BIT
#define VM_DROPPABLE_BIT …
#define VM_DROPPABLE …
#elif defined(CONFIG_PPC32)
#define VM_DROPPABLE …
#else
#define VM_DROPPABLE …
#endif
#ifdef CONFIG_64BIT
#define VM_SEALED …
#endif
#define VM_STACK_INCOMPLETE_SETUP …
#define TASK_EXEC …
#define VM_DATA_FLAGS_TSK_EXEC …
#define VM_DATA_FLAGS_NON_EXEC …
#define VM_DATA_FLAGS_EXEC …
#ifndef VM_DATA_DEFAULT_FLAGS
#define VM_DATA_DEFAULT_FLAGS …
#endif
#ifndef VM_STACK_DEFAULT_FLAGS
#define VM_STACK_DEFAULT_FLAGS …
#endif
#define VM_STARTGAP_FLAGS …
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK …
#define VM_STACK_EARLY …
#else
#define VM_STACK …
#define VM_STACK_EARLY …
#endif
#define VM_STACK_FLAGS …
#define VM_ACCESS_FLAGS …
#define VM_SPECIAL …
#define VM_NO_KHUGEPAGED …
#define VM_INIT_DEF_MASK …
#define VM_LOCKED_MASK …
#ifndef VM_ARCH_CLEAR
#define VM_ARCH_CLEAR …
#endif
#define VM_FLAGS_CLEAR …
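/*
 * FAULT_FLAG_DEFAULT is the flag combination used for an ordinary page
 * fault; fault_flag_allow_retry_first() reports whether a retryable
 * fault is on its first attempt (retry allowed and not yet tried).
 */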
#define FAULT_FLAG_DEFAULT …
static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{ … }
#define FAULT_FLAG_TRACE …
struct vm_fault { … };
struct vm_operations_struct { … };
#ifdef CONFIG_NUMA_BALANCING
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{ … }
static inline void vma_numab_state_free(struct vm_area_struct *vma)
{ … }
#else
static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif
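/*
 * With CONFIG_PER_VMA_LOCK a page fault can be handled under a per-VMA
 * read lock obtained via lock_vma_under_rcu(), without taking mmap_lock;
 * without it the helpers below fall back to mmap_lock based locking and
 * assertions.
 */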
#ifdef CONFIG_PER_VMA_LOCK
static inline bool vma_start_read(struct vm_area_struct *vma)
{ … }
static inline void vma_end_read(struct vm_area_struct *vma)
{ … }
static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
{ … }
static inline void vma_start_write(struct vm_area_struct *vma)
{ … }
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ … }
static inline void vma_assert_locked(struct vm_area_struct *vma)
{ … }
static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
{ … }
static inline void release_fault_lock(struct vm_fault *vmf)
{ … }
static inline void assert_fault_locked(struct vm_fault *vmf)
{ … }
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
#else
static inline bool vma_start_read(struct vm_area_struct *vma)
{ return false; }
static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
static inline void vma_mark_detached(struct vm_area_struct *vma,
bool detached) {}
static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address)
{
return NULL;
}
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
mmap_assert_locked(vma->vm_mm);
}
static inline void release_fault_lock(struct vm_fault *vmf)
{
mmap_read_unlock(vmf->vma->vm_mm);
}
static inline void assert_fault_locked(struct vm_fault *vmf)
{
mmap_assert_locked(vmf->vma->vm_mm);
}
#endif
extern const struct vm_operations_struct vma_dummy_vm_ops;
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{ … }
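/*
 * vma->vm_flags should only be changed through the vm_flags_*() helpers
 * below, which require the VMA to be write locked; vm_flags_init() is
 * reserved for a freshly allocated VMA not yet visible to other users.
 */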
static inline void vm_flags_init(struct vm_area_struct *vma,
vm_flags_t flags)
{ … }
static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_t flags)
{ … }
static inline void vm_flags_reset_once(struct vm_area_struct *vma,
vm_flags_t flags)
{ … }
static inline void vm_flags_set(struct vm_area_struct *vma,
vm_flags_t flags)
{ … }
static inline void vm_flags_clear(struct vm_area_struct *vma,
vm_flags_t flags)
{ … }
static inline void __vm_flags_mod(struct vm_area_struct *vma,
vm_flags_t set, vm_flags_t clear)
{ … }
static inline void vm_flags_mod(struct vm_area_struct *vma,
vm_flags_t set, vm_flags_t clear)
{ … }
static inline void vma_set_anonymous(struct vm_area_struct *vma)
{ … }
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{ … }
static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
{ … }
static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
{ … }
static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{ … }
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{ … }
static inline bool vma_is_accessible(struct vm_area_struct *vma)
{ … }
static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{ … }
static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{ … }
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{ … }
static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{ … }
static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{ … }
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{ … }
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{ … }
static inline void vma_iter_free(struct vma_iterator *vmi)
{ … }
static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
struct vm_area_struct *vma)
{ … }
static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{ … }
static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{ … }
#define for_each_vma(__vmi, __vma) …
#define for_each_vma_range(__vmi, __vma, __end) …
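/*
 * Illustrative (non-authoritative) use of the VMA iterator: walk every
 * VMA of an mm while holding mmap_lock for read.  process_vma() is a
 * stand-in for whatever the caller does per VMA.
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		process_vma(vma);
 *	mmap_read_unlock(mm);
 */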
#ifdef CONFIG_SHMEM
bool vma_is_shmem(struct vm_area_struct *vma);
bool vma_is_anon_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
#endif
int vma_is_stack_for_current(struct vm_area_struct *vma);
#define TLB_FLUSH_VMA(mm,flags) …
struct mmu_gather;
struct inode;
static inline unsigned int compound_order(struct page *page)
{ … }
static inline unsigned int folio_order(const struct folio *folio)
{ … }
#include <linux/huge_mm.h>
static inline int put_page_testzero(struct page *page)
{ … }
static inline int folio_put_testzero(struct folio *folio)
{ … }
static inline bool get_page_unless_zero(struct page *page)
{ … }
static inline struct folio *folio_get_nontail_page(struct page *page)
{ … }
extern int page_is_ram(unsigned long pfn);
enum { … };
int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
unsigned long desc);
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
return 0;
}
#endif
static inline int folio_entire_mapcount(const struct folio *folio)
{ … }
static inline int folio_large_mapcount(const struct folio *folio)
{ … }
static inline int folio_mapcount(const struct folio *folio)
{ … }
static inline bool folio_mapped(const struct folio *folio)
{ … }
static inline bool page_mapped(const struct page *page)
{ … }
static inline struct page *virt_to_head_page(const void *x)
{ … }
static inline struct folio *virt_to_folio(const void *x)
{ … }
void __folio_put(struct folio *folio);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);
int folio_mc_copy(struct folio *dst, struct folio *src);
unsigned long nr_free_buffer_pages(void);
static inline unsigned long page_size(struct page *page)
{ … }
static inline unsigned int page_shift(struct page *page)
{ … }
static inline unsigned int thp_order(struct page *page)
{ … }
static inline unsigned long thp_size(struct page *page)
{ … }
#ifdef CONFIG_MMU
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{ … }
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void set_pte_range(struct vm_fault *vmf, struct folio *folio,
struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
#endif
#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
{ … }
#else
static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
{
return false;
}
#endif
#define folio_ref_zero_or_close_to_overflow(folio) …
static inline void folio_get(struct folio *folio)
{ … }
static inline void get_page(struct page *page)
{ … }
static inline __must_check bool try_get_page(struct page *page)
{ … }
static inline void folio_put(struct folio *folio)
{ … }
static inline void folio_put_refs(struct folio *folio, int refs)
{ … }
void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
typedef union { … } release_pages_arg __attribute__ ((__transparent_union__));
void release_pages(release_pages_arg, int nr);
static inline void folios_put(struct folio_batch *folios)
{ … }
static inline void put_page(struct page *page)
{ … }
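/*
 * Folios pinned with pin_user_pages*()/FOLL_PIN have their reference
 * count raised by GUP_PIN_COUNTING_BIAS or, for large folios, a dedicated
 * pin counter incremented, so such pins can be detected later.  Every pin
 * must be dropped with unpin_user_page*()/unpin_folio*(), never with a
 * plain put_page()/folio_put().
 */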
#define GUP_PIN_COUNTING_BIAS …
void unpin_user_page(struct page *page);
void unpin_folio(struct folio *folio);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
void unpin_user_folio(struct folio *folio, unsigned long npages);
void unpin_folios(struct folio **folios, unsigned long nfolios);
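/*
 * A COW mapping is a private, potentially writable mapping: writes go to
 * anonymous copies of the backing pages rather than the backing object.
 */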
static inline bool is_cow_mapping(vm_flags_t flags)
{ … }
#ifndef CONFIG_MMU
static inline bool is_nommu_shared_mapping(vm_flags_t flags)
{
return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}
#endif
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
static inline int page_zone_id(struct page *page)
{ … }
#ifdef NODE_NOT_IN_PAGE_FLAGS
int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{ … }
#endif
static inline int folio_nid(const struct folio *folio)
{ … }
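/*
 * With CONFIG_NUMA_BALANCING the CPU and PID of the last task to access
 * a folio are packed into a single "cpupid" value (kept in page flags or
 * in a separate field), which NUMA hinting faults use to spot cross-node
 * accesses; without it the helpers below are stubs.
 */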
#ifdef CONFIG_NUMA_BALANCING
#define PAGE_ACCESS_TIME_MIN_BITS …
#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
#define PAGE_ACCESS_TIME_BUCKETS …
#else
#define PAGE_ACCESS_TIME_BUCKETS …
#endif
#define PAGE_ACCESS_TIME_MASK …
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{ … }
static inline int cpupid_to_pid(int cpupid)
{ … }
static inline int cpupid_to_cpu(int cpupid)
{ … }
static inline int cpupid_to_nid(int cpupid)
{ … }
static inline bool cpupid_pid_unset(int cpupid)
{ … }
static inline bool cpupid_cpu_unset(int cpupid)
{ … }
static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{ … }
#define cpupid_match_pid(task, cpupid) …
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
static inline int folio_last_cpupid(struct folio *folio)
{
return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int folio_last_cpupid(struct folio *folio)
{ … }
int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{ … }
#endif
static inline int folio_xchg_access_time(struct folio *folio, int time)
{ … }
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{ … }
bool folio_use_access_time(struct folio *folio);
#else
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
return folio_nid(folio);
}
static inline int folio_xchg_access_time(struct folio *folio, int time)
{
return 0;
}
static inline int folio_last_cpupid(struct folio *folio)
{
return folio_nid(folio);
}
static inline int cpupid_to_nid(int cpupid)
{
return -1;
}
static inline int cpupid_to_pid(int cpupid)
{
return -1;
}
static inline int cpupid_to_cpu(int cpupid)
{
return -1;
}
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return -1;
}
static inline bool cpupid_pid_unset(int cpupid)
{
return true;
}
static inline void page_cpupid_reset_last(struct page *page)
{
}
static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
return false;
}
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
}
static inline bool folio_use_access_time(struct folio *folio)
{
return false;
}
#endif
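/*
 * With SW/HW tag-based KASAN every page carries a memory tag in its page
 * flags.  The tag is stored xor'ed with 0xff so that freshly zeroed page
 * flags read back as KASAN_TAG_KERNEL.
 */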
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
static inline u8 page_kasan_tag(const struct page *page)
{
u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
tag ^= 0xff;
}
return tag;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
unsigned long old_flags, flags;
if (!kasan_enabled())
return;
tag ^= 0xff;
old_flags = READ_ONCE(page->flags);
do {
flags = old_flags;
flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
{
if (kasan_enabled())
page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
#else
static inline u8 page_kasan_tag(const struct page *page)
{ … }
static inline void page_kasan_tag_set(struct page *page, u8 tag) { … }
static inline void page_kasan_tag_reset(struct page *page) { … }
#endif
static inline struct zone *page_zone(const struct page *page)
{ … }
static inline pg_data_t *page_pgdat(const struct page *page)
{ … }
static inline struct zone *folio_zone(const struct folio *folio)
{ … }
static inline pg_data_t *folio_pgdat(const struct folio *folio)
{ … }
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}
static inline unsigned long page_to_section(const struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif
static inline unsigned long folio_pfn(struct folio *folio)
{ … }
static inline struct folio *pfn_folio(unsigned long pfn)
{ … }
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{ … }
static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
struct folio *folio)
{ … }
static inline bool is_zero_page(const struct page *page)
{ … }
static inline bool is_zero_folio(const struct folio *folio)
{ … }
#ifdef CONFIG_MIGRATION
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{ … }
#else
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
return true;
}
#endif
static inline void set_page_zone(struct page *page, enum zone_type zone)
{ … }
static inline void set_page_node(struct page *page, unsigned long node)
{ … }
static inline void set_page_links(struct page *page, enum zone_type zone,
unsigned long node, unsigned long pfn)
{ … }
static inline long folio_nr_pages(const struct folio *folio)
{ … }
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
#define MAX_FOLIO_NR_PAGES …
#else
#define MAX_FOLIO_NR_PAGES …
#endif
static inline unsigned long compound_nr(struct page *page)
{ … }
static inline int thp_nr_pages(struct page *page)
{ … }
static inline struct folio *folio_next(struct folio *folio)
{ … }
static inline unsigned int folio_shift(const struct folio *folio)
{ … }
static inline size_t folio_size(const struct folio *folio)
{ … }
static inline bool folio_likely_mapped_shared(struct folio *folio)
{ … }
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
static inline int arch_make_folio_accessible(struct folio *folio)
{ … }
#endif
#include <linux/vmstat.h>
#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif
#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
page->virtual = address;
}
#define page_address_init() …
#endif
#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif
static __always_inline void *lowmem_page_address(const struct page *page)
{ … }
#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) …
#define set_page_address(page, address) …
#define page_address_init() …
#endif
static inline void *folio_address(const struct folio *folio)
{ … }
static inline bool page_is_pfmemalloc(const struct page *page)
{ … }
static inline bool folio_is_pfmemalloc(const struct folio *folio)
{ … }
static inline void set_page_pfmemalloc(struct page *page)
{ … }
static inline void clear_page_pfmemalloc(struct page *page)
{ … }
extern void pagefault_out_of_memory(void);
#define offset_in_page(p) …
#define offset_in_thp(page, p) …
#define offset_in_folio(folio, p) …
struct zap_details { … };
#define ZAP_FLAG_DROP_MARKER …
#define ZAP_FLAG_UNMAP …
#ifdef CONFIG_SCHED_MM_CID
void sched_mm_cid_before_execve(struct task_struct *t);
void sched_mm_cid_after_execve(struct task_struct *t);
void sched_mm_cid_fork(struct task_struct *t);
void sched_mm_cid_exit_signals(struct task_struct *t);
static inline int task_mm_cid(struct task_struct *t)
{ … }
#else
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
static inline void sched_mm_cid_fork(struct task_struct *t) { }
static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
static inline int task_mm_cid(struct task_struct *t)
{
return raw_smp_processor_id();
}
#endif
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);
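/*
 * vm_normal_page()/vm_normal_folio() return the struct page/folio backing
 * a pte when there is one that may be touched by the core mm (refcounted,
 * rmapped), and NULL for special mappings such as raw PFN maps.
 */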
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details);
static inline void zap_vma_pages(struct vm_area_struct *vma)
{ … }
void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long start,
unsigned long end, unsigned long tree_end, bool mm_wr_locked);
struct mmu_notifier_range;
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
struct follow_pfnmap_args { … };
int follow_pfnmap_start(struct follow_pfnmap_args *args);
void follow_pfnmap_end(struct follow_pfnmap_args *args);
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int generic_error_remove_folio(struct address_space *mapping,
struct folio *folio);
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long address, struct pt_regs *regs);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct pt_regs *regs);
extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct pt_regs *regs)
{
BUG();
return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
unsigned int fault_flags, bool *unlocked)
{
BUG();
return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
{ … }
static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
unsigned long addr);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
int *locked);
static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
unsigned long addr,
int gup_flags,
struct vm_area_struct **vmap)
{ … }
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
struct folio **folios, unsigned int max_folios,
pgoff_t *offset);
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
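/*
 * Minimal pin/unpin sketch (NR_PAGES and "start" are stand-ins; error
 * handling is omitted):
 *
 *	struct page *pages[NR_PAGES];
 *	int npinned;
 *
 *	npinned = pin_user_pages_fast(start, NR_PAGES, FOLL_WRITE, pages);
 *	if (npinned > 0) {
 *		... access or DMA into the pages ...
 *		unpin_user_pages_dirty_lock(pages, npinned, true);
 *	}
 */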
void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);
struct kvec;
struct page *get_dump_page(unsigned long addr);
bool folio_mark_dirty(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
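/*
 * MM_CP_* flags modify the behaviour of change_protection(): opportunistic
 * write-enable of PTEs, NUMA hinting protection, and setting or resolving
 * userfaultfd write-protect markers.
 */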
#define MM_CP_TRY_CHANGE_WRITABLE …
#define MM_CP_PROT_NUMA …
#define MM_CP_UFFD_WP …
#define MM_CP_UFFD_WP_RESOLVE …
#define MM_CP_UFFD_WP_ALL …
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long cp_flags);
extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned long newflags);
int get_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
static inline bool get_user_page_fast_only(unsigned long addr,
unsigned int gup_flags, struct page **pagep)
{ … }
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{ … }
void mm_trace_rss_stat(struct mm_struct *mm, int member);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{ … }
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{ … }
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{ … }
static inline int mm_counter_file(struct folio *folio)
{ … }
static inline int mm_counter(struct folio *folio)
{ … }
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{ … }
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{ … }
static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{ … }
static inline void update_hiwater_rss(struct mm_struct *mm)
{ … }
static inline void update_hiwater_vm(struct mm_struct *mm)
{ … }
static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{ … }
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
struct mm_struct *mm)
{ … }
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
return 0;
}
static inline pte_t pte_mkspecial(pte_t pte)
{
return pte;
}
#endif
#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
return false;
}
static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
return pmd;
}
#endif
#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
return false;
}
static inline pud_t pud_mkspecial(pud_t pud)
{
return pud;
}
#endif
#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
return 0;
}
#endif
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl)
{ … }
#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{
return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{ … }
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{ … }
#endif
#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
unsigned long address)
{
return 0;
}
static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{ … }
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{ … }
#endif
#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{ … }
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{ … }
static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{ … }
static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{ … }
#else
static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
return 0;
}
static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);
#if defined(CONFIG_MMU)
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{ … }
static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{ … }
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{ … }
#endif
static inline struct ptdesc *virt_to_ptdesc(const void *x)
{ … }
static inline void *ptdesc_to_virt(const struct ptdesc *pt)
{ … }
static inline void *ptdesc_address(const struct ptdesc *pt)
{ … }
static inline bool pagetable_is_reserved(struct ptdesc *pt)
{ … }
static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
{ … }
#define pagetable_alloc(...) …
static inline void pagetable_free(struct ptdesc *pt)
{ … }
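/*
 * With CONFIG_SPLIT_PTE_PTLOCKS each page-table page has its own spinlock,
 * either embedded in the ptdesc or allocated separately when
 * ALLOC_SPLIT_PTLOCKS; otherwise PTE locking falls back to
 * mm->page_table_lock.
 */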
#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
void ptlock_free(struct ptdesc *ptdesc);
static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{ … }
#else
static inline void ptlock_cache_init(void)
{
}
static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
return true;
}
static inline void ptlock_free(struct ptdesc *ptdesc)
{
}
static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
return &ptdesc->ptl;
}
#endif
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{ … }
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{ … }
static inline bool ptlock_init(struct ptdesc *ptdesc)
{ … }
#else
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
{
return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif
static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{ … }
static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
{ … }
pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{ … }
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp)
{ … }
pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, spinlock_t **ptlp);
#define pte_unmap_unlock(pte, ptl) …
#define pte_alloc(mm, pmd) …
#define pte_alloc_map(mm, pmd, address) …
#define pte_alloc_map_lock(mm, pmd, address, ptlp) …
#define pte_alloc_kernel(pmd, address) …
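/*
 * Typical way to look at one PTE with its lock held (a sketch; a NULL
 * return means the page table went away or changed under us):
 *
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return;		(retry or fail: no PTE page here any more)
 *	... inspect or modify the PTE ...
 *	pte_unmap_unlock(pte, ptl);
 */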
#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{ … }
static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
{ … }
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{ … }
static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{ … }
static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
{ … }
#define pmd_huge_pte(mm, pmd) …
#else
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
#define pmd_huge_pte(mm, pmd) …
#endif
static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{ … }
static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
{ … }
static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
{ … }
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{ … }
static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{ … }
static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
{ … }
static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
{ … }
extern void __init pagecache_init(void);
extern void free_initmem(void);
extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);
extern void adjust_managed_page_count(struct page *page, long count);
extern void reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid);
void free_reserved_page(struct page *page);
#define free_highmem_page(page) …
static inline void mark_page_reserved(struct page *page)
{ … }
static inline void free_reserved_ptdesc(struct ptdesc *pt)
{ … }
static inline unsigned long free_initmem_default(int poison)
{ … }
static inline unsigned long get_num_physpages(void)
{ … }
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
}
#else
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
extern void mem_init(void);
extern void __init mmap_init(void);
extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
static inline void show_mem(void)
{ … }
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
void vma_interval_tree_insert(struct vm_area_struct *node,
struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct vm_area_struct *prev,
struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
unsigned long start, unsigned long last);
#define vma_interval_tree_foreach(vma, root, start, last) …
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif
#define anon_vma_interval_tree_foreach(avc, root, start, last) …
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
unsigned long start,
unsigned long end_data,
unsigned long start_data)
{ … }
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);
extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags,
const struct vm_special_mapping *spec);
unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);
unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
static inline unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{ … }
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{ … }
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
struct vm_unmapped_area_info { … };
extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
extern unsigned long stack_guard_gap;
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
int expand_downwards(struct vm_area_struct *vma, unsigned long address);
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr, unsigned long end_addr);
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{ … }
static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
{ … }
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{ … }
static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{ … }
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{ … }
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{ … }
static inline bool range_in_vma(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ … }
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif
void vma_set_file(struct vm_area_struct *vma, struct file *file);
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#endif
struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
unsigned long addr, struct page *page)
{ … }
#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn,
unsigned long size, pgprot_t prot)
{ … }
#endif
static inline vm_fault_t vmf_error(int err)
{ … }
static inline vm_fault_t vmf_fs_error(int err)
{ … }
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{ … }
static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
unsigned int flags)
{ … }
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);
#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
extern bool _page_poisoning_enabled_early;
DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
static inline bool page_poisoning_enabled(void)
{ … }
static inline bool page_poisoning_enabled_static(void)
{ … }
static inline void kernel_poison_pages(struct page *page, int numpages)
{ … }
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{ … }
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline bool page_poisoning_enabled_static(void) { return false; }
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif
DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{ … }
DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{ … }
extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
static inline bool debug_pagealloc_enabled(void)
{ … }
static inline bool debug_pagealloc_enabled_static(void)
{ … }
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{ … }
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{ … }
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
static inline unsigned int debug_guardpage_minorder(void)
{ … }
static inline bool debug_guardpage_enabled(void)
{ … }
static inline bool page_is_guard(struct page *page)
{ … }
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order)
{ … }
void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order)
{ … }
#else
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order) {}
#endif
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
return 0;
}
#endif
extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(const struct ctl_table *, int, void *, size_t *,
loff_t *);
#endif
void drop_slab(void);
#ifndef CONFIG_MMU
#define randomize_va_space …
#else
extern int randomize_va_space;
#endif
const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif
void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap);
void pud_init(void *addr);
void pmd_init(void *addr);
void kernel_pte_init(void *addr);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
struct vmem_altmap *altmap, struct page *reuse);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
unsigned long addr, unsigned long next);
int vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{ … }
static inline void vmem_altmap_free(struct vmem_altmap *altmap,
unsigned long nr_pfns)
{ … }
#else
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
return 0;
}
static inline void vmem_altmap_free(struct vmem_altmap *altmap,
unsigned long nr_pfns)
{
}
#endif
#define VMEMMAP_RESERVE_NR …
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{ … }
#ifndef vmemmap_can_optimize
#define vmemmap_can_optimize …
#endif
#else
static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{
return false;
}
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
unsigned long nr_pages);
enum mf_flags { … };
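/*
 * Hardware memory error ("hwpoison") handling: memory_failure() isolates
 * a page hit by an uncorrectable error, soft_offline_page() migrates and
 * retires a page preventively, and unpoison_memory() is the testing-only
 * undo.
 */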
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
extern const struct attribute_group memory_failure_attr_group;
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void num_poisoned_pages_inc(unsigned long pfn);
void num_poisoned_pages_sub(unsigned long pfn, long i);
#else
static inline void memory_failure_queue(unsigned long pfn, int flags)
{
}
static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared)
{
return 0;
}
static inline void num_poisoned_pages_inc(unsigned long pfn)
{
}
static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
}
#endif
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
extern void memblk_nr_poison_inc(unsigned long pfn);
extern void memblk_nr_poison_sub(unsigned long pfn, long i);
#else
static inline void memblk_nr_poison_inc(unsigned long pfn)
{
}
static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
{
}
#endif
#ifndef arch_memory_failure
static inline int arch_memory_failure(unsigned long pfn, int flags)
{
return -ENXIO;
}
#endif
#ifndef arch_is_platform_page
static inline bool arch_is_platform_page(u64 paddr)
{
return false;
}
#endif
enum mf_result { … };
enum mf_action_page_type { … };
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
void folio_zero_user(struct folio *folio, unsigned long addr_hint);
int copy_user_large_folio(struct folio *dst, struct folio *src,
unsigned long addr_hint,
struct vm_area_struct *vma);
long copy_folio_from_user(struct folio *dst_folio,
const void __user *usr_src,
bool allow_pagefault);
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{ … }
#endif
#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif
extern int memcmp_pages(struct page *page1, struct page *page2);
static inline int pages_identical(struct page *page1, struct page *page2)
{ … }
#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
pgoff_t first_index, pgoff_t nr,
pgoff_t bitmap_pgoff,
unsigned long *bitmap,
pgoff_t *start,
pgoff_t *end);
unsigned long wp_shared_mapping_range(struct address_space *mapping,
pgoff_t first_index, pgoff_t nr);
#endif
extern int sysctl_nr_trim_pages;
#ifdef CONFIG_PRINTK
void mem_dump_obj(void *object);
#else
static inline void mem_dump_obj(void *object) {}
#endif
static inline int seal_check_write(int seals, struct vm_area_struct *vma)
{ … }
#ifdef CONFIG_ANON_VMA_NAME
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
unsigned long len_in,
struct anon_vma_name *anon_name);
#else
static inline int
madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
unsigned long len_in, struct anon_vma_name *anon_name) {
return 0;
}
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
void accept_memory(phys_addr_t start, unsigned long size);
#else
static inline bool range_contains_unaccepted_memory(phys_addr_t start,
unsigned long size)
{
return false;
}
static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
#endif
static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
{ … }
void vma_pgtable_walk_begin(struct vm_area_struct *vma);
void vma_pgtable_walk_end(struct vm_area_struct *vma);
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
#ifdef CONFIG_64BIT
int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
#else
static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
{
return 0;
}
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{
int i;
struct alloc_tag *tag;
unsigned int nr_pages = 1 << new_order;
if (!mem_alloc_profiling_enabled())
return;
tag = pgalloc_tag_get(&folio->page);
if (!tag)
return;
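/* Give every after-split folio (except the first, which keeps the original ref) a reference to the original allocation tag. */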
for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
if (ref) {
alloc_tag_ref_set(ref, tag);
put_page_tag_ref(ref);
}
}
}
static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
{
struct alloc_tag *tag;
union codetag_ref *ref;
tag = pgalloc_tag_get(&old->page);
if (!tag)
return;
ref = get_page_tag_ref(&new->page);
if (!ref)
return;
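/* Clear the old folio's reference to the original allocation tag. */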
clear_page_tag_ref(&old->page);
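/* Decrement the counters again since we take over the new folio's tag reference. */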
alloc_tag_sub(ref, folio_nr_pages(new));
__alloc_tag_ref_set(ref, tag);
put_page_tag_ref(ref);
}
#else
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
{ … }
static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
{ … }
#endif /* CONFIG_MEM_ALLOC_PROFILING */
#endif /* _LINUX_MM_H */