#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{ … }
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;
__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = …;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
DEFINE_SPINLOCK(…);
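/*
 * Serializes faults on the same logical page.  The mutex used for a given
 * fault is chosen by hashing the mapping and page index, see
 * hugetlb_fault_mutex_hash().
 */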
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
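/*
 * A hugepage_subpool carries the per-hugetlbfs-mount limits (max_hpages,
 * min_hpages).  Pages are charged against the subpool first and only then
 * against the global pool counters.
 */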
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{ … }
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
unsigned long irq_flags)
{ … }
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages)
{ … }
void hugepage_put_subpool(struct hugepage_subpool *spool)
{ … }
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
long delta)
{ … }
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
long delta)
{ … }
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{ … }
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{ … }
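/*
 * Per-VMA lock, allocated for sharable hugetlb mappings and reference
 * counted via a kref, used to synchronize huge PMD sharing/unsharing with
 * page faults and truncation.
 */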
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{ … }
void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{ … }
void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{ … }
void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{ … }
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{ … }
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{ … }
void hugetlb_vma_lock_release(struct kref *kref)
{ … }
static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{ … }
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{ … }
static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{ … }
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{ … }
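/*
 * Reservation map helpers: a resv_map tracks reserved file ranges as a
 * sorted list of [from, to) file_region entries, each carrying hugetlb
 * cgroup uncharge information.  region_add/chg/abort/del/count operate on
 * that list under resv->lock.
 */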
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{ … }
static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
struct file_region *rg)
{ … }
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
struct hstate *h,
struct resv_map *resv,
struct file_region *nrg)
{ … }
static void put_uncharge_info(struct file_region *rg)
{ … }
static bool has_same_uncharge_info(struct file_region *rg,
struct file_region *org)
{ … }
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{ … }
static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
long to, struct hstate *h, struct hugetlb_cgroup *cg,
long *regions_needed)
{ … }
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
struct hugetlb_cgroup *h_cg,
struct hstate *h, long *regions_needed)
{ … }
static int allocate_file_region_entries(struct resv_map *resv,
int regions_needed)
__must_hold(&resv->lock)
{ … }
static long region_add(struct resv_map *resv, long f, long t,
long in_regions_needed, struct hstate *h,
struct hugetlb_cgroup *h_cg)
{ … }
static long region_chg(struct resv_map *resv, long f, long t,
long *out_regions_needed)
{ … }
static void region_abort(struct resv_map *resv, long f, long t,
long regions_needed)
{ … }
static long region_del(struct resv_map *resv, long f, long t)
{ … }
void hugetlb_fix_reserve_counts(struct inode *inode)
{ … }
static long region_count(struct resv_map *resv, long f, long t)
{ … }
static pgoff_t vma_hugecache_offset(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{ … }
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{ … }
EXPORT_SYMBOL_GPL(…);
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{ … }
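/*
 * For MAP_PRIVATE hugetlb mappings the reserve map pointer is kept in
 * vm_private_data with the HPAGE_RESV_* flags stored in its low bits;
 * get_vma_private_data()/set_vma_private_data() access the raw value and
 * is_vma_resv_set() tests individual flags against HPAGE_RESV_MASK.
 */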
#define HPAGE_RESV_OWNER …
#define HPAGE_RESV_UNMAPPED …
#define HPAGE_RESV_MASK …
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{ … }
static void set_vma_private_data(struct vm_area_struct *vma,
unsigned long value)
{ … }
static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
struct hugetlb_cgroup *h_cg,
struct hstate *h)
{ … }
struct resv_map *resv_map_alloc(void)
{ … }
void resv_map_release(struct kref *ref)
{ … }
static inline struct resv_map *inode_resv_map(struct inode *inode)
{ … }
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{ … }
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{ … }
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{ … }
static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{ … }
bool __vma_private_lock(struct vm_area_struct *vma)
{ … }
void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{ … }
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{ … }
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{ … }
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{ … }
static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
int nid)
{ … }
static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask)
{ … }
static unsigned long available_huge_pages(struct hstate *h)
{ … }
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve,
long chg)
{ … }
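/*
 * Round-robin node selection: persistent huge pages are allocated from and
 * freed to nodes in an interleaved fashion so the pool stays balanced
 * across the allowed nodemask.
 */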
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{ … }
static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{ … }
static int hstate_next_node_to_alloc(int *next_node,
nodemask_t *nodes_allowed)
{ … }
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{ … }
#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask) …
#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) …
static void __destroy_compound_gigantic_folio(struct folio *folio,
unsigned int order, bool demote)
{ … }
static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
unsigned int order)
{ … }
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
unsigned int order)
{ … }
static void free_gigantic_folio(struct folio *folio, unsigned int order)
{ … }
#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{ … }
#else
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
return NULL;
}
#endif
#else
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
unsigned int order) { }
#endif
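/*
 * remove_hugetlb_folio()/add_hugetlb_folio() take a folio off or put it
 * back on the hstate lists and fix up nr_huge_pages/surplus accounting;
 * both must be called with hugetlb_lock held.
 */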
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
bool adjust_surplus)
{ … }
static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
bool adjust_surplus)
{ … }
static void __update_and_free_hugetlb_folio(struct hstate *h,
struct folio *folio)
{ … }
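/*
 * Freeing a hugetlb folio may need to restore its vmemmap, which can sleep.
 * When update_and_free_hugetlb_folio() is called in atomic context the folio
 * is therefore queued on hpage_freelist and freed later from
 * free_hpage_workfn() in process context.
 */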
static LLIST_HEAD(hpage_freelist);
static void free_hpage_workfn(struct work_struct *work)
{ … }
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
static inline void flush_free_hpage_work(struct hstate *h)
{ … }
static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
bool atomic)
{ … }
static void bulk_vmemmap_restore_error(struct hstate *h,
struct list_head *folio_list,
struct list_head *non_hvo_folios)
{ … }
static void update_and_free_pages_bulk(struct hstate *h,
struct list_head *folio_list)
{ … }
struct hstate *size_to_hstate(unsigned long size)
{ … }
void free_huge_folio(struct folio *folio)
{ … }
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{ … }
static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{ … }
static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{ … }
static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
{ … }
static bool __prep_compound_gigantic_folio(struct folio *folio,
unsigned int order, bool demote)
{ … }
static bool prep_compound_gigantic_folio(struct folio *folio,
unsigned int order)
{ … }
static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
unsigned int order)
{ … }
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
{ … }
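/*
 * Fresh huge page allocation: alloc_buddy_hugetlb_folio() takes pages from
 * the buddy allocator (node_alloc_noretry tracks nodes where an allocation
 * recently failed so further attempts there skip the costly
 * __GFP_RETRY_MAYFAIL reclaim), while the alloc_fresh_* wrappers also handle
 * gigantic pages and folio preparation.
 */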
static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask,
nodemask_t *node_alloc_noretry)
{ … }
static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask,
nodemask_t *node_alloc_noretry)
{ … }
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask,
nodemask_t *node_alloc_noretry)
{ … }
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask)
{ … }
static void prep_and_add_allocated_folios(struct hstate *h,
struct list_head *folio_list)
{ … }
static struct folio *alloc_pool_huge_folio(struct hstate *h,
nodemask_t *nodes_allowed,
nodemask_t *node_alloc_noretry,
int *next_node)
{ … }
static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
nodemask_t *nodes_allowed, bool acct_surplus)
{ … }
int dissolve_free_hugetlb_folio(struct folio *folio)
{ … }
int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
{ … }
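/*
 * Surplus pages are temporary huge pages allocated above nr_huge_pages
 * (bounded by nr_overcommit_huge_pages); when released they are freed back
 * to the buddy allocator instead of being returned to the pool.
 */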
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask)
{ … }
static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask)
{ … }
static
struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{ … }
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
{ … }
static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
{ … }
static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock)
{ … }
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{ … }
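/*
 * vma_needs/commit/end/add/del_reservation() are thin wrappers around
 * __vma_reservation_common() and manage the per-VMA view of the reserve map
 * around huge page allocation at fault time.
 */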
enum vma_resv_mode { … };
static long __vma_reservation_common(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr,
enum vma_resv_mode mode)
{ … }
static long vma_needs_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{ … }
static long vma_commit_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{ … }
static void vma_end_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{ … }
static long vma_add_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{ … }
static long vma_del_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{ … }
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct folio *folio)
{ … }
static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
struct folio *old_folio, struct list_head *list)
{ … }
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
{ … }
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{ … }
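/*
 * Boot-time allocation: gigantic pages requested on the kernel command line
 * are carved out of memblock before the buddy allocator is available and are
 * added to the pool later via gather_bootmem_prealloc().
 */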
int alloc_bootmem_huge_page(struct hstate *h, int nid)
__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{ … }
static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
unsigned long start_page_number,
unsigned long end_page_number)
{ … }
static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
struct hstate *h,
unsigned long nr_pages)
{ … }
static void __init prep_and_add_bootmem_folios(struct hstate *h,
struct list_head *folio_list)
{ … }
static void __init gather_bootmem_prealloc_node(unsigned long nid)
{ … }
static void __init gather_bootmem_prealloc_parallel(unsigned long start,
unsigned long end, void *arg)
{ … }
static void __init gather_bootmem_prealloc(void)
{ … }
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
{ … }
static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
{ … }
static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
{ … }
static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
{ … }
static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
{ … }
static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
{ … }
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{ … }
static void __init hugetlb_init_hstates(void)
{ … }
static void __init report_hugepages(void)
{ … }
#ifdef CONFIG_HIGHMEM
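/*
 * When shrinking the pool on a CONFIG_HIGHMEM system, free huge pages backed
 * by lowmem first; highmem folios are skipped.  Folios are collected on a
 * local list and released after dropping hugetlb_lock.
 */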
static void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
int i;
LIST_HEAD(page_list);
lockdep_assert_held(&hugetlb_lock);
if (hstate_is_gigantic(h))
return;
for_each_node_mask(i, *nodes_allowed) {
struct folio *folio, *next;
struct list_head *freel = &h->hugepage_freelists[i];
list_for_each_entry_safe(folio, next, freel, lru) {
if (count >= h->nr_huge_pages)
goto out;
if (folio_test_highmem(folio))
continue;
remove_hugetlb_folio(h, folio, false);
list_add(&folio->lru, &page_list);
}
}
out:
spin_unlock_irq(&hugetlb_lock);
update_and_free_pages_bulk(h, &page_list);
spin_lock_irq(&hugetlb_lock);
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{ … }
#endif
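/*
 * Increment or decrement surplus_huge_pages by converting free pages to or
 * from surplus, one node at a time, and return whether an adjustment was
 * made.  Called with hugetlb_lock held.
 */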
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
int delta)
{ … }
#define persistent_huge_pages(h) …
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
nodemask_t *nodes_allowed)
{ … }
static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
{ … }
static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
__must_hold(&hugetlb_lock)
{ … }
#define HSTATE_ATTR_RO(_name) …
#define HSTATE_ATTR_WO(_name) …
#define HSTATE_ATTR(_name) …
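/*
 * sysfs interface: per-hstate attribute groups exposed under
 * /sys/kernel/mm/hugepages/hugepages-<size>kB/ and, with CONFIG_NUMA, under
 * each node's /sys/devices/system/node/nodeN/hugepages/ directory.
 */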
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{ … }
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
struct hstate *h, int nid,
unsigned long count, size_t len)
{ … }
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
struct kobject *kobj, const char *buf,
size_t len)
{ … }
static ssize_t nr_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
static ssize_t nr_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{ … }
HSTATE_ATTR(…);
#ifdef CONFIG_NUMA
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{ … }
static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{ … }
HSTATE_ATTR(…);
#endif
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{ … }
HSTATE_ATTR(…);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
HSTATE_ATTR_RO(…);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
HSTATE_ATTR_RO(…);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
HSTATE_ATTR_RO(…);
static ssize_t demote_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{ … }
HSTATE_ATTR_WO(…);
static ssize_t demote_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{ … }
static ssize_t demote_size_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{ … }
HSTATE_ATTR(…);
static struct attribute *hstate_attrs[] = …;
static const struct attribute_group hstate_attr_group = …;
static struct attribute *hstate_demote_attrs[] = …;
static const struct attribute_group hstate_demote_attr_group = …;
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
struct kobject **hstate_kobjs,
const struct attribute_group *hstate_attr_group)
{ … }
#ifdef CONFIG_NUMA
static bool hugetlb_sysfs_initialized __ro_after_init;
struct node_hstate { … };
static struct node_hstate node_hstates[MAX_NUMNODES];
static struct attribute *per_node_hstate_attrs[] = …;
static const struct attribute_group per_node_hstate_attr_group = …;
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{ … }
void hugetlb_unregister_node(struct node *node)
{ … }
void hugetlb_register_node(struct node *node)
{ … }
static void __init hugetlb_register_all_nodes(void)
{ … }
#else
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
BUG();
if (nidp)
*nidp = -1;
return NULL;
}
static void hugetlb_register_all_nodes(void) { }
#endif
#ifdef CONFIG_CMA
static void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_check(void)
{
}
#endif
static void __init hugetlb_sysfs_init(void)
{ … }
#ifdef CONFIG_SYSCTL
static void hugetlb_sysctl_init(void);
#else
static inline void hugetlb_sysctl_init(void) { }
#endif
static int __init hugetlb_init(void)
{ … }
subsys_initcall(hugetlb_init);
bool __init __weak arch_hugetlb_valid_size(unsigned long size)
{ … }
void __init hugetlb_add_hstate(unsigned int order)
{ … }
bool __init __weak hugetlb_node_alloc_supported(void)
{ … }
static void __init hugepages_clear_pages_in_node(void)
{ … }
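/*
 * Command-line parsing for "hugepages=", "hugepagesz=" and
 * "default_hugepagesz=".  hugepages= applies to the most recently parsed
 * hugepagesz= (or the default size if none was given) and may carry a
 * per-node list such as hugepages=0:1024,1:1024.
 */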
static int __init hugepages_setup(char *s)
{ … }
__setup(…);
static int __init hugepagesz_setup(char *s)
{ … }
__setup(…);
static int __init default_hugepagesz_setup(char *s)
{ … }
__setup(…);
static unsigned int allowed_mems_nr(struct hstate *h)
{ … }
#ifdef CONFIG_SYSCTL
static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write,
void *buffer, size_t *length,
loff_t *ppos, unsigned long *out)
{ … }
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{ … }
static int hugetlb_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{ … }
#ifdef CONFIG_NUMA
static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{ … }
#endif
static int hugetlb_overcommit_handler(const struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{ … }
static struct ctl_table hugetlb_table[] = …;
static void hugetlb_sysctl_init(void)
{ … }
#endif
void hugetlb_report_meminfo(struct seq_file *m)
{ … }
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{ … }
void hugetlb_show_meminfo_node(int nid)
{ … }
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{ … }
unsigned long hugetlb_total_pages(void)
{ … }
static int hugetlb_acct_memory(struct hstate *h, long delta)
{ … }
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{ … }
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{ … }
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{ … }
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{ … }
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{ … }
const struct vm_operations_struct hugetlb_vm_ops = …;
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
int writable)
{ … }
static void set_huge_ptep_writable(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{ … }
bool is_hugetlb_entry_migration(pte_t pte)
{ … }
bool is_hugetlb_entry_hwpoisoned(pte_t pte)
{ … }
static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
struct folio *new_folio, pte_t old, unsigned long sz)
{ … }
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{ … }
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
unsigned long sz)
{ … }
int move_hugetlb_page_tables(struct vm_area_struct *vma,
struct vm_area_struct *new_vma,
unsigned long old_addr, unsigned long new_addr,
unsigned long len)
{ … }
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags)
{ … }
void __hugetlb_zap_begin(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{ … }
void __hugetlb_zap_end(struct vm_area_struct *vma,
struct zap_details *details)
{ … }
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
{ … }
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, unsigned long address)
{ … }
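/*
 * hugetlb_wp() implements copy-on-write for huge pages.  If allocating the
 * new page fails and the faulting VMA owns the private reservation,
 * unmap_ref_private() above is used to unmap the original page from other
 * mappings so the fault can be retried against it.
 */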
static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
struct vm_fault *vmf)
{ … }
bool hugetlbfs_pagecache_present(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{ … }
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx)
{ … }
static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
struct address_space *mapping,
unsigned long reason)
{ … }
static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t old_pte)
{ … }
static vm_fault_t hugetlb_no_page(struct address_space *mapping,
struct vm_fault *vmf)
{ … }
#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{ … }
#else
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
return 0;
}
#endif
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{ … }
#ifdef CONFIG_USERFAULTFD
static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{ … }
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
struct folio **foliop)
{ … }
#endif
long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end,
pgprot_t newprot, unsigned long cp_flags)
{ … }
bool hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{ … }
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed)
{ … }
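/*
 * Huge PMD sharing (CONFIG_ARCH_WANT_HUGE_PMD_SHARE): processes mapping the
 * same PUD_SIZE-aligned range of a shared hugetlbfs file can share the PMD
 * page holding its page table entries; huge_pmd_unshare() drops a share by
 * clearing the PUD entry and dropping a reference on the PMD page.
 */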
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
struct vm_area_struct *vma,
unsigned long addr, pgoff_t idx)
{ … }
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{ … }
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{ … }
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud)
{ … }
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{ … }
#else
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud)
{
return NULL;
}
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
return 0;
}
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
}
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
return false;
}
#endif
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{ … }
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{ … }
unsigned long hugetlb_mask_last_page(struct hstate *h)
{ … }
#else
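/*
 * Generic fallback for hugetlb_mask_last_page(): the returned mask is used
 * to advance an address to the last huge page mapped by the current page
 * table page so walkers can skip runs of non-present entries; architectures
 * with non-standard huge page layouts provide their own version.
 */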
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
if (huge_page_size(h) == PMD_SIZE)
return PUD_SIZE - PMD_SIZE;
#endif
return 0UL;
}
#endif
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{ … }
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{ … }
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared)
{ … }
void folio_putback_active_hugetlb(struct folio *folio)
{ … }
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{ … }
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{ … }
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{ … }
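/*
 * hugetlb_cma= carves out per-node CMA areas at boot that are later used to
 * satisfy gigantic huge page allocations at runtime.
 */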
#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;
static int __init cmdline_parse_hugetlb_cma(char *p)
{ … }
early_param(…);
void __init hugetlb_cma_reserve(int order)
{ … }
static void __init hugetlb_cma_check(void)
{ … }
#endif