linux/mm/hugetlb.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata struct list_head huge_boot_pages[MAX_NUMNODES];

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{}
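
/*
 * Illustrative sketch, not part of the original source: how a caller might
 * pair hugepage_subpool_get_pages() with hugepage_subpool_put_pages() and
 * interpret their return values as described above.  The function name and
 * the error-handling structure are assumptions made for this example only.
 */
static inline int example_subpool_charge(struct hugepage_subpool *spool,
					 struct hstate *h, long delta)
{
	long gbl_adjust = hugepage_subpool_get_pages(spool, delta);

	if (gbl_adjust < 0)
		return -ENOMEM;	/* subpool could not satisfy the request */

	/*
	 * gbl_adjust may be smaller than delta when part of the request is
	 * already covered by the subpool minimum size.
	 */
	if (hugetlb_acct_memory(h, gbl_adjust) < 0) {
		/* put_pages() reports how many global reservations to drop */
		long to_drop = hugepage_subpool_put_pages(spool, delta);

		hugetlb_acct_memory(h, -to_drop);
		return -ENOMEM;
	}
	return 0;
}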

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{}

/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{}

void hugetlb_vma_lock_release(struct kref *kref)
{}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{}

static void put_uncharge_info(struct file_region *rg)
{}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. In that case, regions_needed
 * will indicate the number of file_regions needed in the cache to carry out
 * the addition of the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures are added to the cache as
 * placeholders for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the matching region_chg call and is used to
 * perform that decrement.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{}
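
/*
 * Illustrative sketch, not from the original source: the canonical
 * region_chg() -> region_add()/region_abort() sequence described in the
 * comments above.  The function name, the 'commit' parameter and the NULL
 * hugetlb_cgroup argument are assumptions made purely for the example.
 */
static inline long example_reserve_region(struct resv_map *resv, long f,
					  long t, struct hstate *h,
					  bool commit)
{
	long chg, regions_needed;

	chg = region_chg(resv, f, t, &regions_needed);
	if (chg < 0)
		return chg;	/* -ENOMEM: no cache entry could be allocated */

	if (!commit) {
		/* e.g. global accounting failed; undo the in-progress count */
		region_abort(resv, f, t, regions_needed);
		return -ENOMEM;
	}

	/* Should not fail: the prior region_chg() stocked the cache. */
	return region_add(resv, f, t, regions_needed, h, NULL);
}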

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never need to split
 * a region and therefore can not return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for an -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{}
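
/*
 * Illustrative sketch (an assumption, not original code): truncating a
 * reserve map from 'start' onward.  Because t == LONG_MAX never requires a
 * region split, the -ENOMEM case described above cannot occur and the
 * return value is simply the number of reserved pages released.
 */
static inline long example_truncate_resv_map(struct resv_map *resv, long start)
{
	return region_del(resv, start, LONG_MAX);
}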

/*
 * A rare out-of-memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in huge page units.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{}
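
/*
 * Illustrative sketch of the arithmetic implied by the comment above
 * (an assumption, not the original body): the distance from vm_start in
 * huge page units, plus the vma's file offset rescaled from PAGE_SIZE
 * units to huge page units.
 */
static inline pgoff_t example_hugecache_offset(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
		(vma->vm_pgoff >> huge_page_order(h));
}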

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of, the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
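
/*
 * Illustrative sketch (assumption, not original code): how a reserve map
 * pointer and the flags above can share a single unsigned long, relying on
 * the pointer's low bits being clear due to alignment.
 */
static inline unsigned long example_encode_resv(struct resv_map *map,
						unsigned long flags)
{
	return (unsigned long)map | (flags & HPAGE_RESV_MASK);
}

static inline struct resv_map *example_decode_resv(unsigned long value)
{
	return (struct resv_map *)(value & ~HPAGE_RESV_MASK);
}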

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file. This region map represents the backing file
 * pages which have ever had a reservation assigned, which persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it. This region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{}

struct resv_map *resv_map_alloc(void)
{}

void resv_map_release(struct kref *ref)
{}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{}

bool __vma_private_lock(struct vm_area_struct *vma)
{}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{}

/*
 * Reset and decrement one ref on the hugepage private reservation.
 * Called with the mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operates on a
 * vma of the same size. It should never be called with the last ref
 * on the reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{}

static unsigned long available_huge_pages(struct hstate *h)
{}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(int *next_node,
					nodemask_t *nodes_allowed)
{}
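
/*
 * Illustrative sketch (assumption, not the original body): the round-robin
 * advance described above, using next_node_in() to wrap at the end of the
 * allowed mask.  This is how "this node" is returned and the next node is
 * saved in a single step.
 */
static inline int example_next_node_round_robin(int *next_node,
						nodemask_t *nodes_allowed)
{
	int nid = next_node_allowed(*next_node, nodes_allowed);

	*next_node = next_node_in(nid, *nodes_allowed);
	return nid;
}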

/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{}

#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)

/* used to demote non-gigantic huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, clear the hugetlb flag so that the
 * folio appears as just a compound page.  Otherwise, wait until after
 * allocating vmemmap to clear the flag.
 *
 * Must be called with hugetlb lock held.
 */
static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{}

/*
 * Since update_and_free_hugetlb_folio() can be called under any context, we cannot
 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
 * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
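
/*
 * Illustrative sketch (assumption, not original code): the producer side of
 * the deferred-free scheme described above.  The folio's ->mapping field is
 * reused as an llist_node, and the work item is only scheduled when the
 * list transitions from empty to non-empty.
 */
static inline void example_defer_free_hugetlb_folio(struct folio *folio)
{
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}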

static inline void flush_free_hpage_work(struct hstate *h)
{}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{}

static void bulk_vmemmap_restore_error(struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{}

static void update_and_free_pages_bulk(struct hstate *h,
						struct list_head *folio_list)
{}

struct hstate *size_to_hstate(unsigned long size)
{}

void free_huge_folio(struct folio *folio)
{}

/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{}

static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{}

static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{}

static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
{}

static bool __prep_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{}

static bool prep_compound_gigantic_folio(struct folio *folio,
							unsigned int order)
{}

static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
							unsigned int order)
{}

/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the folio is locked which means that folio_mapping() is
 * stable.  Due to locking order, we can only trylock_write.  If we
 * cannot get the lock, simply return NULL to the caller.
 */
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
{}

static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{}

static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
				gfp_t gfp_mask, int nid, nodemask_t *nmask,
				nodemask_t *node_alloc_noretry)
{}

static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{}

/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages.
 *
 * Note that the returned page is 'frozen': the ref count of the head page
 * and all tail pages is zero.
 */
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{}

static void prep_and_add_allocated_folios(struct hstate *h,
					struct list_head *folio_list)
{}

/*
 * Allocates a fresh hugetlb page in a node interleaved manner.  The page
 * will later be added to the appropriate hugetlb pool.
 */
static struct folio *alloc_pool_huge_folio(struct hstate *h,
					nodemask_t *nodes_allowed,
					nodemask_t *node_alloc_noretry,
					int *next_node)
{}

/*
 * Remove a huge page from the pool, from the next node to free.  Attempt to keep
 * persistent huge pages more or less balanced over allowed nodes.
 * This routine only 'removes' the hugetlb page.  The caller must make
 * an additional call to free the page to low level allocators.
 * Called with hugetlb_lock locked.
 */
static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
		nodemask_t *nodes_allowed, bool acct_surplus)
{}

/*
 * Dissolve a given free hugetlb folio into free buddy pages. This function
 * does nothing for in-use hugetlb folios and non-hugetlb folios.
 * This function returns values like below:
 *
 *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
 *           when the system is under memory pressure and the feature of
 *           freeing unused vmemmap pages associated with each hugetlb page
 *           is enabled.
 *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in use
 *           (allocated or reserved).
 *       0:  successfully dissolved free hugepages or the page is not a
 *           hugepage (considered as already dissolved)
 */
int dissolve_free_hugetlb_folio(struct folio *folio)
{}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
 * free hugetlb folios that were dissolved before that error are lost.
 */
int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
{}
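
/*
 * Illustrative sketch (assumption, not the original body): how a pfn range
 * walker might drive dissolve_free_hugetlb_folio() and react to the return
 * codes documented above.  Stepping one pfn at a time is a simplification;
 * the real walker can advance by the minimum huge page size.
 */
static inline int example_dissolve_range(unsigned long start_pfn,
					 unsigned long end_pfn)
{
	unsigned long pfn;
	int rc = 0;

	for (pfn = start_pfn; pfn < end_pfn && !rc; pfn++)
		rc = dissolve_free_hugetlb_folio(pfn_folio(pfn));

	return rc;	/* 0, -ENOMEM or -EBUSY */
}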

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
				gfp_t gfp_mask,	int nid, nodemask_t *nmask)
{}

static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{}

/* folio migration callback function */
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
{}

static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
{}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, long delta)
	__must_hold(&hugetlb_lock)
{}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{}


/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 *
 * vma_del_reservation is used in error paths where an entry in the reserve
 * map was created during huge page allocation and must be removed.  It is to
 * be called after calling vma_needs_reservation to determine if a reservation
 * exists.
 */
enum vma_resv_mode {};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{}

static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{}

static long vma_del_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{}
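
/*
 * Illustrative sketch (assumption, not original code): the
 * needs/commit/end protocol described in the block comment above, as an
 * allocation path might drive it.  The function name and the
 * 'allocation_succeeded' parameter exist only for the example.
 */
static inline void example_reservation_protocol(struct hstate *h,
						struct vm_area_struct *vma,
						unsigned long addr,
						bool allocation_succeeded)
{
	long needed = vma_needs_reservation(h, vma, addr);

	if (needed < 0)
		return;		/* reserve map manipulation failed */

	if (allocation_succeeded)
		(void)vma_commit_reservation(h, vma, addr);
	else
		vma_end_reservation(h, vma, addr);
}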

/*
 * This routine is called to restore reservation information on error paths.
 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
 * and the hugetlb mutex should remain held when calling this routine.
 *
 * It handles two specific cases:
 * 1) A reservation was in place and the folio consumed the reservation.
 *    hugetlb_restore_reserve is set in the folio.
 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
 *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
 *
 * In case 1, free_huge_folio later in the error path will increment the
 * global reserve count.  But, free_huge_folio does not have enough context
 * to adjust the reservation map.  This case deals primarily with private
 * mappings.  Adjust the reserve map here to be consistent with global
 * reserve count adjustments to be made by free_huge_folio.  Make sure the
 * reserve map indicates there is a reservation present.
 *
 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
 */
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
			unsigned long address, struct folio *folio)
{}

/*
 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
 * the old one
 * @h: struct hstate old page belongs to
 * @old_folio: Old folio to dissolve
 * @list: List to isolate the page in case we need to
 * Returns 0 on success, otherwise negated error.
 */
static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
			struct folio *old_folio, struct list_head *list)
{}

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
{}

struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{}

int alloc_bootmem_huge_page(struct hstate *h, int nid)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{}

/* Initialize [start_page_number:end_page_number] tail struct pages of a hugepage */
static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
					unsigned long start_page_number,
					unsigned long end_page_number)
{}

static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
					      struct hstate *h,
					      unsigned long nr_pages)
{}

static void __init prep_and_add_bootmem_folios(struct hstate *h,
					struct list_head *folio_list)
{}

/*
 * Put bootmem huge pages into the standard lists after mem_map is up.
 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
 */
static void __init gather_bootmem_prealloc_node(unsigned long nid)
{}

static void __init gather_bootmem_prealloc_parallel(unsigned long start,
						    unsigned long end, void *arg)
{}

static void __init gather_bootmem_prealloc(void)
{}

static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
{}

static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
{}

static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
{}

static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
{}

static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
{}

static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
{}

/*
 * NOTE: this routine is called in different contexts for gigantic and
 * non-gigantic pages.
 * - For gigantic pages, this is called early in the boot process and
 *   pages are allocated from memblock or a similar early allocator.
 *   Gigantic pages are actually added to pools later with the routine
 *   gather_bootmem_prealloc.
 * - For non-gigantic pages, this is called later in the boot process after
 *   all of mm is up and functional.  Pages are allocated from buddy and
 *   then added to hugetlb pools.
 */
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{}

static void __init hugetlb_init_hstates(void)
{}

static void __init report_hugepages(void)
{}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h))
		return;

	/*
	 * Collect pages to be freed on a list, and free after dropping lock
	 */
	for_each_node_mask(i, *nodes_allowed) {
		struct folio *folio, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(folio, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				goto out;
			if (folio_test_highmem(folio))
				continue;
			remove_hugetlb_folio(h, folio, false);
			list_add(&folio->lru, &page_list);
		}
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{}

#define persistent_huge_pages(h)
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
			      nodemask_t *nodes_allowed)
{}

static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
{}

static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
	__must_hold(&hugetlb_lock)
{}

#define HSTATE_ATTR_RO(_name)

#define HSTATE_ATTR_WO(_name)

#define HSTATE_ATTR(_name)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{}

static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
					 struct kobject *kobj, const char *buf,
					 size_t len)
{}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif


static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{}
HSTATE_ATTR_RO(surplus_hugepages);

static ssize_t demote_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{}
HSTATE_ATTR_WO(demote);

static ssize_t demote_size_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{}

static ssize_t demote_size_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{}
HSTATE_ATTR(demote_size);

static struct attribute *hstate_attrs[] =;

static const struct attribute_group hstate_attr_group =;

static struct attribute *hstate_demote_attrs[] =;

static const struct attribute_group hstate_demote_attr_group =;

static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    const struct attribute_group *hstate_attr_group)
{}

#ifdef CONFIG_NUMA
static bool hugetlb_sysfs_initialized __ro_after_init;

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {};
static struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] =;

static const struct attribute_group per_node_hstate_attr_group =;

/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{}

/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{}


/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_register_all_nodes(void) { }

#endif

#ifdef CONFIG_CMA
static void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_check(void)
{
}
#endif

static void __init hugetlb_sysfs_init(void)
{}

#ifdef CONFIG_SYSCTL
static void hugetlb_sysctl_init(void);
#else
static inline void hugetlb_sysctl_init(void) { }
#endif

static int __init hugetlb_init(void)
{}
subsys_initcall(hugetlb_init);

/* Overwritten by architectures with more huge page sizes */
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
{}

void __init hugetlb_add_hstate(unsigned int order)
{}

bool __init __weak hugetlb_node_alloc_supported(void)
{}

static void __init hugepages_clear_pages_in_node(void)
{}

/*
 * hugepages command line processing
 * hugepages normally follows a valid hugepagesz or default_hugepagesz
 * specification.  If not, ignore the hugepages value.  hugepages can also
 * be the first huge page command line option, in which case it implicitly
 * specifies the number of huge pages for the default size.
 */
static int __init hugepages_setup(char *s)
{}
__setup("hugepages=", hugepages_setup);

/*
 * hugepagesz command line processing
 * A specific huge page size can only be specified once with hugepagesz.
 * hugepagesz is followed by hugepages on the command line.  The global
 * variable 'parsed_valid_hugepagesz' is used to determine if prior
 * hugepagesz argument was valid.
 */
static int __init hugepagesz_setup(char *s)
{}
__setup("hugepagesz=", hugepagesz_setup);

/*
 * default_hugepagesz command line input
 * Only one instance of default_hugepagesz allowed on command line.
 */
static int __init default_hugepagesz_setup(char *s)
{}
__setup("default_hugepagesz=", default_hugepagesz_setup);

static unsigned int allowed_mems_nr(struct hstate *h)
{}

#ifdef CONFIG_SYSCTL
static int proc_hugetlb_doulongvec_minmax(const struct ctl_table *table, int write,
					  void *buffer, size_t *length,
					  loff_t *ppos, unsigned long *out)
{}

static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 const struct ctl_table *table, int write,
			 void *buffer, size_t *length, loff_t *ppos)
{}

static int hugetlb_sysctl_handler(const struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{}

#ifdef CONFIG_NUMA
static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{}
#endif /* CONFIG_NUMA */

static int hugetlb_overcommit_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{}

static struct ctl_table hugetlb_table[] =;

static void hugetlb_sysctl_init(void)
{}
#endif /* CONFIG_SYSCTL */

void hugetlb_report_meminfo(struct seq_file *m)
{}

int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{}

void hugetlb_show_meminfo_node(int nid)
{}

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{}

static int hugetlb_acct_memory(struct hstate *h, long delta)
{}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{}

static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{}

static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{}

/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * their original vm_ops are overwritten with shm_vm_ops.
 */
const struct vm_operations_struct hugetlb_vm_ops =;

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{}

bool is_hugetlb_entry_migration(pte_t pte)
{}

bool is_hugetlb_entry_hwpoisoned(pte_t pte)
{}

static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
		      struct folio *new_folio, pte_t old, unsigned long sz)
{}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma)
{}

static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
			  unsigned long sz)
{}

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len)
{}

void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page, zap_flags_t zap_flags)
{}

void __hugetlb_zap_begin(struct vm_area_struct *vma,
			 unsigned long *start, unsigned long *end)
{}

void __hugetlb_zap_end(struct vm_area_struct *vma,
		       struct zap_details *details)
{}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
{}

/*
 * hugetlb_wp() should be called with page lock of the original hugepage held.
 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
		       struct vm_fault *vmf)
{}

/*
 * Return whether there is a pagecache page to back the given address within the VMA.
 */
bool hugetlbfs_pagecache_present(struct hstate *h,
				 struct vm_area_struct *vma, unsigned long address)
{}

int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			   pgoff_t idx)
{}

static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
						  struct address_space *mapping,
						  unsigned long reason)
{}

/*
 * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
 * false if pte changed or is changing.
 */
static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep, pte_t old_pte)
{}

static vm_fault_t hugetlb_no_page(struct address_space *mapping,
			struct vm_fault *vmf)
{}

#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	return 0;
}
#endif
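
/*
 * Illustrative sketch (assumption, not the original body): one way the SMP
 * hash above can be computed, mixing the mapping pointer and index with
 * jhash2() and masking the result into the mutex table, which is assumed
 * here to have a power-of-two size.
 */
static inline u32 example_fault_mutex_hash(struct address_space *mapping,
					   pgoff_t idx)
{
	unsigned long key[2];

	key[0] = (unsigned long)mapping;
	key[1] = idx;

	return jhash2((u32 *)key, sizeof(key) / sizeof(u32), 0) &
		(num_fault_mutexes - 1);
}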

vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{}

#ifdef CONFIG_USERFAULTFD
/*
 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
 */
static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
		struct vm_area_struct *vma, unsigned long address)
{}

/*
 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
 * with modifications for hugetlb pages.
 */
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop)
{}
#endif /* CONFIG_USERFAULTFD */

long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{}

/* Return true if reservation was successful, false otherwise.  */
bool hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{}

long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{}

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{}

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{}

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If the pte is
 * shared, as indicated by page_count > 1, unmap is achieved by clearing the
 * pud and decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * Called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{}

#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{}

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{}

/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size.  Used to skip non-present
 * page table entries when linearly scanning address ranges.  Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{}

#else

/* See description above.  Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
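
/*
 * Illustrative sketch (assumption, not original code): typical use of the
 * mask while linearly scanning an address range.  When no entry is present
 * at 'addr', or-ing in the mask jumps to the last huge page covered by that
 * page table page, so the scan's next step skips the rest of it.
 */
static inline unsigned long example_skip_nonpresent_table(unsigned long addr,
						unsigned long last_addr_mask)
{
	return addr | last_addr_mask;
}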

/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{}

int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{}

int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{}

void folio_putback_active_hugetlb(struct folio *folio)
{}

void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{}

static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{}

#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);

void __init hugetlb_cma_reserve(int order)
{}

static void __init hugetlb_cma_check(void)
{}

#endif /* CONFIG_CMA */