#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;
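/*
 * Called when the last reference to a hugetlb folio is dropped; returns the
 * folio to its hstate's free lists or frees it back to the page allocator.
 * Declared unconditionally so the generic folio freeing path compiles even
 * without CONFIG_HUGETLB_PAGE.
 */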
void free_huge_folio(struct folio *folio);
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
#define __NR_USED_SUBPAGE …
struct hugepage_subpool { … };
struct resv_map { … };
struct file_region { … };
struct hugetlb_vma_lock { … };
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) …
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
struct vm_area_struct *new_vma,
unsigned long old_addr, unsigned long new_addr,
unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
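/*
 * Handle a page fault in a hugetlb VMA; the generic fault path calls this
 * instead of the normal PTE fault handlers.
 */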
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
struct folio **foliop);
#endif
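/*
 * Reserve huge pages for [from, to) at mmap()/ftruncate() time; returns
 * true on success.  hugetlb_unreserve_pages() returns unused reservations
 * when a range is truncated or the file is released.
 */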
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
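/*
 * Serializes faults on the same logical page of a file.  The table is
 * sized at boot; faults hash to an entry by mapping and page index.
 */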
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
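/*
 * With CONFIG_ARCH_WANT_HUGE_PMD_SHARE, mappings of the same file that
 * cover an entire PUD_SIZE-aligned region may share the page table page
 * holding its PMDs; huge_pmd_share() finds or installs such a shared PMD.
 */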
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud);
bool hugetlbfs_pagecache_present(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address);
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages[MAX_NUMNODES];
#ifndef CONFIG_HIGHPTE
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{ … }
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
unsigned long address)
{ … }
#endif
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
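/*
 * Mask covering the address range mapped by the last level of page table
 * used for this huge page size; walkers use it to skip unmapped ranges one
 * page table page at a time.
 */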
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
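/*
 * hugetlb_zap_begin()/hugetlb_zap_end() bracket unmapping of a hugetlb
 * VMA: they take the VMA lock (and may widen the range to account for PMD
 * sharing) so page tables cannot be unshared or freed concurrently.
 */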
extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
struct zap_details *details);
static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{ … }
static inline void hugetlb_zap_end(struct vm_area_struct *vma,
struct zap_details *details)
{ … }
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);
long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot,
unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else
static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}
static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
static inline unsigned long hugetlb_total_pages(void)
{
return 0;
}
static inline struct address_space *hugetlb_folio_mapping_lock_write(
struct folio *folio)
{
return NULL;
}
static inline int huge_pmd_unshare(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
return 0;
}
static inline void adjust_range_if_pmd_sharing_possible(
struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
}
static inline void hugetlb_zap_begin(
struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
}
static inline void hugetlb_zap_end(
struct vm_area_struct *vma,
struct zap_details *details)
{
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
struct mm_struct *src,
struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{
BUG();
return 0;
}
static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
struct vm_area_struct *new_vma,
unsigned long old_addr,
unsigned long new_addr,
unsigned long len)
{
BUG();
return 0;
}
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
return 0;
}
static inline void hugetlb_show_meminfo_node(int nid)
{
}
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
return -EINVAL;
}
static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}
static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}
static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}
static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}
static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
return 1;
}
static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr, unsigned long len)
{
return 0;
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
BUG();
}
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
struct folio **foliop)
{
BUG();
return 0;
}
#endif
static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
unsigned long sz)
{
return NULL;
}
static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
return false;
}
static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
return 0;
}
static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared)
{
return 0;
}
static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}
static inline void move_hugetlb_state(struct folio *old_folio,
struct folio *new_folio, int reason)
{
}
static inline long hugetlb_change_protection(
struct vm_area_struct *vma, unsigned long address,
unsigned long end, pgprot_t newprot,
unsigned long cp_flags)
{
return 0;
}
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
{
BUG();
}
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
{
BUG();
return 0;
}
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
#endif
#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{ … }
#endif
#define HUGETLB_ANON_FILE …
enum { … };
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info { … };
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{ … }
struct hugetlbfs_inode_info { … };
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{ … }
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
int creat_flags, int page_size_log);
static inline bool is_file_hugepages(const struct file *file)
{ … }
static inline struct hstate *hstate_inode(struct inode *i)
{ … }
#else
#define is_file_hugepages …
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
int creat_flags, int page_size_log)
{
return ERR_PTR(-ENOSYS);
}
static inline struct hstate *hstate_inode(struct inode *i)
{
return NULL;
}
#endif
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
#endif
unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
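/*
 * hugetlb-specific folio flags.  The HPAGEFLAG() macro below generates
 * folio_test_hugetlb_*(), folio_set_hugetlb_*() and
 * folio_clear_hugetlb_*() accessors for each flag.
 */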
enum hugetlb_page_flags { … };
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname) …
#define SETHPAGEFLAG(uname, flname) …
#define CLEARHPAGEFLAG(uname, flname) …
#else
#define TESTHPAGEFLAG …
#define SETHPAGEFLAG …
#define CLEARHPAGEFLAG …
#endif
#define HPAGEFLAG(uname, flname) … \
HPAGEFLAG(…)
HPAGEFLAG(…)
HPAGEFLAG(…)
HPAGEFLAG(…)
HPAGEFLAG(…)
HPAGEFLAG(…)
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN …
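/*
 * One hstate exists per supported huge page size.  It holds the free and
 * surplus page counts, the per-node free lists, and the sysfs/sysctl state
 * for that size.
 */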
struct hstate { … };
struct huge_bootmem_page { … };
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct folio *folio);
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);
void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE …
#endif
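/* All registered huge page sizes; default_hstate is the default size. */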
extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;
#define default_hstate …
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{ … }
static inline void hugetlb_set_folio_subpool(struct folio *folio,
struct hugepage_subpool *subpool)
{ … }
static inline struct hstate *hstate_file(struct file *f)
{ … }
static inline struct hstate *hstate_sizelog(int page_size_log)
{ … }
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{ … }
static inline unsigned long huge_page_size(const struct hstate *h)
{ … }
extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
static inline unsigned long huge_page_mask(struct hstate *h)
{ … }
static inline unsigned int huge_page_order(struct hstate *h)
{ … }
static inline unsigned huge_page_shift(struct hstate *h)
{ … }
static inline bool hstate_is_gigantic(struct hstate *h)
{ … }
static inline unsigned int pages_per_huge_page(const struct hstate *h)
{ … }
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{ … }
static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
struct address_space *mapping, pgoff_t idx)
{ … }
#include <asm/hugetlb.h>
#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr, unsigned long len)
{ … }
#define is_hugepage_only_range …
#endif
#ifndef arch_clear_hugetlb_flags
static inline void arch_clear_hugetlb_flags(struct folio *folio) { … }
#define arch_clear_hugetlb_flags …
#endif
#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
vm_flags_t flags)
{ … }
#endif
static inline struct hstate *folio_hstate(struct folio *folio)
{ … }
static inline unsigned hstate_index_to_shift(unsigned index)
{ … }
static inline int hstate_index(struct hstate *h)
{ … }
int dissolve_free_hugetlb_folio(struct folio *folio);
int dissolve_free_hugetlb_folios(unsigned long start_pfn,
unsigned long end_pfn);
#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{ … }
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
return false;
}
#endif
static inline bool hugepage_migration_supported(struct hstate *h)
{ … }
static inline bool hugepage_movable_supported(struct hstate *h)
{ … }
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{ … }
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{ … }
static inline bool htlb_allow_alloc_fallback(int reason)
{ … }
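/*
 * Lock protecting a huge PTE.  For PMD-sized pages this is the split PMD
 * lock of the page table page; larger sizes fall back to
 * mm->page_table_lock.
 */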
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{ … }
#ifndef hugepages_supported
#define hugepages_supported …
#endif
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
static inline void hugetlb_count_init(struct mm_struct *mm)
{ … }
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{ … }
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{ … }
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start …
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{ … }
#endif
#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit …
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{ … }
#endif
#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif
bool is_raw_hwpoison_page_in_hugepage(struct page *page);
#else
struct hstate {};
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
return NULL;
}
static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
struct address_space *mapping, pgoff_t idx)
{
return NULL;
}
static inline int isolate_or_dissolve_huge_page(struct page *page,
struct list_head *list)
{
return -ENOMEM;
}
static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr,
int avoid_reserve)
{
return NULL;
}
static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback)
{
return NULL;
}
static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
return 0;
}
static inline struct hstate *hstate_file(struct file *f)
{
return NULL;
}
static inline struct hstate *hstate_sizelog(int page_size_log)
{
return NULL;
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
return NULL;
}
static inline struct hstate *folio_hstate(struct folio *folio)
{
return NULL;
}
static inline struct hstate *size_to_hstate(unsigned long size)
{
return NULL;
}
static inline unsigned long huge_page_size(struct hstate *h)
{
return PAGE_SIZE;
}
static inline unsigned long huge_page_mask(struct hstate *h)
{
return PAGE_MASK;
}
static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
return PAGE_SIZE;
}
static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
return PAGE_SIZE;
}
static inline unsigned int huge_page_order(struct hstate *h)
{
return 0;
}
static inline unsigned int huge_page_shift(struct hstate *h)
{
return PAGE_SHIFT;
}
static inline bool hstate_is_gigantic(struct hstate *h)
{
return false;
}
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
return 1;
}
static inline unsigned hstate_index_to_shift(unsigned index)
{
return 0;
}
static inline int hstate_index(struct hstate *h)
{
return 0;
}
static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
return 0;
}
static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
unsigned long end_pfn)
{
return 0;
}
static inline bool hugepage_migration_supported(struct hstate *h)
{
return false;
}
static inline bool hugepage_movable_supported(struct hstate *h)
{
return false;
}
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
return 0;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
return 0;
}
static inline bool htlb_allow_alloc_fallback(int reason)
{
return false;
}
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
return &mm->page_table_lock;
}
static inline void hugetlb_count_init(struct mm_struct *mm)
{
}
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
return ptep_get(ptep);
#else
return *ptep;
#endif
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz)
{
}
static inline void hugetlb_register_node(struct node *node)
{
}
static inline void hugetlb_unregister_node(struct node *node)
{
}
static inline bool hugetlbfs_pagecache_present(
struct hstate *h, struct vm_area_struct *vma, unsigned long address)
{
return false;
}
#endif
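/*
 * huge_pte_lock() combines huge_pte_lockptr() with spin_lock().  A typical
 * walk looks roughly like this (sketch only):
 *
 *	ptep = hugetlb_walk(vma, addr, huge_page_size(h));
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		... inspect or update the entry ...
 *		spin_unlock(ptl);
 *	}
 */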
static inline spinlock_t *huge_pte_lock(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{ … }
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{ … }
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
return false;
}
#endif
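/*
 * True if the architecture, the VMA's size and alignment, and its sharing
 * mode make PMD sharing worthwhile at this address.
 */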
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
#define flush_hugetlb_tlb_range(vma, addr, end) …
#endif
static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{ … }
bool __vma_private_lock(struct vm_area_struct *vma);
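/*
 * Safe wrapper around huge_pte_offset(): asserts (via lockdep) that the
 * hugetlb VMA lock or the i_mmap_rwsem is held, so a shared PMD cannot be
 * unshared and its page table freed while the caller uses the returned
 * pte pointer.
 */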
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{ … }
#endif