/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
#include <linux/sched/coredump.h>
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/kobject.h>
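/*
 * Entry points for creating, copying, write-protect handling, moving and
 * tearing down PMD- and PUD-sized transparent huge page mappings.
 */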
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
struct vm_area_struct *vma);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
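/*
 * Bits of the global transparent_hugepage_flags bitmap, configured through
 * /sys/kernel/mm/transparent_hugepage/ and the "transparent_hugepage=" boot
 * parameter.
 */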
enum transparent_hugepage_flag { … };
struct kobject;
struct kobj_attribute;
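/*
 * Show/store helpers for sysfs attributes that are backed by a single bit of
 * transparent_hugepage_flags.
 */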
ssize_t single_hugepage_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;
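/*
 * Masks of the folio orders supported for anonymous, special (DAX / huge
 * pfnmap) and file-backed THP mappings; THP_ORDERS_ALL is their union.
 */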
#define THP_ORDERS_ALL_ANON …
#define THP_ORDERS_ALL_SPECIAL …
#define THP_ORDERS_ALL_FILE_DEFAULT …
#define THP_ORDERS_ALL …
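/* Flags controlling how thp_vma_allowable_orders() evaluates a VMA. */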
#define TVA_SMAPS …
#define TVA_IN_PF …
#define TVA_ENFORCE_SYSFS …
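/*
 * Convenience wrapper around thp_vma_allowable_orders() for callers that only
 * care about a single @order.
 */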
#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) …
#define split_folio(f) …
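/*
 * Shift, order, size and mask of PMD- and PUD-sized huge pages; only
 * meaningful when the page table format has huge leaf entries.
 */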
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT …
#define HPAGE_PUD_SHIFT …
#else
#define HPAGE_PMD_SHIFT …
#define HPAGE_PUD_SHIFT …
#endif
#define HPAGE_PMD_ORDER …
#define HPAGE_PMD_NR …
#define HPAGE_PMD_MASK …
#define HPAGE_PMD_SIZE …
#define HPAGE_PUD_ORDER …
#define HPAGE_PUD_NR …
#define HPAGE_PUD_MASK …
#define HPAGE_PUD_SIZE …
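/*
 * Per-order (mTHP) event counters, accumulated per CPU and exported under
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/.
 */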
enum mthp_stat_item { … };
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
struct mthp_stat { … };
DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{ … }
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{ … }
#else
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
}
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
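/*
 * Global THP control bits plus bitmaps of the anonymous folio orders whose
 * per-size sysfs policy is "always", "madvise" or "inherit" respectively.
 */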
extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;
static inline bool hugepage_global_enabled(void)
{ … }
static inline bool hugepage_global_always(void)
{ … }
static inline int highest_order(unsigned long orders)
{ … }
static inline int next_order(unsigned long *orders, int prev)
{ … }
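/*
 * thp_vma_suitable_order()/_orders() check whether a naturally aligned folio
 * of the given order(s) containing @addr would fit entirely inside the VMA.
 */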
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
unsigned long addr, int order)
{ … }
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
unsigned long addr, unsigned long orders)
{ … }
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{ … }
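/*
 * thp_vma_allowable_orders() filters @orders down to those that may be used
 * for this VMA, honouring vm_flags, the TVA_* flags and, when requested, the
 * sysfs configuration; __thp_vma_allowable_orders() is the slow path.
 */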
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
unsigned long vm_flags,
unsigned long tva_flags,
unsigned long orders);
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
unsigned long vm_flags,
unsigned long tva_flags,
unsigned long orders)
{ … }
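/* One kobject per supported hugepage size under the sysfs THP directory. */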
struct thpsize { … };
#define to_thpsize(kobj) …
#define transparent_hugepage_use_zero_page() …
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags);
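/*
 * Splitting large folios: a folio may be split down to @new_order, but never
 * below the minimum order its mapping requires; deferred_split_folio() queues
 * partially mapped folios so they can be split later under memory pressure.
 */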
bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{ … }
void deferred_split_folio(struct folio *folio, bool partially_mapped);
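/*
 * Splitting a huge PMD/PUD rewrites the mapping with entries one level down;
 * with @freeze, the new PTEs are migration entries for @folio.
 */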
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio);
#define split_huge_pmd(__vma, __pmd, __address) …
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze, struct folio *folio);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
pud_t *pudp, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags);
#else
static inline int
change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
pud_t *pudp, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags) { return 0; }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#define split_huge_pud(__vma, __pud, __address) …
int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
int advice);
int madvise_collapse(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
unsigned long end, long adjust_next);
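/*
 * pmd/pud_trans_huge_lock() return the page table lock, held, when the entry
 * maps a huge page (or, for the PMD case, is a huge swap/migration entry);
 * otherwise they return NULL with the lock released.
 */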
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
static inline int is_swap_pmd(pmd_t pmd)
{ … }
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{ … }
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{ … }
static inline bool folio_test_pmd_mappable(struct folio *folio)
{ … }
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
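/*
 * The huge zero folio is a shared, PMD-sized folio of zeroes used for read
 * faults on anonymous mappings; each mm takes a reference with
 * mm_get_huge_zero_folio() and drops it with mm_put_huge_zero_folio().
 */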
extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_folio(const struct folio *folio)
{ … }
static inline bool is_huge_zero_pmd(pmd_t pmd)
{ … }
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
#define mk_huge_pmd(page, prot) …
static inline bool thp_migration_supported(void)
{ … }
void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmd, bool freeze, struct folio *folio);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmdp, struct folio *folio);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
return false;
}
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
unsigned long addr, int order)
{
return false;
}
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
unsigned long addr, unsigned long orders)
{
return 0;
}
static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
unsigned long vm_flags,
unsigned long tva_flags,
unsigned long orders)
{
return 0;
}
#define transparent_hugepage_flags …
#define thp_get_unmapped_area …
static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
return 0;
}
static inline bool
can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order)
{
return 0;
}
static inline int split_huge_page(struct page *page)
{
return 0;
}
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
return 0;
}
static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd …
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
bool freeze, struct folio *folio) {}
static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp,
struct folio *folio)
{
return false;
}
#define split_huge_pud …
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
return -EINVAL;
}
static inline int madvise_collapse(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
return -EINVAL;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
return NULL;
}
static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
return 0;
}
static inline bool is_huge_zero_folio(const struct folio *folio)
{
return false;
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
return false;
}
static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
return NULL;
}
static inline bool thp_migration_supported(void)
{
return false;
}
static inline int highest_order(unsigned long orders)
{
return 0;
}
static inline int next_order(unsigned long *orders, int prev)
{
return 0;
}
static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address)
{
}
static inline int change_huge_pud(struct mmu_gather *tlb,
struct vm_area_struct *vma, pud_t *pudp,
unsigned long addr, pgprot_t newprot,
unsigned long cp_flags)
{
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
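/* Folio split wrappers available with or without CONFIG_TRANSPARENT_HUGEPAGE. */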
static inline int split_folio_to_list_to_order(struct folio *folio,
struct list_head *list, int new_order)
{ … }
static inline int split_folio_to_order(struct folio *folio, int new_order)
{ … }
#endif /* _LINUX_HUGE_MM_H */