#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
/*
 * Runtime-adjustable mask of valid physical-address bits (initializer
 * elided in this view).  Exported for use by modules.
 */
phys_addr_t physical_mask __ro_after_init = …;
EXPORT_SYMBOL(…);
#endif
/*
 * Controls whether pte pages may be allocated from highmem; only
 * meaningful with CONFIG_HIGHPTE (both expansions elided here).
 */
#ifdef CONFIG_HIGHPTE
#define PGTABLE_HIGHMEM …
#else
#define PGTABLE_HIGHMEM …
#endif
#ifndef CONFIG_PARAVIRT
/*
 * !CONFIG_PARAVIRT fallback: with no hypervisor hooks in play, freeing
 * a page-table page is simply handing it to the generic mmu_gather
 * batching via tlb_remove_page().
 */
static inline
void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
tlb_remove_page(tlb, table);
}
#endif
/* GFP flags used when allocating user-visible pte pages (value elided). */
gfp_t __userpte_alloc_gfp = …;
/*
 * Allocate one pte page for @mm.  Body elided in this view; presumably
 * honours __userpte_alloc_gfp — confirm against the full source.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm)
{ … }
/*
 * Early boot-parameter handler (parameter name elided); likely tunes
 * __userpte_alloc_gfp from the kernel command line — verify.
 */
static int __init setup_userpte(char *arg)
{ … }
early_param(…);
/*
 * ___{pte,pmd,pud,p4d}_free_tlb(): queue a page-table page of the
 * corresponding level for freeing through the mmu_gather TLB batching
 * machinery (bodies elided in this view).  Higher levels exist only
 * when CONFIG_PGTABLE_LEVELS is large enough, hence the nested #ifs.
 */
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{ … }
#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{ … }
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{ … }
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{ … }
#endif
#endif
#endif
/*
 * pgd list helpers (bodies elided in this view).  Presumably maintain
 * a global list of pgds so kernel mappings can be kept in sync across
 * address spaces — confirm locking rules against the full source.
 */
static inline void pgd_list_add(pgd_t *pgd)
{ … }
static inline void pgd_list_del(pgd_t *pgd)
{ … }
/* Number of pgd slots private to each mm, vs. shared (values elided). */
#define UNSHARED_PTRS_PER_PGD …
#define MAX_UNSHARED_PTRS_PER_PGD …
/* Record / look up the mm owning a pgd page (bodies elided). */
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{ … }
struct mm_struct *pgd_page_get_mm(struct page *page)
{ … }
/* Constructor / destructor run on pgd allocation and free (elided). */
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{ … }
static void pgd_dtor(pgd_t *pgd)
{ … }
#ifdef CONFIG_X86_PAE
/* How many pmds are preallocated per pgd under PAE (values elided). */
#define PREALLOCATED_PMDS …
#define MAX_PREALLOCATED_PMDS …
#define PREALLOCATED_USER_PMDS …
#define MAX_PREALLOCATED_USER_PMDS …
/*
 * Install @pmd into the PAE top-level (PDPT) entry @pudp for @mm.
 */
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
/* Tell any paravirt layer about the new pmd page. */
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
/*
 * Only _PAGE_PRESENT is ORed in: at the PAE PDPT level almost all
 * other attribute bits are architecturally reserved.
 */
set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
/*
 * The CPU caches PAE PDPT entries when cr3 is loaded, so updating
 * the entry in memory is not enough — flush the mm so the new
 * top-level entry takes effect.  NOTE(review): rationale inferred
 * from the PAE architecture; confirm against Intel's
 * paging-structure-cache documentation.
 */
flush_tlb_mm(mm);
}
#else
/* Non-PAE: no pmds are preallocated (values elided). */
#define PREALLOCATED_PMDS …
#define MAX_PREALLOCATED_PMDS …
#define PREALLOCATED_USER_PMDS …
#define MAX_PREALLOCATED_USER_PMDS …
#endif
/* Free @count preallocated pmds (body elided in this view). */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{ … }
/* Allocate @count pmds up front for a new pgd (body elided). */
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{ … }
/* Tear down one pmd referenced from @pgdp (body elided). */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{ … }
/* Tear down all preallocated pmds hanging off @pgdp (body elided). */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{ … }
/* Wire the preallocated @pmds into a fresh @pgd (body elided). */
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{ … }
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/* PTI enabled: also populate the user-space shadow pgd (body elided). */
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
pgd_t *k_pgd, pmd_t *pmds[])
{ … }
#else
/* Without PTI there is no user shadow pgd — nothing to do. */
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif
#ifdef CONFIG_X86_PAE
#include <linux/slab.h>
/* Size and alignment of a PAE pgd allocation (values elided). */
#define PGD_SIZE …
#define PGD_ALIGN …
/* Slab cache backing pgd allocations; created in pgtable_cache_init(). */
static struct kmem_cache *pgd_cache;
/*
 * Set up the slab cache used for pgd allocations on PAE configurations
 * that share the kernel pmd.  When the kernel pmd is not shared, pgds
 * come straight from the page allocator (see _pgd_alloc()), so no
 * cache is created.  SLAB_PANIC makes creation failure fatal at boot.
 */
void __init pgtable_cache_init(void)
{
	if (SHARED_KERNEL_PMD)
		pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE,
					      PGD_ALIGN, SLAB_PANIC,
					      NULL);
}
/*
 * Allocate a pgd.  With a shared kernel pmd only PGD_SIZE bytes are
 * needed and the dedicated slab cache is used; otherwise a full
 * order-PGD_ALLOCATION_ORDER page allocation is required.
 */
static inline pgd_t *_pgd_alloc(void)
{
	if (SHARED_KERNEL_PMD)
		return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);

	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
					 PGD_ALLOCATION_ORDER);
}
/*
 * Release a pgd back to wherever _pgd_alloc() obtained it: the slab
 * cache when the kernel pmd is shared, the page allocator otherwise.
 */
static inline void _pgd_free(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		kmem_cache_free(pgd_cache, pgd);
	else
		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#else
/*
 * Non-PAE: pgd allocation helpers (bodies elided in this view); the
 * interface mirrors the PAE variants above.
 */
static inline pgd_t *_pgd_alloc(void)
{ … }
static inline void _pgd_free(pgd_t *pgd)
{ … }
#endif
/* Allocate and initialize a new pgd for @mm (body elided). */
pgd_t *pgd_alloc(struct mm_struct *mm)
{ … }
/* Free a pgd previously obtained from pgd_alloc() (body elided). */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{ … }
/*
 * Update a pte's access/dirty state in response to a fault.  Body
 * elided; conventionally returns nonzero when the entry was changed —
 * verify against the full source.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{ … }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* pmd/pud counterparts of ptep_set_access_flags() (bodies elided). */
int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{ … }
int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
pud_t *pudp, pud_t entry, int dirty)
{ … }
#endif
/* Test and clear the accessed ("young") bit of a pte (body elided). */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{ … }
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
/* As above, for pmd entries (body elided). */
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{ … }
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* As above, for pud entries (body elided). */
int pudp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp)
{ … }
#endif
/* Clear the accessed bit and flush the TLB entry (body elided). */
int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{ … }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* pmd counterpart of ptep_clear_flush_young() (body elided). */
int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{ … }
/*
 * Invalidate a pmd and return its old value; the "_ad" suffix suggests
 * special accessed/dirty-bit handling — body elided, confirm against
 * the full source.
 */
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{ … }
#endif
/* Reserve @reserve bytes at the top of virtual memory (body elided). */
void __init reserve_top_address(unsigned long reserve)
{ … }
/*
 * Count of populated fixmap slots; presumably bumped by
 * __native_set_fixmap() — body elided, verify.
 */
int fixmaps_set;
/* Install @pte at fixmap slot @idx (body elided). */
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{ … }
/* Wrapper building the pte from @phys and @flags (body elided). */
void native_set_fixmap(unsigned idx,
phys_addr_t phys, pgprot_t flags)
{ … }
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/*
 * Huge-mapping support for vmalloc/ioremap: install or tear down
 * large-page entries at each page-table level (bodies elided in this
 * view).  p4d variants exist only with 5-level paging.
 */
#ifdef CONFIG_X86_5LEVEL
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{ … }
void p4d_clear_huge(p4d_t *p4d)
{ … }
#endif
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{ … }
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{ … }
int pud_clear_huge(pud_t *pud)
{ … }
int pmd_clear_huge(pmd_t *pmd)
{ … }
#ifdef CONFIG_X86_64
/* Free the page-table page beneath a cleared huge entry (elided). */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{ … }
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{ … }
#else
/*
 * 32-bit: never actually free the pte page under a pmd — report
 * success only if the pmd is already clear.  NOTE(review): this is
 * deliberately conservative upstream (avoids freeing pages that may
 * be referenced by synced kernel mappings) — confirm rationale.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return pmd_none(*pmd);
}
#endif
#endif
/*
 * Make a pte/pmd writable, taking the owning VMA into account (bodies
 * elided; presumably needed for per-VMA write semantics such as shadow
 * stacks — verify against the full source).
 */
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{ … }
pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{ … }
/* Arch-specific sanity checks when zapping entries (bodies elided). */
void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte)
{ … }
void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
{ … }