linux/arch/x86/include/asm/pgtable_64_types.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H

#include <asm/sparsemem.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/kaslr.h>

/*
 * These are used to make use of C type-checking.
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	p4dval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef struct { pteval_t pte; } pte_t;
typedef struct { pmdval_t pmd; } pmd_t;
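
/*
 * The single-member struct wrappers give each level a distinct type, so
 * that, for example, a raw pteval_t cannot be passed where a pte_t is
 * expected without going through the __pte()/pte_val() accessors.
 */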

extern unsigned int __pgtable_l5_enabled;

#ifdef CONFIG_X86_5LEVEL
#ifdef USE_EARLY_PGTABLE_L5
/*
 * cpu_feature_enabled() is not available in early boot code.
 * Use variable instead.
 */
static inline bool pgtable_l5_enabled(void)
{
	return __pgtable_l5_enabled;
}
#else
#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
#endif /* USE_EARLY_PGTABLE_L5 */

#else
#define pgtable_l5_enabled() 0
#endif /* CONFIG_X86_5LEVEL */
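
/*
 * Early boot code (e.g. arch/x86/kernel/head64.c) defines
 * USE_EARLY_PGTABLE_L5 before its #includes so that it tests the
 * __pgtable_l5_enabled variable directly; cpu_feature_enabled() does
 * not work that early.
 */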

extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;

#endif	/* !__ASSEMBLY__ */

#define SHARED_KERNEL_PMD	0

#ifdef CONFIG_X86_5LEVEL

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	pgdir_shift
#define PTRS_PER_PGD	512

/*
 * 4th level page in 5-level paging case
 */
#define P4D_SHIFT		39
#define MAX_PTRS_PER_P4D	512
#define PTRS_PER_P4D		ptrs_per_p4d
#define P4D_SIZE		(_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK		(~(P4D_SIZE - 1))

#define MAX_POSSIBLE_PHYSMEM_BITS	52
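
/*
 * At runtime pgdir_shift is 48 and ptrs_per_p4d is 512 when LA57 is
 * enabled, so a PGD entry maps 256 TiB and a P4D entry maps 512 GiB;
 * without LA57 they fall back to 39 and 1, collapsing the p4d level.
 */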

#else /* CONFIG_X86_5LEVEL */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT		39
#define PTRS_PER_PGD		512
#define MAX_PTRS_PER_P4D	1

#endif /* CONFIG_X86_5LEVEL */

/*
 * 3rd level page
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512

#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
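
/*
 * With 4 KiB pages each level resolves 9 bits of virtual address:
 * PMD_SHIFT == 21 makes PMD_SIZE 2 MiB, PUD_SHIFT == 30 makes PUD_SIZE
 * 1 GiB, and PGDIR_SHIFT == 39 (4-level) makes PGDIR_SIZE 512 GiB.
 * Each *_MASK clears the offset bits below the corresponding *_SIZE.
 */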

/*
 * See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map.
 *
 * Be very careful vs. KASLR when changing anything here. The KASLR address
 * range must not overlap with anything except the KASAN shadow area, which
 * is correct as KASAN disables KASLR.
 */
#define MAXMEM			(1UL << MAX_PHYSMEM_BITS)
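
/*
 * MAX_PHYSMEM_BITS comes from <asm/sparsemem.h>: 46 bits (64 TiB) with
 * 4-level paging, 52 bits (4 PiB) with 5-level paging.
 */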

#define GUARD_HOLE_PGD_ENTRY	-256UL
#define GUARD_HOLE_SIZE		(16UL << PGDIR_SHIFT)
#define GUARD_HOLE_BASE_ADDR	(GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
#define GUARD_HOLE_END_ADDR	(GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
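
/*
 * For 4-level paging this works out to the 8 TiB hole from
 * 0xffff800000000000 (the first canonical kernel address) to
 * 0xffff880000000000, reserved for hypervisors (see mm.rst).
 */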

#define LDT_PGD_ENTRY		-240UL
#define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR		(LDT_BASE_ADDR + PGDIR_SIZE)

#define __VMALLOC_BASE_L4	0xffffc90000000000UL
#define __VMALLOC_BASE_L5	0xffa0000000000000UL

#define VMALLOC_SIZE_TB_L4	32UL
#define VMALLOC_SIZE_TB_L5	12800UL

#define __VMEMMAP_BASE_L4	0xffffea0000000000UL
#define __VMEMMAP_BASE_L5	0xffd4000000000000UL

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
#define VMALLOC_START		vmalloc_base
#define VMALLOC_SIZE_TB		(pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
#define VMEMMAP_START		vmemmap_base
#else
#define VMALLOC_START		__VMALLOC_BASE_L4
#define VMALLOC_SIZE_TB		VMALLOC_SIZE_TB_L4
#define VMEMMAP_START		__VMEMMAP_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
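
/*
 * With CONFIG_DYNAMIC_MEMORY_LAYOUT (used by KASLR and by boot-time
 * switching to 5-level paging), vmalloc_base and vmemmap_base are
 * variables fixed up during early boot: they default to the *_L4 values
 * above and may be randomized and/or moved to the *_L5 layout.
 */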

/*
 * End of the region for which vmalloc page tables are pre-allocated.
 * For non-KMSAN builds, this is the same as VMALLOC_END.
 * For KMSAN builds, VMALLOC_START..VMEMORY_END is 4 times bigger than
 * VMALLOC_START..VMALLOC_END (see below).
 */
#define VMEMORY_END		(VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)

#ifndef CONFIG_KMSAN
#define VMALLOC_END		VMEMORY_END
#else
/*
 * In KMSAN builds vmalloc area is four times smaller, and the remaining 3/4
 * are used to keep the metadata for virtual pages. The memory formerly
 * belonging to vmalloc area is now laid out as follows:
 *
 * 1st quarter: VMALLOC_START to VMALLOC_END - new vmalloc area
 * 2nd quarter: KMSAN_VMALLOC_SHADOW_START to
 *              VMALLOC_END+KMSAN_VMALLOC_SHADOW_OFFSET - vmalloc area shadow
 * 3rd quarter: KMSAN_VMALLOC_ORIGIN_START to
 *              VMALLOC_END+KMSAN_VMALLOC_ORIGIN_OFFSET - vmalloc area origins
 * 4th quarter: KMSAN_MODULES_SHADOW_START to KMSAN_MODULES_ORIGIN_START
 *              - shadow for modules,
 *              KMSAN_MODULES_ORIGIN_START to
 *              KMSAN_MODULES_ORIGIN_START + MODULES_LEN - origins for modules.
 */
#define VMALLOC_QUARTER_SIZE	((VMALLOC_SIZE_TB << 40) >> 2)
#define VMALLOC_END		(VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)

/*
 * vmalloc metadata addresses are calculated by adding shadow/origin offsets
 * to vmalloc address.
 */
#define KMSAN_VMALLOC_SHADOW_OFFSET	VMALLOC_QUARTER_SIZE
#define KMSAN_VMALLOC_ORIGIN_OFFSET	(VMALLOC_QUARTER_SIZE << 1)

#define KMSAN_VMALLOC_SHADOW_START	(VMALLOC_START + KMSAN_VMALLOC_SHADOW_OFFSET)
#define KMSAN_VMALLOC_ORIGIN_START	(VMALLOC_START + KMSAN_VMALLOC_ORIGIN_OFFSET)

/*
 * The shadow/origin for modules are placed one by one in the last 1/4 of
 * vmalloc space.
 */
#define KMSAN_MODULES_SHADOW_START	(VMALLOC_END + KMSAN_VMALLOC_ORIGIN_OFFSET + 1)
#define KMSAN_MODULES_ORIGIN_START	(KMSAN_MODULES_SHADOW_START + MODULES_LEN)
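
/*
 * For instance, with the default VMALLOC_SIZE_TB of 32 each quarter is
 * 8 TiB: vmalloc proper shrinks to the first quarter, the second and
 * third hold its shadow and origin metadata, and the module
 * shadow/origin pairs occupy the start of the last quarter.
 */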
#endif /* CONFIG_KMSAN */

#define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module section ends with the start of the fixmap */
#ifndef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
#define MODULES_END		_AC(0xffffffffff000000, UL)
#else
#define MODULES_END		_AC(0xfffffffffe000000, UL)
#endif
#define MODULES_LEN		(MODULES_END - MODULES_VADDR)
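
/*
 * For example, with a 512 MiB KERNEL_IMAGE_SIZE and the default
 * MODULES_END, MODULES_VADDR is 0xffffffffa0000000 and MODULES_LEN is
 * 0x5f000000, a bit under 1.5 GiB.
 */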

#define ESPFIX_PGD_ENTRY	_AC(-2, UL)
#define ESPFIX_BASE_ADDR	(ESPFIX_PGD_ENTRY << P4D_SHIFT)

#define CPU_ENTRY_AREA_PGD	_AC(-4, UL)
#define CPU_ENTRY_AREA_BASE	(CPU_ENTRY_AREA_PGD << P4D_SHIFT)

#define EFI_VA_START		( -4 * (_AC(1, UL) << 30))
#define EFI_VA_END		(-68 * (_AC(1, UL) << 30))

#define EARLY_DYNAMIC_PAGE_TABLES	64

#define PGD_KERNEL_START	((PAGE_SIZE / 2) / sizeof(pgd_t))

/*
 * We borrow bit 3 to remember PG_anon_exclusive.
 */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_PWT
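
/*
 * Bit 3 is _PAGE_PWT, which is meaningless in a non-present (swap) PTE,
 * so it is free to carry the marker.
 */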

#endif /* _ASM_X86_PGTABLE_64_DEFS_H */