linux/arch/x86/include/asm/page_types.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H

#include <linux/const.h>
#include <linux/types.h>
#include <linux/mem_encrypt.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))
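/*
 * With PAGE_SHIFT == 12 this yields 4 KiB pages: PAGE_SIZE is 0x1000 and
 * PAGE_MASK is ~0xfffUL, so (addr & PAGE_MASK) rounds an address down to
 * its page boundary.
 */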

#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
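/*
 * __VIRTUAL_MASK_SHIFT is provided by page_32_types.h/page_64_types.h
 * included below (e.g. 47 or 56 on 64-bit, depending on 4- vs 5-level
 * paging), so __VIRTUAL_MASK covers the usable virtual address bits.
 */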

/* Cast P*D_MASK to a signed type so that it is sign-extended if
   virtual addresses are 32-bits but physical addresses are larger
   (ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_MASK) & __PHYSICAL_MASK)
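/*
 * Example of the sign extension above: on 32-bit PAE, PAGE_MASK is the
 * 32-bit value 0xfffff000; going through (signed long) before the AND with
 * the 64-bit __PHYSICAL_MASK widens it to 0xfffffffffffff000, so physical
 * address bits above 4 GiB survive the masking.
 */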

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
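/*
 * A huge page covers one PMD entry: 2 MiB with 64-bit/PAE page tables
 * (PMD_SHIFT == 21), 4 MiB with 2-level 32-bit paging (PMD_SHIFT == 22).
 */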

#define HUGE_MAX_HSTATE 2

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
			+ (CONFIG_PHYSICAL_ALIGN - 1)) \
			& ~(CONFIG_PHYSICAL_ALIGN - 1))
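/*
 * I.e. CONFIG_PHYSICAL_START rounded up to CONFIG_PHYSICAL_ALIGN.  With
 * the usual Kconfig defaults (0x1000000 and 0x200000) this evaluates to
 * 0x1000000, i.e. a 16 MiB load address.
 */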

#define __START_KERNEL		(__START_KERNEL_map + LOAD_PHYSICAL_ADDR)

#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#define IOREMAP_MAX_ORDER       (PUD_SHIFT)
#else
#include <asm/page_32_types.h>
#define IOREMAP_MAX_ORDER       (PMD_SHIFT)
#endif	/* CONFIG_X86_64 */
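/*
 * IOREMAP_MAX_ORDER bounds the alignment vmalloc uses for VM_IOREMAP
 * areas, allowing huge ioremap mappings up to PUD size on 64-bit and
 * PMD size on 32-bit.
 */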

#ifndef __ASSEMBLY__

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
extern phys_addr_t physical_mask;
#define __PHYSICAL_MASK		physical_mask
#else
#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
#endif
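/*
 * The static __PHYSICAL_MASK is derived from __PHYSICAL_MASK_SHIFT; the
 * dynamic physical_mask variant exists because memory encryption (e.g.
 * AMD SME) repurposes high physical address bits and the mask has to
 * shrink at runtime.
 */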

extern int devmem_is_allowed(unsigned long pagenr);

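/*
 * Highest page frames covered by the kernel's direct mapping, updated as
 * the direct map is built: max_low_pfn_mapped for the range below 4 GiB,
 * max_pfn_mapped overall.
 */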
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;

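/* Upper bound of the direct mapping, as a physical byte address. */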
static inline phys_addr_t get_max_mapped(void)
{
	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}

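/*
 * Returns true if the page frame range [start_pfn, end_pfn) is fully
 * covered by the kernel's direct mapping.
 */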
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);

extern void initmem_init(void);

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_X86_PAGE_DEFS_H */