#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H
#include <linux/types.h>
#ifdef __KERNEL__
#include <asm/page_types.h>
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif
#ifndef __ASSEMBLY__
struct page;
#include <linux/range.h>
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;
/*
 * Zero a page that is about to be mapped into user space.
 * @page:  kernel virtual address of the page to clear
 * @vaddr: user virtual address the page will be mapped at (unused on x86)
 * @pg:    struct page backing the allocation (unused on x86)
 *
 * x86 caches are physically indexed, so no per-mapping cache flushing
 * is needed and this reduces to a plain clear_page().
 */
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}
/*
 * Copy one page for a user-space mapping (e.g. COW fault handling).
 * @to:     kernel virtual address of the destination page
 * @from:   kernel virtual address of the source page
 * @vaddr:  user virtual address of the mapping (unused on x86)
 * @topage: struct page of the destination (unused on x86)
 *
 * As with clear_user_page(), x86 needs no cache aliasing fixups, so
 * this is just copy_page().
 */
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
#define vma_alloc_zeroed_movable_folio …
#ifndef __pa
#define __pa …
#endif
#define __pa_nodebug …
#define __pa_symbol …
#ifndef __va
#define __va …
#endif
#define __boot_va …
#define __boot_pa …
#define virt_to_page …
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid …
/*
 * pfn_to_kaddr - convert a page frame number to its kernel virtual address
 * @pfn: page frame number
 *
 * Returns __va() of the frame's physical address.  NOTE(review): __va()
 * is only meaningful for memory covered by the kernel direct mapping —
 * callers are presumed to pass such pfns; not verifiable from this file.
 */
static __always_inline void *pfn_to_kaddr(unsigned long pfn)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	return __va(paddr);
}
/*
 * __canonical_address - sign-extend a virtual address to canonical form
 * @vaddr:      virtual address to canonicalize
 * @vaddr_bits: number of meaningful low bits in the address (e.g. 48 or 57)
 *
 * Replicates bit (vaddr_bits - 1) into all higher bits: shift the valid
 * bits up so the top valid bit lands in bit 63, then arithmetic-shift
 * back down.
 *
 * The left shift is performed on the unsigned value: left-shifting a
 * signed integer whose high bits are set is undefined behavior in ISO C,
 * and only the right shift actually needs to be arithmetic.  The result
 * is bit-identical to shifting in the signed domain on two's-complement
 * targets.
 *
 * @vaddr_bits must be in 1..64; a value of 0 would shift by the full
 * type width, which is undefined (unchanged from the prior behavior).
 */
static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return (u64)((s64)(vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}
/*
 * __is_canonical_address - test whether an address is in canonical form
 * @vaddr:      virtual address to test
 * @vaddr_bits: number of meaningful low bits in the address space
 *
 * An address is canonical iff sign-extending its valid bits leaves it
 * unchanged.  Returns 1 if @vaddr is canonical, 0 otherwise.
 */
static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return vaddr == __canonical_address(vaddr, vaddr_bits);
}
#endif
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#endif