#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H
#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>
#ifdef CONFIG_MMU
#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif
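/*
 * A swp_entry_t is the arch-independent encoding of a swap entry: the swap
 * `type' sits in the high bits above SWP_TYPE_SHIFT and the `offset' in the
 * low bits covered by SWP_OFFSET_MASK.  The arch-dependent format only ever
 * lives inside ptes/pmds and is converted via pte_to_swp_entry() and
 * swp_entry_to_pte() below.
 */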
#define SWP_TYPE_SHIFT …
#define SWP_OFFSET_MASK …
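/*
 * Some special entries (migration, device private/exclusive, hwpoison) store
 * a pfn in the offset field instead of a swapfile offset.  SWP_PFN_BITS and
 * SWP_PFN_MASK size that pfn field, from MAX_PHYSMEM_BITS when available.
 */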
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS …
#else
#define SWP_PFN_BITS …
#endif
#define SWP_PFN_MASK …
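/*
 * Migration entries may additionally carry the young and dirty state of the
 * original pte in bits above the pfn, provided the arch leaves enough swap
 * offset bits for them (see migration_entry_supports_ad()).
 */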
#define SWP_MIG_YOUNG_BIT …
#define SWP_MIG_DIRTY_BIT …
#define SWP_MIG_TOTAL_BITS …
#define SWP_MIG_YOUNG …
#define SWP_MIG_DIRTY …
static inline bool is_pfn_swap_entry(swp_entry_t entry);
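/* Clear the software-defined bits (soft-dirty, uffd-wp, exclusive) from a swap pte. */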
static inline pte_t pte_swp_clear_flags(pte_t pte)
{ … }
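/*
 * swp_entry() packs a (type, offset) pair into the arch-independent format;
 * swp_type() and swp_offset() extract the two fields again.
 */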
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{ … }
static inline unsigned swp_type(swp_entry_t entry)
{ … }
static inline pgoff_t swp_offset(swp_entry_t entry)
{ … }
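/*
 * Extract the pfn stored in a pfn swap entry; only meaningful when
 * is_pfn_swap_entry() is true for the entry.
 */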
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{ … }
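/* A pte holds a swap entry if it is neither none nor present. */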
static inline int is_swap_pte(pte_t pte)
{ … }
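/*
 * Convert between the arch-dependent pte encoding of a swap entry and the
 * arch-independent swp_entry_t.
 */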
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{ … }
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{ … }
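/*
 * Swap entries can be stored in a page-cache xarray (e.g. by shmem for
 * swapped-out pages) as value entries; these helpers convert between the
 * xarray encoding and a swp_entry_t.
 */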
static inline swp_entry_t radix_to_swp_entry(void *arg)
{ … }
static inline void *swp_to_radix_entry(swp_entry_t entry)
{ … }
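/*
 * Device private entries cover ZONE_DEVICE private memory: pages owned by a
 * device driver that the CPU cannot access directly, so a CPU fault on such
 * an entry hands control back to the driver.  Device exclusive entries mark
 * pages temporarily mapped for exclusive access by a device (e.g. for device
 * atomics).  Without CONFIG_DEVICE_PRIVATE these all degrade to no-op stubs.
 */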
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{ … }
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{ … }
static inline bool is_device_private_entry(swp_entry_t entry)
{ … }
static inline bool is_writable_device_private_entry(swp_entry_t entry)
{ … }
static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{ … }
static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{ … }
static inline bool is_device_exclusive_entry(swp_entry_t entry)
{ … }
static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{ … }
#else
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}
static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}
static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */
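/*
 * Migration entries temporarily replace the ptes of a page while it is being
 * migrated.  A faulting thread blocks in migration_entry_wait() until the
 * migration finishes; the writable/readable/readable-exclusive variants
 * preserve the original mapping's protection and anon-exclusive state.
 */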
#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{ … }
static inline int is_writable_migration_entry(swp_entry_t entry)
{ … }
static inline int is_readable_migration_entry(swp_entry_t entry)
{ … }
static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{ … }
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{ … }
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{ … }
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{ … }
static inline bool migration_entry_supports_ad(void)
{ … }
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{ … }
static inline bool is_migration_entry_young(swp_entry_t entry)
{ … }
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{ … }
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{ … }
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
#else
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}
static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}
static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_MIGRATION */
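/*
 * hwpoison entries replace the ptes of a page that has been taken offline
 * after an uncorrected memory error, so that later accesses fault instead of
 * touching the poisoned memory.
 */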
#ifdef CONFIG_MEMORY_FAILURE
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{ … }
static inline int is_hwpoison_entry(swp_entry_t entry)
{ … }
#else
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}
static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
#endif
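/*
 * A pte marker is a special swap entry installed in an otherwise none pte to
 * remember per-address state, currently the userfaultfd write-protect bit
 * (PTE_MARKER_UFFD_WP) and poisoned memory (PTE_MARKER_POISONED).
 */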
typedef unsigned long pte_marker;
#define PTE_MARKER_UFFD_WP …
#define PTE_MARKER_POISONED …
#define PTE_MARKER_MASK …
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{ … }
static inline bool is_pte_marker_entry(swp_entry_t entry)
{ … }
static inline pte_marker pte_marker_get(swp_entry_t entry)
{ … }
static inline bool is_pte_marker(pte_t pte)
{ … }
static inline pte_t make_pte_marker(pte_marker marker)
{ … }
static inline swp_entry_t make_poisoned_swp_entry(void)
{ … }
static inline int is_poisoned_swp_entry(swp_entry_t entry)
{ … }
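/* True if the pte is none or only carries a pte marker. */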
static inline int pte_none_mostly(pte_t pte)
{ … }
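/*
 * Migration, device private/exclusive and hwpoison entries all embed a pfn;
 * these helpers map such an entry back to its struct page or folio.
 */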
static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{ … }
static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
{ … }
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{ … }
struct page_vma_mapped_walk;
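/*
 * With CONFIG_ARCH_ENABLE_THP_MIGRATION a THP can be migrated without being
 * split first: set_pmd_migration_entry() replaces the mapping pmd with a
 * migration entry and pmd_migration_entry_wait() lets faulting threads wait
 * for it, mirroring the pte-level helpers above.
 */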
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
				   struct page *page);
extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
				 struct page *new);
extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{ … }
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{ … }
static inline int is_pmd_migration_entry(pmd_t pmd)
{ … }
#else
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
					  struct page *page)
{
	BUILD_BUG();
}
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
					struct page *new)
{
	BUILD_BUG();
}
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}
static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
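/*
 * True for any entry whose type lies outside the range of real swapfiles,
 * i.e. migration, device, hwpoison and marker entries.
 */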
static inline int non_swap_entry(swp_entry_t entry)
{ … }
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */