linux/include/linux/migrate.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
typedef void free_folio_t(struct folio *folio, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_UNMAP		1

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved.  The page
 * is locked and the driver should not unlock it.  The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable.  After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page.  The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN.  The VM interprets this as a temporary migration failure and
 * will retry it later.  Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function.  It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function.  The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
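
/*
 * Illustrative sketch only, not part of this header: roughly how a driver
 * might implement movable_operations for its own pages.  All demo_* names
 * are hypothetical.
 *
 *	static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		return true;
 *	}
 *
 *	static int demo_migrate_page(struct page *dst, struct page *src,
 *			enum migrate_mode mode)
 *	{
 *		copy_highpage(dst, src);
 *		__ClearPageMovable(src);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void demo_putback_page(struct page *page)
 *	{
 *	}
 *
 *	static const struct movable_operations demo_mops = {
 *		.isolate_page	= demo_isolate_page,
 *		.migrate_page	= demo_migrate_page,
 *		.putback_page	= demo_putback_page,
 *	};
 *
 * The driver opts a page in with __SetPageMovable(page, &demo_mops) while
 * holding the page lock; compaction can then isolate and migrate it through
 * these callbacks.
 */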

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

void putback_movable_pages(struct list_head *l);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
		  unsigned long private, enum migrate_mode mode, int reason,
		  unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
		__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);
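
/*
 * Illustrative sketch only: a caller that has isolated folios onto a private
 * list can drive them through migrate_pages() with its own new_folio_t and
 * free_folio_t callbacks.  demo_alloc_dst() and demo_free_dst() are
 * hypothetical.
 *
 *	static struct folio *demo_alloc_dst(struct folio *src, unsigned long private)
 *	{
 *		return folio_alloc(GFP_HIGHUSER_MOVABLE, folio_order(src));
 *	}
 *
 *	static void demo_free_dst(struct folio *dst, unsigned long private)
 *	{
 *		folio_put(dst);
 *	}
 *
 *	LIST_HEAD(folio_list);
 *	unsigned int nr_succeeded;
 *
 *	(isolate candidate folios onto folio_list)
 *
 *	if (migrate_pages(&folio_list, demo_alloc_dst, demo_free_dst, 0,
 *			  MIGRATE_SYNC, MR_COMPACTION, &nr_succeeded))
 *		putback_movable_pages(&folio_list);
 *
 * Any folios still on the list after a non-zero return are handed back to
 * the LRU (or to their driver) by putback_movable_pages().
 */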

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_folio_t new,
		free_folio_t free, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct folio *alloc_migration_target(struct folio *src,
		unsigned long private)
	{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return false; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *folio_movable_ops(struct folio *folio)
{
	VM_BUG_ON(!__folio_test_movable(folio));

	return (const struct movable_operations *)
		((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
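
/*
 * Note on the encoding used above (informational): __SetPageMovable() stores
 * the movable_operations pointer in page->mapping with the
 * PAGE_MAPPING_MOVABLE tag bit set, roughly
 *
 *	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
 *
 * so folio_movable_ops()/page_movable_ops() only have to strip that tag bit
 * to recover the ops pointer.
 */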

#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node);
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			   int node);
#else
static inline int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline int migrate_misplaced_folio(struct folio *folio,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
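
/*
 * Illustrative sketch only: a NUMA hinting fault handler typically isolates
 * the folio first and then migrates it to the preferred node, e.g.
 *
 *	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
 *		return 0;
 *	migrate_misplaced_folio(folio, vma, target_nid);
 *
 * With CONFIG_NUMA_BALANCING disabled both calls collapse to the -EAGAIN
 * stubs above, so the folio simply stays where it is.
 */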

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where unsigned long is only 32 bits wide
 * and might not have enough bits to store the full pfn plus the flags below.
 * So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
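
/*
 * Illustrative sketch only: device drivers encode destination pfns (and core
 * code encodes source pfns) into these tagged values, e.g.
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_WRITE;
 *	struct page *p = migrate_pfn_to_page(mpfn);
 *
 * migrate_pfn_to_page() returns NULL when MIGRATE_PFN_VALID is not set; the
 * low MIGRATE_PFN_SHIFT bits carry the flags and the pfn sits above them.
 */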

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part
	 * of a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);
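
/*
 * Illustrative sketch only (demo_copy_pages() is hypothetical): the usual
 * three-phase device migration sequence built on the helpers above.
 *
 *	struct migrate_vma args = {
 *		.vma	= vma,
 *		.start	= start,
 *		.end	= end,
 *		.src	= src_pfns,
 *		.dst	= dst_pfns,
 *		.flags	= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	demo_copy_pages(&args);
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 *
 * demo_copy_pages() stands in for the driver step that allocates destination
 * pages, copies the data and fills args.dst with migrate_pfn() values.
 * Entries whose MIGRATE_PFN_MIGRATE flag is clear afterwards did not migrate
 * and are left in place.
 */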

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */