// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	…
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	…
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	…
}

static const struct mm_walk_ops migrate_vma_walk_ops = …;

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	…
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
	…
}

/*
 * Unmaps pages for migration. Returns number of source pfns marked as
 * migrating.
 */
static unsigned long migrate_device_unmap(unsigned long *src_pfns,
					  unsigned long npages,
					  struct page *fault_page)
{
	…
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages
 * are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	…
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a range of virtual addresses by collecting all the pages
 * backing each virtual address in the range, saving them inside the src array.
 * Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't pinned
 * have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Any pages that are pinned are then restored
 * by remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding src array
 * entry, thus allowing the caller to allocate device memory for those unbacked
 * virtual addresses. For this the caller simply has to allocate device memory
 * and properly set the destination entry like for regular migration. Note that
 * this can still fail, and thus inside the device driver you must check if the
 * migration was successful for those entries after calling
 * migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set. If the corresponding entry in the dst array has the
 * MIGRATE_PFN_VALID flag set, then migrate_vma_pages() migrates struct page
 * information from the source struct page to the destination struct page. If
 * it fails to migrate the struct page information, then it clears the
 * MIGRATE_PFN_MIGRATE flag in the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set and the
 * dst array entry with the MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages. A sketch of this call sequence for a
 * hypothetical driver is given at the end of this file.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	…
}
EXPORT_SYMBOL(…);

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	…
}

static void __migrate_device_pages(unsigned long *src_pfns,
				   unsigned long *dst_pfns,
				   unsigned long npages,
				   struct migrate_vma *migrate)
{
	…
}

/**
 * migrate_device_pages() - migrate meta-data from src page to dst page
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
 * meta-data from source struct page to destination.
 */
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			  unsigned long npages)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * migrate_device_finalize() - complete page migration
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Completes migration of the page by removing special migration entries.
 * Drivers must ensure copying of page data is complete and visible to the CPU
 * before calling this.
 */
void migrate_device_finalize(unsigned long *src_pfns, unsigned long *dst_pfns,
			     unsigned long npages)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	…
}
EXPORT_SYMBOL(…);

/**
 * migrate_device_range() - migrate device private pfns to normal memory.
 * @src_pfns: array large enough to hold migrating source device private pfns.
 * @start: starting pfn in the range to migrate.
 * @npages: number of pages to migrate.
 *
 * migrate_device_range() is similar in concept to migrate_vma_setup() except
 * that instead of looking up pages based on virtual address mappings a range
 * of device pfns that should be migrated to system memory is used instead.
 *
 * This is useful when a driver needs to free device memory but doesn't know
 * the virtual mappings of every page that may be in device memory. For example
 * this is often the case when a driver is being unloaded or unbound from a
 * device.
 *
 * Like migrate_vma_setup() this function will take a reference and lock any
 * migrating pages that aren't free before unmapping them. Drivers may then
 * allocate destination pages and start copying data from the device to CPU
 * memory before calling migrate_device_pages().
 */
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			 unsigned long npages)
{
	…
}
EXPORT_SYMBOL(…);

/*
 * Migrate a device coherent page back to normal memory. The caller should have
 * a reference on page which will be copied to the new page if migration is
 * successful or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
	…
}
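
/*
 * Illustrative sketch only (not part of the original file): how a hypothetical
 * driver might use the migrate_vma_setup()/migrate_vma_pages()/
 * migrate_vma_finalize() sequence documented above to move one small,
 * vma-contained chunk of system memory into device memory.
 * my_dev_alloc_page() and my_dev_copy_to_device() are assumed driver-specific
 * helpers (the allocator is assumed to return an unlocked ZONE_DEVICE private
 * page), and the caller is assumed to hold the mmap_lock in read mode as
 * migrate_vma_setup() requires.
 */
static int my_dev_migrate_chunk(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	unsigned long src_pfns[64] = { 0 };
	unsigned long dst_pfns[64] = { 0 };
	struct migrate_vma args = {
		.vma	= vma,
		.start	= start,
		.end	= end,
		.src	= src_pfns,
		.dst	= dst_pfns,
		.flags	= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;
	int ret;

	if ((end - start) >> PAGE_SHIFT > ARRAY_SIZE(src_pfns))
		return -EINVAL;

	/* Collect, lock and unmap the source pages. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	for (i = 0; i < args.npages; i++) {
		struct page *spage = migrate_pfn_to_page(src_pfns[i]);
		struct page *dpage;

		/* Entries that could not be isolated are skipped. */
		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = my_dev_alloc_page();		/* hypothetical */
		if (!dpage)
			continue;

		/* Destination pages must be locked before migrate_vma_pages(). */
		lock_page(dpage);

		/*
		 * spage is NULL for pte_none()/pmd_none() holes that were
		 * still marked MIGRATE_PFN_MIGRATE; nothing to copy then.
		 */
		if (spage)
			my_dev_copy_to_device(dpage, spage);	/* hypothetical */

		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Move struct page metadata; clears MIGRATE_PFN_MIGRATE on failure. */
	migrate_vma_pages(&args);

	/*
	 * A real driver would update its device page tables here and free any
	 * destination pages whose src entry lost MIGRATE_PFN_MIGRATE.
	 */

	/* Point CPU ptes at the new pages, or restore the original ones. */
	migrate_vma_finalize(&args);
	return 0;
}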
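
/*
 * Illustrative sketch only (not part of the original file): evicting a
 * physically contiguous range of device private pfns back to system memory
 * with migrate_device_range(), e.g. when a hypothetical driver is being
 * unbound. my_dev_copy_to_host() is an assumed driver-specific device-to-host
 * copy helper.
 */
static int my_dev_evict_range(unsigned long start_pfn, unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;
	unsigned long i;
	int ret = -ENOMEM;

	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
	if (!src_pfns || !dst_pfns)
		goto out;

	/* Reference, lock and unmap every non-free device page in the range. */
	ret = migrate_device_range(src_pfns, start_pfn, npages);
	if (ret)
		goto out;

	for (i = 0; i < npages; i++) {
		struct page *spage = migrate_pfn_to_page(src_pfns[i]);
		struct page *dpage;

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Allocate a system page and copy the device data into it. */
		dpage = alloc_page(GFP_HIGHUSER);
		if (!dpage)
			continue;

		lock_page(dpage);
		my_dev_copy_to_host(dpage, spage);	/* hypothetical */
		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Transfer struct page metadata, then remove the migration entries. */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
out:
	kvfree(dst_pfns);
	kvfree(src_pfns);
	return ret;
}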