// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{ … }

static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{ … }

/**
 * check_pte - check if [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, containing the pte and the pfn range
 * to check
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{ … }

/* Returns true if the two ranges overlap. Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{ … }

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{ … }

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, pgoff, vma,
 * address and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{ … }

#ifdef CONFIG_MEMORY_FAILURE
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Return: The address the page is mapped at if the page is in the range
 * covered by the VMA and present in the page table. If the page is
 * outside the VMA or not present, returns -EFAULT.
 * Only valid for normal file or anonymous VMAs.
 */
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{ … }
#endif
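
/*
 * Sketch of the overflow-safe overlap test that the comment on check_pmd()
 * refers to (illustrative only; this is an assumption about the technique,
 * not a reproduction of the elided body). Comparing against the *last* pfn
 * of each range, rather than the one-past-the-end sum, keeps "start + size"
 * from wrapping past zero for ranges ending at the top of the pfn space.
 */
static inline bool example_pfn_ranges_overlap(unsigned long a, unsigned long na,
					      unsigned long b, unsigned long nb)
{
	/* Both ranges are assumed non-empty, so "last pfn" cannot underflow. */
	return a <= b + nb - 1 && b <= a + na - 1;
}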
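
/*
 * Usage sketch for page_vma_mapped_walk(): the function below is
 * hypothetical, but DEFINE_FOLIO_VMA_WALK() and page_vma_mapped_walk_done()
 * are the real helpers from include/linux/rmap.h. Rmap walkers such as
 * folio_referenced_one() follow this loop pattern to visit every PTE (or
 * the PMD) mapping a folio in one VMA.
 */
static bool __maybe_unused example_walk_one(struct folio *folio,
					    struct vm_area_struct *vma,
					    unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* One PTE of a PTE-mapped folio; pvmw.ptl is held. */
		} else {
			/* PMD-mapped THP: pvmw.pmd is set, pvmw.pte is NULL. */
		}
		/*
		 * To stop early, call page_vma_mapped_walk_done(&pvmw) and
		 * break: it drops pvmw.ptl and unmaps pvmw.pte.
		 */
	}
	return true;
}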
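
/*
 * Sketch of the PVMW_MIGRATION case described in the check_pte() kerneldoc
 * (hypothetical caller, modeled on remove_migration_ptes() in mm/migrate.c):
 * with this flag set, the walk matches PTEs holding migration entries for
 * the folio instead of present mappings.
 */
static void __maybe_unused example_find_migration_entries(struct folio *folio,
							   struct vm_area_struct *vma,
							   unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address,
			      PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		/* *pvmw.pte holds a migration entry pointing into the folio. */
	}
}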
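
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Usage sketch for page_mapped_in_vma(), following its Return: description
 * above. The caller below is hypothetical; the real user is the hwpoison
 * path in mm/memory-failure.c, which records the user virtual address to
 * report to the process owning the mapping.
 */
static void __maybe_unused example_note_poisoned_address(struct page *page,
							 struct vm_area_struct *vma)
{
	unsigned long addr = page_mapped_in_vma(page, vma);

	if (addr == -EFAULT)
		return;	/* outside the VMA or not present in the page table */
	/* ... addr is the user virtual address mapping the page ... */
}
#endif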