
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{}

static __always_inline
struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
						 unsigned long addr)
{}

#ifdef CONFIG_PER_VMA_LOCK
/*
 * uffd_lock_vma() - Lookup and lock vma corresponding to @address.
 * @mm: mm to search vma in.
 * @address: address that the vma should contain.
 *
 * Should be called without holding mmap_lock.
 *
 * Return: A locked vma containing @address, -ENOENT if no vma is found, or
 * -ENOMEM if anon_vma couldn't be allocated.
 */
static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm,
				       unsigned long address)
{}
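/*
 * Hedged sketch, not this file's implementation: a simplified
 * illustration of the fast path that uffd_lock_vma() documents above.
 * It only shows the lockless per-VMA lookup via lock_vma_under_rcu();
 * the real code additionally falls back to mmap_lock when the
 * anon_vma still has to be allocated.  The helper name and the
 * -EAGAIN simplification are hypothetical.
 */
static struct vm_area_struct *uffd_lock_vma_sketch(struct mm_struct *mm,
						   unsigned long address)
{
	struct vm_area_struct *vma;

	/* Take only the per-VMA read lock, never mmap_lock. */
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		/* simplified: NULL may also just mean "retry under mmap_lock" */
		return ERR_PTR(-ENOENT);

	/*
	 * Private VMAs need an anon_vma before pages can be installed,
	 * and anon_vma_prepare() cannot run under the per-VMA lock.
	 */
	if (!(vma->vm_flags & VM_SHARED) && !vma->anon_vma) {
		vma_end_read(vma);
		/* simplified: the real code retries under mmap_lock */
		return ERR_PTR(-EAGAIN);
	}
	return vma;
}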

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{}

#else

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	mmap_read_lock(dst_mm);
	dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
	if (IS_ERR(dst_vma))
		goto out_unlock;

	if (validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	dst_vma = ERR_PTR(-ENOENT);
out_unlock:
	mmap_read_unlock(dst_mm);
	return dst_vma;
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	mmap_read_unlock(vma->vm_mm);
}
#endif

/* Check if dst_addr is outside of the file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
				 unsigned long dst_addr)
{}
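/*
 * Hedged sketch, not necessarily this file's exact code: the check
 * described above compares the destination offset against the backing
 * file's size; holding the PTE lock keeps the result stable while the
 * PTE is installed.  The helper name is hypothetical.
 */
static bool mfill_file_over_size_sketch(struct vm_area_struct *dst_vma,
					unsigned long dst_addr)
{
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!dst_vma->vm_file)
		return false;	/* anonymous mapping: no file size to exceed */

	inode = file_inode(dst_vma->vm_file);
	offset = linear_page_index(dst_vma, dst_addr);
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	return offset >= max_off;
}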

/*
 * Install PTEs to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{}
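/*
 * Hedged sketch (hypothetical helper, not part of the real
 * implementation): how the PTE value installed by the function above
 * can be built.  The real code also handles the page-cache/CONTINUE
 * cases and avoids dirtying read-only file pages; this only shows the
 * write vs. uffd-wp decision for the MFILL_ATOMIC_WP flag.
 */
static pte_t mfill_pte_value_sketch(struct vm_area_struct *dst_vma,
				    struct page *page, uffd_flags_t flags)
{
	pte_t pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));

	if (flags & MFILL_ATOMIC_WP)
		/* UFFDIO_*_MODE_WP: map write-protected so the next write faults */
		pte = pte_mkuffd_wp(pte_wrprotect(pte));
	else
		/* grant write access only if the VMA itself is writable */
		pte = maybe_mkwrite(pte, dst_vma);

	return pte;
}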

static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{}

static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
					 struct vm_area_struct *dst_vma,
					 unsigned long dst_addr)
{}

static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{}
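/*
 * Hedged sketch, not this file's implementation: one way to picture
 * the UFFDIO_CONTINUE step above -- look up the folio that already
 * backs the shmem offset and install a PTE for it through
 * mfill_atomic_install_pte().  It assumes filemap_lock_folio()
 * returns an ERR_PTR on a miss (recent kernels); the real code goes
 * through shmem_get_folio() and carries more error handling.  The
 * helper name is hypothetical.
 */
static int mfill_pte_continue_sketch(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct address_space *mapping = dst_vma->vm_file->f_mapping;
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	int ret;

	folio = filemap_lock_folio(mapping, pgoff);
	if (IS_ERR(folio))
		return -EFAULT;	/* nothing here yet: caller must UFFDIO_COPY */

	/* newly_allocated == false: the page cache already owns this folio */
	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       folio_file_page(folio, pgoff),
				       false, flags);
	folio_unlock(folio);
	if (ret)
		folio_put(folio);	/* on success the mapping keeps the ref */
	return ret;
}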

/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr,
				   uffd_flags_t flags)
{}
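/*
 * Hedged sketch (hypothetical helper, simplified error handling): the
 * core of the UFFDIO_POISON step above is to install a poison PTE
 * marker so that later accesses to dst_addr fault as if the page were
 * hardware-poisoned, without allocating any memory.
 */
static int mfill_pte_poison_sketch(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
	spinlock_t *ptl;
	pte_t *dst_pte;
	int ret = 0;

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		return -EAGAIN;	/* the pmd changed under us: let the caller retry */

	if (!pte_none(ptep_get(dst_pte)))
		ret = -EEXIST;	/* never overwrite an existing mapping */
	else
		set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}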

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with either the vma-lock or mmap_lock held; it will release
 * the lock before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{}

static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    uffd_flags_t flags)
{}

ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  uffd_flags_t flags)
{}

ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
			      unsigned long start,
			      unsigned long len)
{}

ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
			      unsigned long len, uffd_flags_t flags)
{}

ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
			    unsigned long len, uffd_flags_t flags)
{}

long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{}

int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
			unsigned long len, bool enable_wp)
{}


void double_pt_lock(spinlock_t *ptl1,
		    spinlock_t *ptl2)
	__acquires(ptl1)
	__acquires(ptl2)
{}

void double_pt_unlock(spinlock_t *ptl1,
		      spinlock_t *ptl2)
	__releases(ptl1)
	__releases(ptl2)
{}


static int move_present_pte(struct mm_struct *mm,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma,
			    unsigned long dst_addr, unsigned long src_addr,
			    pte_t *dst_pte, pte_t *src_pte,
			    pte_t orig_dst_pte, pte_t orig_src_pte,
			    spinlock_t *dst_ptl, spinlock_t *src_ptl,
			    struct folio *src_folio)
{}

static int move_swap_pte(struct mm_struct *mm,
			 unsigned long dst_addr, unsigned long src_addr,
			 pte_t *dst_pte, pte_t *src_pte,
			 pte_t orig_dst_pte, pte_t orig_src_pte,
			 spinlock_t *dst_ptl, spinlock_t *src_ptl)
{}

static int move_zeropage_pte(struct mm_struct *mm,
			     struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma,
			     unsigned long dst_addr, unsigned long src_addr,
			     pte_t *dst_pte, pte_t *src_pte,
			     pte_t orig_dst_pte, pte_t orig_src_pte,
			     spinlock_t *dst_ptl, spinlock_t *src_ptl)
{}


/*
 * The mmap_lock for reading is held by the caller. Just move the page
 * from src_pmd to dst_pmd if possible, returning 0 if it succeeded in
 * moving the page and a negative error code otherwise.
 */
static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *dst_vma,
			  struct vm_area_struct *src_vma,
			  unsigned long dst_addr, unsigned long src_addr,
			  __u64 mode)
{}
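/*
 * Hedged sketch (hypothetical helper, not this file's code): the core
 * of the present-PTE case of the move described above, with both page
 * table locks assumed held via double_pt_lock() and the folio already
 * locked and known to be anonymous and exclusive.
 */
static int move_present_pte_sketch(struct mm_struct *mm,
				   struct vm_area_struct *dst_vma,
				   struct vm_area_struct *src_vma,
				   unsigned long dst_addr, unsigned long src_addr,
				   pte_t *dst_pte, pte_t *src_pte,
				   struct folio *src_folio)
{
	pte_t orig_src_pte, pte;

	/* Unmap the source PTE and flush the TLB entry for it. */
	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);

	/* A concurrent GUP pin means the page contents may still be in use. */
	if (folio_maybe_dma_pinned(src_folio)) {
		set_pte_at(mm, src_addr, src_pte, orig_src_pte);
		return -EBUSY;
	}

	/* Re-home the anon folio: it now belongs to dst_vma at dst_addr. */
	folio_move_anon_rmap(src_folio, dst_vma);
	src_folio->index = linear_page_index(dst_vma, dst_addr);

	/* Map the same physical page at the destination address. */
	pte = pte_mkdirty(mk_pte(&src_folio->page, dst_vma->vm_page_prot));
	pte = maybe_mkwrite(pte, dst_vma);
	set_pte_at(mm, dst_addr, dst_pte, pte);
	return 0;
}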

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{}
#else
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	/* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
	return false;
}
#endif

static inline bool vma_move_compatible(struct vm_area_struct *vma)
{}

static int validate_move_areas(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *src_vma,
			       struct vm_area_struct *dst_vma)
{}

static __always_inline
int find_vmas_mm_locked(struct mm_struct *mm,
			unsigned long dst_start,
			unsigned long src_start,
			struct vm_area_struct **dst_vmap,
			struct vm_area_struct **src_vmap)
{}

#ifdef CONFIG_PER_VMA_LOCK
static int uffd_move_lock(struct mm_struct *mm,
			  unsigned long dst_start,
			  unsigned long src_start,
			  struct vm_area_struct **dst_vmap,
			  struct vm_area_struct **src_vmap)
{}

static void uffd_move_unlock(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{}

#else

static int uffd_move_lock(struct mm_struct *mm,
			  unsigned long dst_start,
			  unsigned long src_start,
			  struct vm_area_struct **dst_vmap,
			  struct vm_area_struct **src_vmap)
{
	int err;

	mmap_read_lock(mm);
	err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
	if (err)
		mmap_read_unlock(mm);
	return err;
}

static void uffd_move_unlock(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{
	mmap_assert_locked(src_vma->vm_mm);
	mmap_read_unlock(dst_vma->vm_mm);
}
#endif

/**
 * move_pages - move arbitrary anonymous pages of an existing vma
 * @ctx: pointer to the userfaultfd context
 * @dst_start: start of the destination virtual memory range
 * @src_start: start of the source virtual memory range
 * @len: length of the virtual memory range
 * @mode: flags from uffdio_move.mode
 *
 * It will use either the mmap_lock in read mode or per-vma locks.
 *
 * move_pages() remaps arbitrary anonymous pages atomically in zero
 * copy. It only works on non shared anonymous pages because those can
 * be relocated without generating non linear anon_vmas in the rmap
 * code.
 *
 * It provides a zero copy mechanism to handle userspace page faults.
 * The source vma pages should have mapcount == 1, which can be
 * enforced by using madvise(MADV_DONTFORK) on src vma.
 *
 * The thread handling the userland page fault will receive the
 * faulting page in the source vma through the network, storage or
 * any other I/O device (MADV_DONTFORK in the source vma prevents
 * move_pages() from failing with -EBUSY if the process forks before
 * move_pages() is called), then it will call move_pages() to map the
 * page at the faulting address in the destination vma.
 *
 * This userfaultfd command works purely via pagetables, so it's the
 * most efficient way to move physical non shared anonymous pages
 * across different virtual addresses. Unlike mremap()/mmap()/munmap()
 * it does not create any new vmas. The mapping in the destination
 * address is atomic.
 *
 * It only works if the vma protection bits are identical from the
 * source and destination vma.
 *
 * It can remap non shared anonymous pages within the same vma too.
 *
 * If the source virtual memory range has any unmapped holes, or if
 * the destination virtual memory range is not a whole unmapped hole,
 * move_pages() will fail respectively with -ENOENT or -EEXIST. This
 * provides a very strict behavior to avoid any chance of memory
 * corruption going unnoticed if there are userland race conditions.
 * Only one thread should resolve the userland page fault at any given
 * time for any given faulting address. This means that if two threads
 * try to both call move_pages() on the same destination address at the
 * same time, the second thread will get an explicit error from this
 * command.
 *
 * The command retval will be "len" if successful. The command
 * however can be interrupted by fatal signals or errors. If
 * interrupted it will return the number of bytes successfully
 * remapped before the interruption if any, or the negative error if
 * none. It will never return zero. Either it will return an error or
 * the number of bytes successfully moved. If the retval reports a
 * "short" remap, the move_pages() command should be repeated by
 * userland with src+retval, dst+retval, len-retval if it wants to
 * know about the error that interrupted it (see the userspace retry
 * loop sketched after this function).
 *
 * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
 * prevent -ENOENT errors from materializing if there are holes in the
 * source virtual range that is being remapped. The holes will be
 * accounted as successfully remapped in the retval of the
 * command. This is mostly useful to remap hugepage naturally aligned
 * virtual regions without knowing if there are transparent hugepages
 * in the regions or not, but avoiding the risk of having to split
 * the hugepmd during the remap.
 *
 * If there's any rmap walk that is taking the anon_vma locks without
 * first obtaining the folio lock (the only current instance is
 * folio_referenced), it will have to verify if the folio->mapping
 * has changed after taking the anon_vma lock. If it changed it
 * should release the lock and retry obtaining a new anon_vma, because
 * it means the anon_vma was changed by move_pages() before the lock
 * could be obtained. This is the only additional complexity added to
 * the rmap code to provide this anonymous page remapping functionality.
 */
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
		   unsigned long src_start, unsigned long len, __u64 mode)
{}
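/*
 * Hedged userspace sketch, not part of this kernel file: one way a
 * monitor thread might drive UFFDIO_MOVE and handle the "short remap"
 * retval semantics documented above.  The wrapper name and the
 * unconditional use of UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES are
 * illustrative choices, not requirements.
 */
#if 0	/* userspace example, never compiled as part of the kernel */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int uffd_move_range(int uffd, uint64_t src, uint64_t dst, uint64_t len)
{
	while (len) {
		struct uffdio_move mv;

		memset(&mv, 0, sizeof(mv));
		mv.src = src;
		mv.dst = dst;
		mv.len = len;
		mv.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES;

		if (ioctl(uffd, UFFDIO_MOVE, &mv) == 0)
			return 0;		/* the whole range was moved */
		if (mv.move <= 0)
			return -1;		/* hard error: mv.move holds -errno */

		/* Short remap: skip past what was moved and retry the rest. */
		src += mv.move;
		dst += mv.move;
		len -= mv.move;
	}
	return 0;
}
#endif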