#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{ … }
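/*
 * Look up the vma covering @addr under the mmap_lock and, for private
 * mappings, presumably prepare its anon_vma so that later PTE installation
 * cannot fail on it; callers below treat the result with IS_ERR().
 */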
static __always_inline
struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
unsigned long addr)
{ … }
#ifdef CONFIG_PER_VMA_LOCK
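/*
 * Per-VMA-lock variant: find the vma containing @address and take its
 * per-vma read lock so mfill can proceed without the mmap_lock, expected to
 * fall back to the mmap_lock path when the vma cannot be locked directly.
 */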
static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm,
unsigned long address)
{ … }
static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
unsigned long dst_start,
unsigned long len)
{ … }
static void uffd_mfill_unlock(struct vm_area_struct *vma)
{ … }
#else
static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	mmap_read_lock(dst_mm);
	dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
	if (IS_ERR(dst_vma))
		goto out_unlock;

	if (validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	dst_vma = ERR_PTR(-ENOENT);
out_unlock:
	mmap_read_unlock(dst_mm);
	return dst_vma;
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	mmap_read_unlock(vma->vm_mm);
}
#endif
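/*
 * For file-backed destinations, report whether @dst_addr lies beyond the
 * current size of the backing file, in which case the fill must fail rather
 * than instantiate pages past EOF.
 */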
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
unsigned long dst_addr)
{ … }
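/*
 * Install a PTE mapping @page at @dst_addr within @dst_vma.  Shared by the
 * copy and UFFDIO_CONTINUE paths, for shmem as well as anonymous memory,
 * whether the vma is shared or private.
 */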
int mfill_atomic_install_pte(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr, struct page *page,
bool newly_allocated, uffd_flags_t flags)
{ … }
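/*
 * UFFDIO_COPY for one PTE: allocate a folio, copy PAGE_SIZE bytes from
 * @src_addr and map the result at @dst_addr.  If the copy from userspace
 * cannot complete atomically, the folio is expected to be handed back via
 * @foliop so the caller can finish the copy after dropping its locks and
 * then retry.
 */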
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
struct folio **foliop)
{ … }
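/*
 * UFFDIO_ZEROPAGE helpers: install a freshly zeroed folio, or (below) map
 * the shared zeropage when the destination vma allows it.
 */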
static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr)
{ … }
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr)
{ … }
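/*
 * UFFDIO_CONTINUE for one PTE: find the page that already exists in the
 * (shmem) page cache at this offset and map it, rather than allocating and
 * copying a new one.
 */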
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
uffd_flags_t flags)
{ … }
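/*
 * UFFDIO_POISON for one PTE: install a poison PTE marker so that a later
 * access reports a memory-error style fault instead of faulting into
 * userfaultfd again.
 */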
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
uffd_flags_t flags)
{ … }
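/*
 * Walk, and if necessary allocate, the page-table levels above the PMD for
 * @address, returning the pmd_t to operate on (or NULL on allocation
 * failure).
 */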
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{ … }
#ifdef CONFIG_HUGETLB_PAGE
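/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with either the vma lock or the mmap_lock held, and it is expected
 * to release that lock before returning.
 */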
static __always_inline ssize_t mfill_atomic_hugetlb(
struct userfaultfd_ctx *ctx,
struct vm_area_struct *dst_vma,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
uffd_flags_t flags)
{ … }
#else
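/* fail at build time if gcc attempts to use this */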
extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
struct vm_area_struct *dst_vma,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
uffd_flags_t flags);
#endif
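/*
 * Per-PTE dispatcher: based on @flags, hand the work to the continue,
 * poison, copy or zeropage helper above; shared (shmem) destinations are
 * presumably routed through shmem_mfill_atomic_pte() for the copy and
 * zeropage cases.
 */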
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
uffd_flags_t flags,
struct folio **foliop)
{ … }
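/*
 * Common driver behind the UFFDIO_COPY/ZEROPAGE/CONTINUE/POISON ioctls: lock
 * and validate the destination vma, then walk the destination range one
 * PAGE_SIZE step at a time, allocating a PMD and calling mfill_atomic_pte()
 * for each address.  Transient copy failures are expected to be retried
 * after dropping the locks; the return value is the number of bytes filled,
 * or a negative error when nothing was done.
 */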
static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
uffd_flags_t flags)
{ … }
ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
unsigned long src_start, unsigned long len,
uffd_flags_t flags)
{ … }
ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
unsigned long start,
unsigned long len)
{ … }
ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
unsigned long len, uffd_flags_t flags)
{ … }
ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
unsigned long len, uffd_flags_t flags)
{ … }
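/*
 * Apply or clear userfaultfd write protection over @len bytes starting at
 * @start in @dst_vma, presumably via change_protection() with the
 * MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE flags.
 */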
long uffd_wp_range(struct vm_area_struct *dst_vma,
unsigned long start, unsigned long len, bool enable_wp)
{ … }
int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
unsigned long len, bool enable_wp)
{ … }
void double_pt_lock(spinlock_t *ptl1,
spinlock_t *ptl2)
__acquires(ptl1)
__acquires(ptl2)
{ … }
void double_pt_unlock(spinlock_t *ptl1,
spinlock_t *ptl2)
__releases(ptl1)
__releases(ptl2)
{ … }
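/*
 * UFFDIO_MOVE helpers for a single PTE, called with both page-table locks
 * held (see double_pt_lock() above): transfer a present anonymous page, a
 * swap entry, or a zeropage mapping from @src_addr to @dst_addr, expected to
 * fail with -EAGAIN when the previously read PTE values turn out to be
 * stale.
 */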
static int move_present_pte(struct mm_struct *mm,
struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma,
unsigned long dst_addr, unsigned long src_addr,
pte_t *dst_pte, pte_t *src_pte,
pte_t orig_dst_pte, pte_t orig_src_pte,
spinlock_t *dst_ptl, spinlock_t *src_ptl,
struct folio *src_folio)
{ … }
static int move_swap_pte(struct mm_struct *mm,
unsigned long dst_addr, unsigned long src_addr,
pte_t *dst_pte, pte_t *src_pte,
pte_t orig_dst_pte, pte_t orig_src_pte,
spinlock_t *dst_ptl, spinlock_t *src_ptl)
{ … }
static int move_zeropage_pte(struct mm_struct *mm,
struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma,
unsigned long dst_addr, unsigned long src_addr,
pte_t *dst_pte, pte_t *src_pte,
pte_t orig_dst_pte, pte_t orig_src_pte,
spinlock_t *dst_ptl, spinlock_t *src_ptl)
{ … }
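/*
 * Move one PTE worth of memory for UFFDIO_MOVE: re-validate the source and
 * destination entries under their page-table locks and dispatch to the
 * present/swap/zeropage helper above, presumably retrying internally when
 * the folio lock or anon_vma lock cannot be taken right away.
 */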
static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma,
unsigned long dst_addr, unsigned long src_addr,
__u64 mode)
{ … }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
unsigned long src_addr,
unsigned long src_end)
{ … }
#else
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	return false;
}
#endif
static inline bool vma_move_compatible(struct vm_area_struct *vma)
{ … }
static int validate_move_areas(struct userfaultfd_ctx *ctx,
struct vm_area_struct *src_vma,
struct vm_area_struct *dst_vma)
{ … }
static __always_inline
int find_vmas_mm_locked(struct mm_struct *mm,
unsigned long dst_start,
unsigned long src_start,
struct vm_area_struct **dst_vmap,
struct vm_area_struct **src_vmap)
{ … }
#ifdef CONFIG_PER_VMA_LOCK
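/*
 * Per-VMA-lock variant for UFFDIO_MOVE: lock both the source and the
 * destination vma (which may be the same one) without taking the mmap_lock,
 * expected to fall back to the mmap_lock path when that is not possible.
 */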
static int uffd_move_lock(struct mm_struct *mm,
unsigned long dst_start,
unsigned long src_start,
struct vm_area_struct **dst_vmap,
struct vm_area_struct **src_vmap)
{ … }
static void uffd_move_unlock(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{ … }
#else
static int uffd_move_lock(struct mm_struct *mm,
			  unsigned long dst_start,
			  unsigned long src_start,
			  struct vm_area_struct **dst_vmap,
			  struct vm_area_struct **src_vmap)
{
	int err;

	mmap_read_lock(mm);
	err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
	if (err)
		mmap_read_unlock(mm);
	return err;
}

static void uffd_move_unlock(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{
	mmap_assert_locked(src_vma->vm_mm);
	mmap_read_unlock(dst_vma->vm_mm);
}
#endif
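/**
 * move_pages - move arbitrary anonymous pages of an existing vma
 * @ctx: pointer to the userfaultfd context
 * @dst_start: start of the destination virtual memory range
 * @src_start: start of the source virtual memory range
 * @len: length of the virtual memory range
 * @mode: flags from uffdio_move.mode
 *
 * Implements UFFDIO_MOVE: remap the pages backing [src_start, src_start + len)
 * to [dst_start, dst_start + len) instead of copying them, returning the
 * number of bytes moved or a negative error code.
 */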
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
unsigned long src_start, unsigned long len, __u64 mode)
{ … }