#define pr_fmt(fmt) …
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>
#include "internal.h"
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) …
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = …;
int mmap_rnd_bits_max __ro_after_init = …;
int mmap_rnd_bits __read_mostly = …;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = …;
const int mmap_rnd_compat_bits_max = …;
int mmap_rnd_compat_bits __read_mostly = …;
#endif
static bool ignore_rlimit_data;
core_param(…);
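/* Update vma->vm_page_prot to reflect vma->vm_flags. */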
void vma_set_page_prot(struct vm_area_struct *vma)
{ … }
static int check_brk_limits(unsigned long addr, unsigned long len)
{ … }
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
unsigned long addr, unsigned long request, unsigned long flags);
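/*
 * brk(2): expand or shrink the heap.  The new break is validated against
 * RLIMIT_DATA and neighbouring mappings before do_brk_flags() grows the
 * brk VMA in place (or a munmap shrinks it).
 */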
SYSCALL_DEFINE1(brk, unsigned long, brk)
{ … }
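/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr.
 */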
static inline unsigned long round_hint_to_min(unsigned long hint)
{ … }
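/*
 * Return true if adding @bytes of VM_LOCKED memory would keep
 * mm->locked_vm within RLIMIT_MEMLOCK, or the caller has CAP_IPC_LOCK.
 */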
bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
unsigned long bytes)
{ … }
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{ … }
static inline bool file_mmap_ok(struct file *file, struct inode *inode,
unsigned long pgoff, unsigned long len)
{ … }
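/*
 * The caller must write-lock current->mm->mmap_lock.
 *
 * Validate the request, combine @prot/@flags into the final vm_flags,
 * find an unmapped area and call mmap_region() to create the mapping.
 * On success, returns the mapped address; *populate is set to the length
 * that should be pre-faulted (MAP_POPULATE/MAP_LOCKED).
 */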
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, vm_flags_t vm_flags,
unsigned long pgoff, unsigned long *populate,
struct list_head *uf)
{ … }
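/*
 * Resolve @fd to a struct file (or set up a shmem/hugetlbfs file for
 * MAP_ANONYMOUS | MAP_HUGETLB) and hand off to vm_mmap_pgoff().
 */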
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{ … }
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, pgoff)
{ … }
#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
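/*
 * Legacy single-argument mmap(2): the six arguments arrive in a
 * user-space struct, and the offset is in bytes rather than pages.
 */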
SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
        struct mmap_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        if (offset_in_page(a.offset))
                return -EINVAL;

        return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                               a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
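/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */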
static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{ … }
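/**
 * unmapped_area() - Find an area between the low_limit and the high_limit with
 * the correct alignment and offset, all from @info. Note: current->mm is used
 * for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */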
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{ … }
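/**
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */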
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{ … }
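/*
 * Determine if the allocation needs to ensure that there is no
 * existing mapping within its guard gaps, for use as start_gap.
 */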
static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{ … }
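/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */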
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
{ … }
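/*
 * Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 */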
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{ … }
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
vm_flags);
}
#endif
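/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */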
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{ … }
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t vm_flags)
{
return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
vm_flags);
}
#endif
unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags)
{ … }
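/*
 * Ugly calling convention alert: a return value with the low bits set
 * (ret & ~PAGE_MASK) is an error value.  This function "knows" that
 * -ENOMEM has those bits set.
 */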
unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{ … }
unsigned long
mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{ … }
EXPORT_SYMBOL(…);
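/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
 * start_addr < end_addr.
 */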
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
unsigned long start_addr,
unsigned long end_addr)
{ … }
EXPORT_SYMBOL(…);
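/**
 * find_vma() - Find the VMA for a given address, or the next VMA.
 * @mm: The mm_struct to check
 * @addr: The address
 *
 * Returns: The VMA associated with addr, or the next VMA.
 * May return %NULL in the case of no VMA at addr or above.
 */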
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{ … }
EXPORT_SYMBOL(…);
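/*
 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
 */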
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{ … }
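/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */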
static int acct_stack_growth(struct vm_area_struct *vma,
unsigned long size, unsigned long grow)
{ … }
#if defined(CONFIG_STACK_GROWSUP)
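/*
 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 */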
static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next;
        unsigned long gap_addr;
        int error = 0;
        VMA_ITERATOR(vmi, mm, vma->vm_start);

        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;

        /* Guard against exceeding limits of the address space. */
        address &= PAGE_MASK;
        if (address >= (TASK_SIZE & PAGE_MASK))
                return -ENOMEM;
        address += PAGE_SIZE;

        /* Enforce stack_guard_gap, guarding against overflow. */
        gap_addr = address + stack_guard_gap;
        if (gap_addr < address || gap_addr > TASK_SIZE)
                gap_addr = TASK_SIZE;

        next = find_vma_intersection(mm, vma->vm_end, gap_addr);
        if (next && vma_is_accessible(next)) {
                if (!(next->vm_flags & VM_GROWSUP))
                        return -ENOMEM;
        }

        if (next)
                vma_iter_prev_range_limit(&vmi, address);

        vma_iter_config(&vmi, vma->vm_start, address);
        if (vma_iter_prealloc(&vmi, vma))
                return -ENOMEM;

        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma))) {
                vma_iter_free(&vmi);
                return -ENOMEM;
        }

        /* Lock the VMA before expanding to prevent concurrent page faults. */
        vma_start_write(vma);
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_lock in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
        anon_vma_lock_write(vma->anon_vma);

        /* Somebody else might have raced and expanded it already. */
        if (address > vma->vm_end) {
                unsigned long size, grow;

                size = address - vma->vm_start;
                grow = (address - vma->vm_end) >> PAGE_SHIFT;

                error = -ENOMEM;
                if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
                        error = acct_stack_growth(vma, size, grow);
                        if (!error) {
                                /*
                                 * We only hold a shared mmap_lock here, so
                                 * we need to protect against concurrent vma
                                 * expansions.  The anon_vma lock doesn't
                                 * help, as growable vmas in a mm need not
                                 * share a root anon_vma, so reuse
                                 * mm->page_table_lock instead.
                                 */
                                spin_lock(&mm->page_table_lock);
                                if (vma->vm_flags & VM_LOCKED)
                                        mm->locked_vm += grow;
                                vm_stat_account(mm, vma->vm_flags, grow);
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_end = address;
                                /* Overwrite old entry in mtree. */
                                vma_iter_store(&vmi, vma);
                                anon_vma_interval_tree_post_update_vma(vma);
                                spin_unlock(&mm->page_table_lock);

                                perf_event_mmap(vma);
                        }
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
        vma_iter_free(&vmi);
        validate_mm(mm);
        return error;
}
#endif /* CONFIG_STACK_GROWSUP */
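/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 * mmap_lock held for writing.
 */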
int expand_downwards(struct vm_area_struct *vma, unsigned long address)
{ … }
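/* enforced gap between the expanding stack and other mappings. */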
unsigned long stack_guard_gap = …;
static int __init cmdline_parse_stack_guard_gap(char *p)
{ … }
__setup(…);
#ifdef CONFIG_STACK_GROWSUP
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
return expand_upwards(vma, address);
}
/*
 * We don't check the return value of populate_vma_page_range() as
 * it is racy anyhow.
 */
struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma, *prev;

        addr &= PAGE_MASK;
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
        if (!prev)
                return NULL;
        if (expand_stack_locked(prev, addr))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
        return prev;
}
#else
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{ … }
struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{ … }
#endif
#if defined(CONFIG_STACK_GROWSUP)
#define vma_expand_up …
#define vma_expand_down …
#else
#define vma_expand_up(vma,addr) …
#define vma_expand_down(vma, addr) …
#endif
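/*
 * expand_stack(): legacy interface for page faulting. Don't use unless
 * you have to.
 *
 * This is called with the mm locked for reading, drops the lock, takes
 * the lock for writing, tries to look up a vma again, expands it if
 * necessary, and downgrades the lock to reading again.
 *
 * If no vma is found or it can't be expanded, it returns NULL and has
 * dropped the lock.
 */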
struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{ … }
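/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length to be munmapped.
 * @uf: The userfaultfd list_head
 *
 * Return: 0 on success, error otherwise.
 */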
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{ … }
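/*
 * mmap_region() - Actually create the mapping: unmap any overlapping
 * region, try to merge with adjacent VMAs, otherwise allocate a new VMA,
 * invoke the file's ->mmap() callback for file-backed mappings, and update
 * the VMA tree and accounting.  Called with mmap_lock held for writing.
 */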
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
struct list_head *uf)
{ … }
static int __vm_munmap(unsigned long start, size_t len, bool unlock)
{ … }
int vm_munmap(unsigned long start, size_t len)
{ … }
EXPORT_SYMBOL(…);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{ … }
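/*
 * Emulation of deprecated remap_file_pages() syscall.
 */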
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{ … }
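/*
 * do_brk_flags() - Increase the brk vma if the flags match.
 * @vmi: The vma iterator
 * @addr: The start address
 * @len: The length of the increase
 * @vma: The vma
 * @flags: The VMA Flags
 *
 * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
 * do not match then create a new anonymous VMA.  Returns: 0 on success.
 */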
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long addr, unsigned long len, unsigned long flags)
{ … }
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{ … }
EXPORT_SYMBOL(…);
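/* Release all mmaps. */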
void exit_mmap(struct mm_struct *mm)
{ … }
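/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree.  If vm_file is non-NULL
 * then i_mmap must be held for writing.
 */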
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{ … }
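/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages.
 */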
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{ … }
void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{ … }
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
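/*
 * Having a close hook prevents vma merging regardless of flags.
 */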
static void special_mapping_close(struct vm_area_struct *vma)
{ … }
static const char *special_mapping_name(struct vm_area_struct *vma)
{ … }
static int special_mapping_mremap(struct vm_area_struct *new_vma)
{ … }
static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{ … }
static const struct vm_operations_struct special_mapping_vmops = …;
static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{ … }
static struct vm_area_struct *__install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, void *priv,
const struct vm_operations_struct *ops)
{ … }
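/**
 * vma_is_special_mapping - test whether a VMA was created by
 * _install_special_mapping()
 * @vma: the VMA in question
 * @sm: the special mapping with which the VMA is associated
 *
 * Return: %true if @vma is associated with @sm; %false otherwise.
 */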
bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm)
{ … }
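/*
 * Called with mm->mmap_lock held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */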
struct vm_area_struct *_install_special_mapping(
struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long vm_flags, const struct vm_special_mapping *spec)
{ … }
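/*
 * initialise the percpu counter for VM
 */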
void __init mmap_init(void)
{ … }
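/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OOM_DISABLE mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */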
static int init_user_reserve(void)
{ … }
subsys_initcall(init_user_reserve);
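/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OOM_DISABLE mode. Smaller systems will
 * reserve 3% of free pages by default.
 */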
static int init_admin_reserve(void)
{ … }
subsys_initcall(init_admin_reserve);
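/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It might make sense, however, to set reserves
 * equal to 3% of the current reserve size.
 */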
static int reserve_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{ … }
static int __meminit init_reserve_notifier(void)
{ … }
subsys_initcall(init_reserve_notifier);
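/*
 * relocate_vma_down() - move the vma and its pages down by @shift bytes.
 * Used during exec to relocate the temporary stack: moves the VMA, its
 * page tables and pages, and frees the page tables of the vacated region.
 * The caller must hold mmap_lock for writing.
 */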
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
{ … }