linux/fs/hugetlbfs/inode.c

/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {};

int sysctl_hugetlb_shm_group;

enum hugetlb_param { Opt_gid, Opt_min_size, Opt_mode, Opt_nr_inodes, Opt_pagesize, Opt_size, Opt_uid };

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {};

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
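
/*
 * Illustrative use of the mask above (a sketch, not necessarily the exact
 * mainline check; the helper name is made up for illustration): reject a
 * mapping whose page offset would overflow loff_t once converted to a
 * byte offset.
 */
static inline bool hugetlbfs_pgoff_overflows_loff_t(pgoff_t pgoff)
{
	/* Any of the upper PAGE_SHIFT + 1 bits set means the byte offset
	 * cannot be represented as a non-negative loff_t. */
	return pgoff & PGOFF_LOFFT_MAX;
}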

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{}

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

/*
 * Someone wants to read @bytes at @offset from a HWPOISON hugetlb @page.
 * Returns the maximum number of bytes one can read without touching the
 * first raw HWPOISON subpage.
 *
 * The implementation borrows the iteration logic from copy_page_to_iter*.
 */
static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
{}
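
/*
 * A minimal sketch of the idea behind adjust_range_hwpoison() (an
 * assumption, not the mainline body): walk the subpages backing
 * [offset, offset + bytes) and stop at the first poisoned one.
 */
static size_t __maybe_unused
adjust_range_hwpoison_sketch(struct page *page, size_t offset, size_t bytes)
{
	size_t safe = 0;

	while (safe < bytes) {
		size_t pos = offset + safe;
		size_t chunk = min_t(size_t, bytes - safe,
				     PAGE_SIZE - (pos & ~PAGE_MASK));

		if (PageHWPoison(nth_page(page, pos >> PAGE_SHIFT)))
			break;
		safe += chunk;
	}
	return safe;
}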

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{}

/*
 * Called with i_mmap_rwsem held for inode-based vma maps.  This makes
 * sure the vma (and vm_mm) will not go away.  We also hold the hugetlb
 * fault mutex for the page in the mapping, so we cannot race with the
 * page being faulted into the vma.
 */
static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns only those vmas which
 * overlap the truncated area starting at pgoff, and no vma on a
 * 32-bit arch can span beyond 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{}
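
/*
 * A sketch of the clamping these helpers perform (an assumption, not the
 * exact mainline bodies): convert the pgoff-based range [start, end) into
 * user addresses clipped to this vma.  An end of 0 means "no upper bound".
 */
static unsigned long __maybe_unused
vma_offset_start_sketch(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long __maybe_unused
vma_offset_end_sketch(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	return min(t_end, vma->vm_end);
}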

/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by the end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining the page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if the end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch;
 *	such a race is indicated if we find a mapped page.
 * Note: if the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{}
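
/*
 * Skeleton of the scan described above (an assumption, not the mainline
 * body): look up folios in batches, handling each one under the hugetlb
 * fault mutex for its index.
 */
static void __maybe_unused
remove_inode_hugepages_sketch(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t next = start;
	int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(inode->i_mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			/* For fbatch.folios[i]: take the fault mutex for
			 * its index, remove it from the page cache and,
			 * for truncation, clean up its reservation. */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}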

static void hugetlbfs_evict_inode(struct inode *inode)
{}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{}

static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
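
/*
 * How the key above gets applied (a sketch: mainline does this when
 * initializing a new inode, and the helper name here is made up):
 * give each hugetlbfs inode's i_mmap_rwsem its own lockdep class.
 */
static inline void hugetlbfs_set_i_mmap_key(struct inode *inode)
{
	lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
			  &hugetlbfs_i_mmap_rwsem_key);
}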

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct mnt_idmap *idmap,
					struct inode *dir,
					umode_t mode, dev_t dev)
{}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{}

static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{}

static int hugetlbfs_create(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_folio(struct address_space *mapping,
				struct folio *folio)
{}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{}

static void hugetlbfs_put_super(struct super_block *sb)
{}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{}

static void hugetlbfs_free_inode(struct inode *inode)
{}

static void hugetlbfs_destroy_inode(struct inode *inode)
{}

static const struct address_space_operations hugetlbfs_aops = {};


static void init_once(void *foo)
{}

static const struct file_operations hugetlbfs_file_operations = {};

static const struct inode_operations hugetlbfs_dir_inode_operations = {};

static const struct inode_operations hugetlbfs_inode_operations = {};

static const struct super_operations hugetlbfs_ops = {};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{}
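
/*
 * A sketch of that conversion (an assumption, not the exact mainline
 * body): percentages are scaled against the configured pool, byte
 * counts are rounded down to whole huge pages.
 */
static long __maybe_unused
size_to_hpages_sketch(struct hstate *h, unsigned long long size_opt,
		      enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		/* Convert the percentage to bytes of pool space first. */
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	/* Round down to a whole number of huge pages. */
	size_opt >>= huge_page_shift(h);
	return size_opt;
}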

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{}

static int hugetlbfs_get_tree(struct fs_context *fc)
{}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{}

static struct file_system_type hugetlbfs_fs_type = {};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{}

static int get_hstate_idx(int page_size_log)
{}
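
/*
 * A sketch of the lookup above (an assumption, not the exact mainline
 * body): map a shmget()-style page size log to an hstate index, with 0
 * meaning the default huge page size.
 */
static int __maybe_unused get_hstate_idx_sketch(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}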

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller, otherwise hugetlb_reserve_pages reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{}
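
/*
 * The caller-side alignment the note above asks for (a sketch; the
 * creat_flags value is illustrative): round the request up to a whole
 * number of default-sized huge pages before calling hugetlb_file_setup().
 */
static struct file * __maybe_unused
hugetlb_file_setup_aligned(const char *name, size_t size)
{
	struct hstate *h = &default_hstate;	/* 0 page_size_log => default */

	size = round_up(size, huge_page_size(h));
	return hugetlb_file_setup(name, size, 0, HUGETLB_ANONHUGE_INODE, 0);
}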

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{}

static int __init init_hugetlbfs_fs(void)
{}
fs_initcall(init_hugetlbfs_fs)