// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#include <linux/iov_iter.h>

static __always_inline
size_t copy_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{}

static __always_inline
size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
				 size_t len, void *from, void *priv2)
{}

static __always_inline
size_t copy_from_user_iter(void __user *iter_from, size_t progress,
			   size_t len, void *to, void *priv2)
{}

static __always_inline
size_t memcpy_to_iter(void *iter_to, size_t progress,
		      size_t len, void *from, void *priv2)
{}

static __always_inline
size_t memcpy_from_iter(void *iter_from, size_t progress,
			size_t len, void *to, void *priv2)
{}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
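
/*
 * Illustrative sketch, not part of the upstream file: the usual caller
 * pattern for fault_in_iov_iter_readable() in a buffered-write style path
 * that copies with page faults disabled.  example_copy_from_user_iter()
 * and its dst/len arguments are hypothetical.
 */
static size_t example_copy_from_user_iter(void *dst, size_t len,
					  struct iov_iter *from)
{
	size_t copied;

	do {
		/* Nothing could be faulted in: let the caller fail. */
		if (fault_in_iov_iter_readable(from, len) == len)
			return 0;

		/* Atomic context (e.g. a locked, mapped page): no faults. */
		pagefault_disable();
		copied = copy_from_iter(dst, len, from);
		pagefault_enable();

		/* A zero-byte copy means we raced with reclaim: retry. */
	} while (!copied);

	return copied;
}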

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
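
/*
 * Illustrative sketch, not part of the upstream file: a read path that must
 * not take page faults while a lock is held can copy with faults disabled
 * and use fault_in_iov_iter_writeable() to pre-fault the destination before
 * retrying.  example_locked_read() and its src/len arguments are
 * hypothetical.
 */
static ssize_t example_locked_read(struct iov_iter *to, const void *src,
				   size_t len)
{
	size_t copied;

	if (!len)
		return 0;

	for (;;) {
		/* ... take a lock that forbids page faults here ... */
		pagefault_disable();
		copied = copy_to_iter(src, len, to);
		pagefault_enable();
		/* ... drop the lock ... */

		if (copied)
			return copied;

		/* Nothing was copied: fault the destination in and retry. */
		if (fault_in_iov_iter_writeable(to, len) == len)
			return -EFAULT;
	}
}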

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{}
EXPORT_SYMBOL(iov_iter_init);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static __always_inline
size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
			    size_t len, void *from, void *priv2)
{}

static __always_inline
size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
 * block layer). Upon #MC, read(2) aborts and returns EIO or the number
 * of bytes successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC and ITER_BVEC can return short copies.  Compare to
 *   copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
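
/*
 * Illustrative sketch, not part of the upstream file (and only meaningful
 * with CONFIG_ARCH_HAS_COPY_MC): how a dax read path might use
 * _copy_mc_to_iter() so that poisoned pmem shows up as a short read rather
 * than a re-triggered machine check.  example_dax_read() and its arguments
 * are hypothetical.
 */
static ssize_t example_dax_read(void *kaddr, size_t len, struct iov_iter *to)
{
	size_t copied;

	copied = _copy_mc_to_iter(kaddr, len, to);
	if (copied < len) {
		/*
		 * Poison (or a user-copy fault) cut the copy short: report
		 * the bytes transferred so far, or -EIO if there were none.
		 */
		return copied ? copied : -EIO;
	}
	return copied;
}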

static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{}
EXPORT_SYMBOL(_copy_from_iter);

static __always_inline
size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{}

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
static __always_inline
size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
				      size_t len, void *to, void *priv2)
{}

static __always_inline
size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{}

/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
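
/*
 * Illustrative sketch, not part of the upstream file: a pmem-style
 * dax_copy_from_iter() hook would funnel writes through
 * _copy_from_iter_flushcache() so that data is durable in persistent memory
 * without a separate flush pass.  example_pmem_copy_from_iter() is
 * hypothetical; the real hook lives in the pmem driver's dax_operations.
 */
static size_t example_pmem_copy_from_iter(void *pmem_addr, size_t bytes,
					  struct iov_iter *i)
{
	/* Destination writes bypass or write back the CPU cache. */
	return _copy_from_iter_flushcache(pmem_addr, bytes, i);
}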

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
				 struct iov_iter *i)
{}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{}
EXPORT_SYMBOL(copy_page_from_iter);

static __always_inline
size_t zero_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *priv, void *priv2)
{}

static __always_inline
size_t zero_to_iter(void *iter_to, size_t progress,
		    size_t len, void *priv, void *priv2)
{}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
		size_t bytes, struct iov_iter *i)
{}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{}

void iov_iter_advance(struct iov_iter *i, size_t size)
{}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{}
EXPORT_SYMBOL(iov_iter_bvec);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{}
EXPORT_SYMBOL(iov_iter_xarray);
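
/*
 * Illustrative sketch, not part of the upstream file: pointing an iterator
 * at an inode's page cache so a network filesystem can transmit straight
 * from the cached pages.  Assumes the ITER_SOURCE direction constant from
 * <linux/uio.h>; example_send_from_pagecache() is hypothetical and the
 * caller is responsible for keeping the pages from being evicted.
 */
static int example_send_from_pagecache(struct address_space *mapping,
				       loff_t pos, size_t len)
{
	struct iov_iter iter;

	/* ITER_SOURCE: data will be drawn out of the xarray's pages. */
	iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, pos, len);

	/* ... hand &iter to the transport as the data source ... */
	return 0;
}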

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{}
EXPORT_SYMBOL(iov_iter_discard);
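
/*
 * Illustrative sketch, not part of the upstream file: a discard iterator
 * lets an existing copy_to_iter() path throw data away, e.g. to skip over
 * an unwanted chunk of a stream.  Assumes the ITER_DEST direction constant
 * from <linux/uio.h>; example_skip_bytes() is hypothetical.
 */
static size_t example_skip_bytes(const void *src, size_t count)
{
	struct iov_iter iter;

	iov_iter_discard(&iter, ITER_DEST, count);

	/* The "copy" always succeeds; the data simply goes nowhere. */
	return copy_to_iter(src, count, &iter);
}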

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 * 	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
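
/*
 * Illustrative sketch, not part of the upstream file: a direct-I/O path
 * rejecting a request whose segment addresses or lengths are not aligned
 * to the device's logical block size.  example_dio_check() and its bsize
 * argument are hypothetical.
 */
static int example_dio_check(const struct iov_iter *iter, unsigned int bsize)
{
	/* Both the addresses and the lengths must be bsize-aligned. */
	if (!iov_iter_is_aligned(iter, bsize - 1, bsize - 1))
		return -EINVAL;
	return 0;
}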

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start)
{}

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
		size_t maxsize, unsigned maxpages, size_t *start)
{}
EXPORT_SYMBOL(iov_iter_get_pages2);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		struct page ***pages, size_t maxsize, size_t *start)
{}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);

static int iov_npages(const struct iov_iter *i, int maxpages)
{}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{}
EXPORT_SYMBOL(dup_iter);

static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, u32 nr_segs)
{}

static __noclone int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uiov, unsigned long nr_segs)
{}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{}

/*
 * Single segment iovec supplied by the user, import it as ITER_UBUF.
 */
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
				   struct iovec **iovp, struct iov_iter *i,
				   bool compat)
{}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{}
EXPORT_SYMBOL(import_iovec);
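
/*
 * Illustrative sketch, not part of the upstream file: the canonical
 * readv(2)-style caller.  import_iovec() either fills the small on-stack
 * array or allocates a larger one, and *iov may be handed to kfree() in
 * both cases.  Assumes the ITER_DEST direction constant from <linux/uio.h>;
 * example_readv() is hypothetical.
 */
static ssize_t example_readv(const struct iovec __user *uvec,
			     unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* ... consume &iter here, e.g. by calling into the read path ... */

	kfree(iov);
	return ret;
}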

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{}
EXPORT_SYMBOL_GPL(import_ubuf);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may
 * have advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{}
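
/*
 * Illustrative sketch, not part of the upstream file: the save/restore pair
 * lets a caller retry a transfer that may have partially advanced the
 * iterator (the pattern io_uring uses for -EAGAIN retries).
 * example_try_write() is a hypothetical helper that may consume part of
 * the iterator before failing.
 */
static ssize_t example_try_write(struct iov_iter *from);

static ssize_t example_retried_write(struct iov_iter *from)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(from, &state);
	ret = example_try_write(from);
	if (ret == -EAGAIN) {
		/* Rewind whatever the failed attempt consumed, then retry. */
		iov_iter_restore(from, &state);
		ret = example_try_write(from);
	}
	return ret;
}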

/*
 * Extract a list of contiguous pages from an ITER_XARRAY iterator.  This does not
 * get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{}

/*
 * Extract a list of contiguous pages from an ITER_BVEC iterator.  This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{}

/*
 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{}

/*
 * Extract a list of contiguous pages from a user iterator and get a pin on
 * each of them.  This should only be used if the iterator is user-backed
 * (IOBUF/UBUF).
 *
 * It does not get refs on the pages, but the pages must be unpinned by the
 * caller once the transfer is complete.
 *
 * This is safe to use where background IO/DMA *is* going to be modifying
 * the buffer; using a pin rather than a ref forces fork() to give the
 * child a copy of the page.
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
					   struct page ***pages,
					   size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * @pages: Where to return the list of pages
 * @maxsize: The maximum amount of iterator to extract
 * @maxpages: The maximum size of the list of pages
 * @extraction_flags: Flags to qualify request
 * @offset0: Where to return the starting offset into (*@pages)[0]
 *
 * Extract a list of contiguous pages from the current point of the iterator,
 * advancing the iterator.  The maximum number of pages and the maximum amount
 * of page contents can be set.
 *
 * If *@pages is NULL, a page list will be allocated to the required size and
 * *@pages will be set to its base.  If *@pages is not NULL, it will be assumed
 * that the caller allocated a page list at least @maxpages in size and this
 * will be filled in.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * The iov_iter_extract_will_pin() function can be used to query how cleanup
 * should be performed.
 *
 * Extra refs or pins on the pages may be obtained as follows:
 *
 *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 *      added to the pages, but refs will not be taken.
 *      iov_iter_extract_will_pin() will return true.
 *
 *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
 *      merely listed; no extra refs or pins are obtained.
 *      iov_iter_extract_will_pin() will return false.
 *
 * Note also:
 *
 *  (*) Use with ITER_DISCARD is not supported as that has no content.
 *
 * On success, the function sets *@pages to the new pagelist, if allocated, and
 * sets *offset0 to the offset into the first page.
 *
 * It may also return -ENOMEM or -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
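
/*
 * Illustrative sketch, not part of the upstream file: extracting pages for
 * DMA and cleaning up afterwards.  Only user-backed iterators leave the
 * pages pinned, which is what iov_iter_extract_will_pin() reports.
 * example_map_for_dma() is hypothetical.
 */
static ssize_t example_map_for_dma(struct iov_iter *iter, size_t len)
{
	struct page **pages = NULL;	/* let the helper allocate the array */
	size_t offset, npages, p;
	ssize_t n;

	n = iov_iter_extract_pages(iter, &pages, len, INT_MAX, 0, &offset);
	if (n <= 0)
		return n;

	/* ... set up the transfer from pages[], starting at offset ... */

	if (iov_iter_extract_will_pin(iter)) {
		npages = DIV_ROUND_UP(offset + n, PAGE_SIZE);
		for (p = 0; p < npages; p++)
			unpin_user_page(pages[p]);
	}
	kvfree(pages);
	return n;
}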