// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 unsigned long long *_start,
					 unsigned long long *_len,
					 unsigned long long i_size)
{
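	/* A plausible sketch of the body (an assumption, not verified
	 * upstream code): defer the expansion decision to the cache backend,
	 * if one is attached to this request.
	 */
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}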

static void netfs_rreq_expand(struct netfs_io_request *rreq,
			      struct readahead_control *ractl)
{
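	/* Hedged sketch: let the cache and then the filesystem adjust the
	 * proposed window, then ask the VM to expand the readahead to match.
	 * The resultant request must still contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);
	}
}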

/*
 * Begin an operation, and fetch the stored zero point value from the cookie if
 * available.
 */
static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
{
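	/* Minimal sketch, assuming fscache is the cache provider: begin a
	 * read operation on the inode's cookie; the cookie carries the zero
	 * point that lets reads of known-empty regions be elided.
	 */
	return fscache_begin_read_operation(&rreq->cache_resources,
					    netfs_i_cookie(ctx));
}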

/*
 * Decant the list of folios to read into a rolling buffer.
 */
static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
					struct folio_queue *folioq,
					struct folio_batch *put_batch)
{
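	/* Simplified sketch (the exact upstream mechanics may differ): peel
	 * folios off the readahead list into this folio_queue segment,
	 * collecting their refs in @put_batch for bulk release by the
	 * caller, and return the number of bytes of buffer added.
	 */
	struct folio *folio;
	size_t size = 0;

	while (!folioq_full(folioq) &&
	       (folio = __readahead_folio(rreq->ractl))) {
		folioq_append(folioq, folio);
		size += folio_size(folio);
		if (!folio_batch_add(put_batch, folio))
			folio_batch_release(put_batch);
	}
	return size;
}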

/*
 * netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
 * @subreq: The subrequest to be set up
 *
 * Prepare the I/O iterator representing the read buffer on a subrequest for
 * the filesystem to use for I/O (it can be passed directly to a socket).  This
 * is intended to be called from the ->issue_read() method once the filesystem
 * has trimmed the request to the size it wants.
 *
 * Returns the limited size if successful and -ENOMEM if insufficient memory is
 * available.
 *
 * [!] NOTE: This must be run in the same thread as ->issue_read() was called
 * in, as we access the readahead_control struct.
 */
static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
{
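	/* Condensed sketch: clamp a download to the transport's maximum
	 * subrequest size, then carve this subrequest's iterator out of the
	 * request iterator.  The full version presumably also tops the
	 * rolling buffer up from the readahead_control at this point, which
	 * is where the documented -ENOMEM would arise; that step is elided.
	 */
	struct netfs_io_request *rreq = subreq->rreq;
	size_t rsize = subreq->len;

	if (subreq->source == NETFS_DOWNLOAD_FROM_SERVER)
		rsize = umin(rsize, rreq->io_streams[0].sreq_max_len);
	subreq->len = rsize;

	subreq->io_iter = rreq->iter;
	iov_iter_truncate(&subreq->io_iter, subreq->len);
	iov_iter_advance(&rreq->iter, subreq->len);
	return subreq->len;
}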

static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rreq,
						     struct netfs_io_subrequest *subreq,
						     loff_t i_size)
{
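	/* Likely shape (hedged): ask the cache where this slice should come
	 * from; without a cache, anything past the EOF is zero-filled and
	 * the rest is downloaded from the server.
	 */
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}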

static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
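	/* Sketch (assumption): @priv is the subrequest and the common
	 * completion helper takes the raw result; the exact signature of
	 * netfs_read_subreq_terminated() may differ from this.
	 */
	struct netfs_io_subrequest *subreq = priv;

	if (transferred_or_error > 0)
		subreq->transferred += transferred_or_error;
	netfs_read_subreq_terminated(subreq,
				     transferred_or_error < 0 ? transferred_or_error : 0,
				     was_async);
}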

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
					  struct netfs_io_subrequest *subreq)
{
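	/* Sketch: point the cache at the subrequest's iterator and let it
	 * read straight into the pagecache folios.  The subrequest is passed
	 * as the completion cookie, which is how the caller's ref is
	 * consumed: it comes back via netfs_cache_read_terminated().
	 */
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	cres->ops->read(cres, subreq->start, &subreq->io_iter,
			NETFS_READ_HOLE_IGNORE,
			netfs_cache_read_terminated, subreq);
}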

/*
 * Perform a read to the pagecache from a series of sources of different types,
 * slicing up the region to be read according to available cache blocks and
 * network rsize.
 */
static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
{
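	/* Condensed sketch of the slicing loop (zero-fill, retry and error
	 * accounting are elided): repeatedly carve a subrequest off the
	 * front of the region, ask the cache what the source should be and
	 * issue it appropriately.
	 */
	unsigned long long start = rreq->start;
	size_t size = rreq->len;

	do {
		struct netfs_io_subrequest *subreq;
		enum netfs_io_source source;
		ssize_t slice;

		subreq = netfs_alloc_subrequest(rreq);
		if (!subreq)
			break;

		subreq->start = start;
		subreq->len = size;

		source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
		subreq->source = source;
		slice = netfs_prepare_read_iterator(subreq);
		if (slice < 0) {
			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
			break;
		}

		if (source == NETFS_READ_FROM_CACHE) {
			netfs_read_cache_to_pagecache(rreq, subreq);
		} else if (source == NETFS_DOWNLOAD_FROM_SERVER) {
			rreq->netfs_ops->issue_read(subreq);
		} else {
			/* NETFS_FILL_WITH_ZEROES etc. elided in this sketch. */
			netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
			break;
		}

		start += slice;
		size -= slice;
	} while (size > 0);
}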

/*
 * Wait for the read operation to complete, successfully or otherwise.
 */
static int netfs_wait_for_read(struct netfs_io_request *rreq)
{
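	/* Sketch: sleep until the request drops out of the in-progress
	 * state, then fold a short read into an error if nothing else
	 * already failed.
	 */
	int ret;

	wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
	ret = rreq->error;
	if (ret == 0 && rreq->submitted < rreq->len)
		ret = -EIO;
	return ret;
}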

/*
 * Set up the initial folioq of buffer folios in the rolling buffer and set the
 * iterator to refer to it.
 */
static int netfs_prime_buffer(struct netfs_io_request *rreq)
{
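	/* Sketch using this file's own helpers (the allocation details are
	 * assumptions): hang an initial folio_queue segment off the request,
	 * point the iterator at it, then load the first batch of readahead
	 * folios into it.
	 */
	struct folio_queue *folioq;
	struct folio_batch put_batch;
	size_t added;

	folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
	if (!folioq)
		return -ENOMEM;
	folioq_init(folioq);

	rreq->buffer = folioq;
	rreq->buffer_tail = folioq;
	rreq->submitted = rreq->start;
	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);

	folio_batch_init(&put_batch);
	added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
	folio_batch_release(&put_batch);
	rreq->iter.count += added;
	rreq->submitted += added;
	return 0;
}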

/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
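	/* Condensed sketch of the probable flow (statistics and tracing
	 * elided): allocate a request covering the readahead window, let the
	 * cache and the netfs expand it, prime the rolling buffer from
	 * @ractl and kick off the read.  Readahead has no one to report
	 * errors to, so failures are simply dropped.
	 */
	struct netfs_io_request *rreq;
	struct netfs_inode *ictx = netfs_inode(ractl->mapping->host);
	int ret;

	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
				   readahead_pos(ractl), readahead_length(ractl),
				   NETFS_READAHEAD);
	if (IS_ERR(rreq))
		return;

	ret = netfs_begin_cache_read(rreq, ictx);
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto cleanup_free;

	netfs_rreq_expand(rreq, ractl);

	rreq->ractl = ractl;
	if (netfs_prime_buffer(rreq) < 0)
		goto cleanup_free;
	netfs_read_to_pagecache(rreq);

	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
	return;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
}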
EXPORT_SYMBOL(netfs_readahead);

/*
 * Create a rolling buffer with a single occupying folio.
 */
static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio)
{
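	/* Sketch: a one-segment rolling buffer wrapping the single folio,
	 * with the iterator limited to the request length.
	 */
	struct folio_queue *folioq;

	folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
	if (!folioq)
		return -ENOMEM;
	folioq_init(folioq);
	folioq_append(folioq, folio);

	rreq->buffer = folioq;
	rreq->buffer_tail = folioq;
	rreq->submitted = rreq->start + rreq->len;
	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, rreq->len);
	return 0;
}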

/*
 * Read into gaps in a folio partially filled by a streaming write.
 */
static int netfs_read_gaps(struct file *file, struct folio *folio)
{}

/**
 * netfs_read_folio - Helper to manage a read_folio request
 * @file: The file to read from
 * @folio: The folio to read
 *
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_read_folio(struct file *file, struct folio *folio)
{
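	/* Condensed sketch: wrap the folio in a single-folio buffer, pull the
	 * data in from the cache and/or server and wait.  On success the
	 * folio is assumed to be unlocked by read completion; tracing and
	 * statistics are elided.
	 */
	struct address_space *mapping = folio->mapping;
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	int ret;

	if (!folio_test_uptodate(folio) && folio_test_dirty(folio))
		return netfs_read_gaps(file, folio);

	rreq = netfs_alloc_request(mapping, file, folio_pos(folio),
				   folio_size(folio), NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto alloc_error;
	}

	ret = netfs_begin_cache_read(rreq, ctx);
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto discard;

	ret = netfs_create_singular_buffer(rreq, folio);
	if (ret < 0)
		goto discard;

	netfs_read_to_pagecache(rreq);
	ret = netfs_wait_for_read(rreq);
	netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
	return ret < 0 ? ret : 0;

discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
}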
EXPORT_SYMBOL(netfs_read_folio);

/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true. Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
				 bool always_fill)
{
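	/* Sketch of the checks spelled out above (the exact upstream body may
	 * differ): decide whether the read can be skipped and, if so, zero
	 * the parts of the folio that the write won't cover.
	 */
	struct inode *inode = folio_inode(folio);
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_folio(folio, pos);
	size_t plen = folio_size(folio);

	if (unlikely(always_fill)) {
		if (pos - offset + len <= i_size)
			return false; /* Folio entirely before EOF */
		folio_zero_segment(folio, 0, plen);
		folio_mark_uptodate(folio);
		return true;
	}

	/* Full folio write */
	if (offset == 0 && len >= plen)
		return true;

	/* Folio entirely beyond the EOF */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write covering the folio from the start to the EOF or beyond it */
	if (offset == 0 && pos + len >= i_size)
		goto zero_out;

	return false;
zero_out:
	folio_zero_segments(folio, 0, offset, offset + len, plen);
	return true;
}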

/**
 * netfs_write_begin - Helper to prepare for writing [DEPRECATED]
 * @ctx: The netfs context
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_read, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead or it may return an error.  It may also unlock and put the
 * folio, provided it sets ``*_folio`` to NULL, in which case a return of 0
 * will cause the folio to be re-got and the process to be retried.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 *
 * Note that this should be considered deprecated and netfs_perform_write()
 * used instead.
 */
int netfs_write_begin(struct netfs_inode *ctx,
		      struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, struct folio **_folio,
		      void **_fsdata)
{
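	/* Condensed sketch of the deprecated path: grab and lock the folio,
	 * let the netfs check for conflicting writes, skip the read when the
	 * write makes it unnecessary, otherwise read the folio in and wait.
	 * PG_private_2 waiting, tracing and statistics are elided.
	 */
	struct netfs_io_request *rreq;
	struct folio *folio;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

retry:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (ctx->ops->check_write_begin) {
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
		if (ret < 0)
			goto error;
		if (!folio)
			goto retry;
	}

	if (folio_test_uptodate(folio))
		goto have_folio;

	/* If the write covers or clears everything we would otherwise have
	 * to read, don't bother reading.
	 */
	if (!netfs_is_cache_enabled(ctx) &&
	    netfs_skip_folio_read(folio, pos, len, false))
		goto have_folio;

	rreq = netfs_alloc_request(mapping, file, folio_pos(folio),
				   folio_size(folio), NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto error;
	}
	rreq->no_unlock_folio = folio->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	ret = netfs_begin_cache_read(rreq, ctx);
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto error_put;

	ret = netfs_create_singular_buffer(rreq, folio);
	if (ret < 0)
		goto error_put;

	netfs_read_to_pagecache(rreq);
	ret = netfs_wait_for_read(rreq);
	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
	if (ret < 0)
		goto error;

have_folio:
	*_folio = folio;
	return 0;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
	return ret;
}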
EXPORT_SYMBOL(netfs_write_begin);

/*
 * Preload the data into a page we're proposing to write into.
 */
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
			     size_t offset, size_t len)
{
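	/* Sketch: essentially a read_folio on the target folio, except that
	 * the folio is assumed to stay locked for the caller (hence
	 * NETFS_RREQ_NO_UNLOCK_FOLIO); @offset and @len could be used to
	 * trim the read, but this sketch just fills the whole folio.
	 */
	struct address_space *mapping = folio->mapping;
	struct netfs_io_request *rreq;
	int ret;

	rreq = netfs_alloc_request(mapping, file, folio_pos(folio),
				   folio_size(folio), NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	rreq->no_unlock_folio = folio->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	ret = netfs_begin_cache_read(rreq, netfs_inode(mapping->host));
	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
		goto error_put;

	ret = netfs_create_singular_buffer(rreq, folio);
	if (ret < 0)
		goto error_put;

	netfs_read_to_pagecache(rreq);
	ret = netfs_wait_for_read(rreq);
	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
	return ret;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
	return ret;
}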

/**
 * netfs_buffered_read_iter - Filesystem buffered I/O read routine
 * @iocb: kernel I/O control block
 * @iter: destination for the data read
 *
 * This is the ->read_iter() routine for all filesystems that can use the page
 * cache directly.
 *
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
 * returned when no data can be read without waiting for I/O requests to
 * complete; it doesn't prevent readahead.
 *
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
 * shall be made for the read or for readahead.  When no data can be read,
 * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
 * possibly empty read shall be returned.
 *
 * Return:
 * * number of bytes copied, even for partial reads
 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
 */
ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
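	/* Sketch: buffered reads funnel through filemap_read() under the
	 * netfs read serialisation lock; files marked for direct or
	 * unbuffered I/O are rejected here.
	 */
	struct inode *inode = file_inode(iocb->ki_filp);
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) ||
			 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)))
		return -EINVAL;

	ret = netfs_start_io_read(inode);
	if (ret == 0) {
		ret = filemap_read(iocb, iter, 0);
		netfs_end_io_read(inode);
	}
	return ret;
}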
EXPORT_SYMBOL(netfs_buffered_read_iter);

/**
 * netfs_file_read_iter - Generic filesystem read routine
 * @iocb: kernel I/O control block
 * @iter: destination for the data read
 *
 * This is the ->read_iter() routine for all filesystems that can use the page
 * cache directly.
 *
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
 * returned when no data can be read without waiting for I/O requests to
 * complete; it doesn't prevent readahead.
 *
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
 * shall be made for the read or for readahead.  When no data can be read,
 * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
 * possibly empty read shall be returned.
 *
 * Return:
 * * number of bytes copied, even for partial reads
 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
 */
ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
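	/* Sketch: route direct/unbuffered files to the unbuffered path and
	 * everything else to netfs_buffered_read_iter() above.
	 */
	struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host);

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_read_iter(iocb, iter);

	return netfs_buffered_read_iter(iocb, iter);
}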
EXPORT_SYMBOL(netfs_file_read_iter);