// SPDX-License-Identifier: GPL-2.0
// linux/io_uring/rw.c
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

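/*
 * Per-request read/write state, carried in the io_kiocb's command area. In
 * the in-tree definition this wraps the struct kiocb driving the IO plus
 * the user-supplied buffer address, length and rwf_t flags from the SQE.
 */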
struct io_rw {};

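/* Can this request's file be driven non-blocking, i.e. with IOCB_NOWAIT? */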
static inline bool io_file_supports_nowait(struct io_kiocb *req)
{}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{}

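/*
 * Map the buffer described by the SQE into the request's iov_iter: a single
 * user buffer for READ/WRITE, a user iovec array (compat-aware) for
 * READV/WRITEV, or a selected provided buffer when REQ_F_BUFFER_SELECT is
 * set.
 */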
static int __io_import_iovec(int ddir, struct io_kiocb *req,
			     struct io_async_rw *io,
			     unsigned int issue_flags)
{}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct io_async_rw *io,
				  unsigned int issue_flags)
{}

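/*
 * Lifetime handling for the per-request async state (struct io_async_rw):
 * any iovec allocated for a vectored request is freed here, while the
 * async_data itself is recycled through the ctx alloc cache once the
 * request is done with it.
 */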
static void io_rw_iovec_free(struct io_async_rw *rw)
{}

static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{}

static int io_rw_alloc_async(struct io_kiocb *req)
{}

static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{}

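/*
 * Common prep for all read/write variants: pull the file offset, rw flags,
 * ioprio and buffer index out of the SQE and initialize the kiocb.
 * do_import controls whether the user buffer is imported at prep time or
 * deferred until issue (e.g. for provided buffers).
 */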
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir, bool do_import)
{}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			    int ddir)
{}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

/*
 * Multishot read is prepared just like a normal read/write request; the only
 * difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}
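
/*
 * A minimal sketch of the flow described above, assuming the regular read
 * prep can simply be reused; the _sketch name, and reusing io_prep_read()
 * directly, are illustrative assumptions rather than the in-tree body.
 */
static inline int io_read_mshot_prep_sketch(struct io_kiocb *req,
					    const struct io_uring_sqe *sqe)
{
	int ret;

	/* multishot reads only make sense with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	/* prep exactly like a single-shot read... */
	ret = io_prep_read(req, sqe);
	if (unlikely(ret))
		return ret;

	/* ...then tag the request as multishot */
	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}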

void io_readv_writev_cleanup(struct io_kiocb *req)
{}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{}

#ifdef CONFIG_BLOCK
static void io_resubmit_prep(struct io_kiocb *req)
{}

static bool io_rw_should_reissue(struct io_kiocb *req)
{}
#else
static void io_resubmit_prep(struct io_kiocb *req)
{
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

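/* For writes to regular files, drop the write accounting taken at submit. */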
static void io_req_end_write(struct io_kiocb *req)
{}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{}
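
/*
 * A minimal sketch of the above. The kiocb is passed in explicitly here
 * instead of being dug out of the request's command area, and the _sketch
 * name is illustrative; it is not the in-tree helper.
 */
static inline void io_req_io_end_sketch(struct io_kiocb *req,
					struct kiocb *kiocb)
{
	if (kiocb->ki_flags & IOCB_WRITE) {
		/* writes: finish the write accounting, then notify watchers */
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}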

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{}

static void io_complete_rw(struct kiocb *kiocb, long res)
{}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{}

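/*
 * Complete a read/write that returned without going async: fix up a partial
 * result against the originally requested length, update the file position
 * if needed, and post the completion (or hand the result to the kiocb's
 * completion handler).
 */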
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		       unsigned int issue_flags)
{}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{}

/*
 * For files that don't provide ->read_iter() or ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{}
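
/*
 * A minimal sketch of the fallback described above, written against a plain
 * iovec array rather than the iov_iter the real helper consumes: issue one
 * ->read()/->write() call per segment and stop on an error or a short
 * transfer. The _sketch name and parameters are illustrative assumptions.
 */
static ssize_t loop_rw_segments_sketch(int ddir, struct file *file,
				       const struct iovec *iov, int nr_segs,
				       loff_t *ppos)
{
	ssize_t done = 0;
	int i;

	for (i = 0; i < nr_segs; i++) {
		ssize_t nr;

		if (ddir == READ)
			nr = file->f_op->read(file, iov[i].iov_base,
					      iov[i].iov_len, ppos);
		else
			nr = file->f_op->write(file, iov[i].iov_base,
					       iov[i].iov_len, ppos);

		if (nr < 0) {
			/* only report the error if nothing was transferred */
			if (!done)
				done = nr;
			break;
		}
		done += nr;
		if (nr != iov[i].iov_len)
			break;
	}
	return done;
}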

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when the initial attempt to do the IO with the iocb armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{}
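
/*
 * A minimal sketch of the wakeup side described above, assuming the request
 * was stashed in the wait entry's ->private when the retry was armed. The
 * in-tree handler additionally matches the wait_page_key and clears
 * IOCB_WAITQ; the _sketch name marks this as illustrative only.
 */
static int io_async_buf_wake_sketch(struct wait_queue_entry *wait,
				    unsigned mode, int sync, void *arg)
{
	struct io_kiocb *req = wait->private;

	/* stop waiting on the page and retry the copy from task_work */
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}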

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{}
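
/*
 * A minimal sketch of the arming step described above. The wait_page_queue
 * storage is passed in by the caller here (in the in-tree code it lives in
 * the request's async data), and only the core checks and kiocb wiring are
 * shown; the _sketch name marks this as illustrative.
 */
static inline bool io_arm_buffered_retry_sketch(struct io_kiocb *req,
						struct kiocb *kiocb,
						struct wait_page_queue *wpq)
{
	/* only buffered, non-polled IO on files that support async unlocks */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;
	if (!(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	/* wake up through io_async_buf_func() once the page is unlocked */
	wpq->wait.func = io_async_buf_func;
	wpq->wait.private = req;
	wpq->wait.flags = 0;
	INIT_LIST_HEAD(&wpq->wait.entry);

	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wpq;
	return true;
}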

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{}

static bool need_complete_io(struct io_kiocb *req)
{}

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{}

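/*
 * Core read path shared by io_read() and io_read_mshot(): import the buffer,
 * try ->read_iter() (or the loop_rw_iter() fallback), and on -EAGAIN either
 * arm the page-unlock retry above or let the caller punt the request to the
 * async workers.
 */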
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{}

void io_rw_fail(struct io_kiocb *req)
{}

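/*
 * Reap completions for IOPOLL requests on this ring by polling the
 * underlying queues; force_nonspin tells the block layer not to spin
 * waiting for a completion.
 */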
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{}

void io_rw_cache_free(const void *entry)
{}