linux/io_uring/poll.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "refs.h"
#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {};

struct io_poll_table {};

#define IO_POLL_CANCEL_FLAG
#define IO_POLL_RETRY_FLAG
#define IO_POLL_REF_MASK

/*
 * We usually have 1-2 refs taken, so 128 is more than enough, and we want to
 * maximise the margin between this amount and the point where the ref count
 * would overflow.
 */
#define IO_POLL_REF_BIAS
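
/*
 * Illustrative sketch only, not the in-tree implementation: with normal usage
 * far below IO_POLL_REF_BIAS, a caller can refuse the cheap increment (and
 * instead set IO_POLL_RETRY_FLAG for the current owner) once the count nears
 * the bias, which keeps the refs part of the word well away from overflowing
 * into the flag bits. The helper name is invented and it operates on a bare
 * atomic_t for illustration.
 */
static inline bool io_poll_ref_try_bump_sketch(atomic_t *refs)
{
	/*
	 * Too many refs already in flight: flag a retry for the owner rather
	 * than bumping the count any further.
	 */
	if (unlikely(atomic_read(refs) >= IO_POLL_REF_BIAS)) {
		atomic_or(IO_POLL_RETRY_FLAG, refs);
		return false;
	}
	/* ownership goes to whoever raises the refs part from zero */
	return !(atomic_fetch_inc(refs) & IO_POLL_REF_MASK);
}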

#define IO_WQE_F_DOUBLE

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump the count to acquire ownership. Modifying a request
 * without owning it is disallowed; that prevents races when enqueueing
 * task_work and between poll arming and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{}
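
/*
 * Illustrative sketch of the ownership rule above; it is not the real wakeup
 * path and the helper name is invented. Any context that wants to touch the
 * request (record a result, queue task_work, remove wait entries) must first
 * win ownership; losers simply return and rely on the current owner
 * re-checking for new events before it drops ownership.
 */
static inline void io_poll_event_sketch(struct io_kiocb *req, __poll_t mask)
{
	if (!io_poll_get_ownership(req))
		return;	/* somebody else owns the request and will see the event */

	/*
	 * Owner-only section: the real code records the triggered mask and
	 * queues task_work here; doing either without ownership would race
	 * with poll arming and with other wakeups.
	 */
}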

static void io_poll_mark_cancelled(struct io_kiocb *req)
{}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{}

static void io_poll_req_insert(struct io_kiocb *req)
{}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{}

static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
{}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{}

static inline void io_poll_remove_entry(struct io_poll *poll)
{}

static void io_poll_remove_entries(struct io_kiocb *req)
{}

enum {};

static void __io_poll_execute(struct io_kiocb *req, int mask)
{}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{}

/*
 * All poll task_work should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. Returns IOU_POLL_NO_ACTION when no
 * action is required, i.e. the wakeup was spurious or a multishot CQE has
 * already been served. Returns IOU_POLL_DONE when it's done with the request,
 * in which case the mask is stored in req->cqe.res. Returns
 * IOU_POLL_REMOVE_POLL_USE_RES to indicate that the multishot poll should be
 * removed and that the result is already stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{}
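
/*
 * Illustrative sketch of a consumer of the return values documented above;
 * this is not io_poll_task_func() and the completion steps are only described
 * in comments.
 */
static inline void io_poll_tw_sketch(struct io_kiocb *req, struct io_tw_state *ts)
{
	int ret = io_poll_check_events(req, ts);

	if (ret == IOU_POLL_NO_ACTION)
		return;	/* spurious wakeup, or a multishot CQE was already posted */

	if (ret == IOU_POLL_DONE) {
		/* single-shot completion: the mask to post is in req->cqe.res */
	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
		/* ret is a negative error: fail the request with it */
	}
	/* all terminal outcomes then remove the poll entries and complete req */
}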

void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{}

static void io_poll_cancel_req(struct io_kiocb *req)
{}

#define IO_ASYNC_POLL_COMMON

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{}

/* fails only when the poll is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{}

static void io_poll_add_hash(struct io_kiocb *req)
{}

/*
 * Returns 0 when the request has been handed over for polling, in which case
 * the caller must not touch it any further. For any non-zero return the caller
 * still owns the request: negative values contain an error code, and a result
 * > 0 means polling completed inline with ipt.result_mask set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{}
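
/*
 * Illustrative sketch of a caller honouring the contract above; it is not one
 * of the real issue paths, the io_poll_table setup is elided, and the
 * io_req_set_res() usage is only indicative.
 */
static inline int io_arm_poll_sketch(struct io_kiocb *req, struct io_poll *poll,
				     __poll_t mask, unsigned issue_flags)
{
	struct io_poll_table ipt = {};
	int ret;

	/* real callers also hook up the poll_table queueing callback here */
	ret = __io_arm_poll_handler(req, poll, &ipt, mask, issue_flags);
	if (ret > 0) {
		/* completed inline: still ours, post ipt.result_mask as the result */
		io_req_set_res(req, ipt.result_mask, 0);
		return 0;
	}
	if (ret < 0)
		return ret;	/* error: the request is still owned by the caller */

	/* ret == 0: handed over for polling, must not touch the request anymore */
	return 0;
}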

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{}

/*
 * We can't reliably detect loops where a poll keeps triggering and the
 * subsequent issue keeps failing. Rather than failing these immediately,
 * allow a certain number of retries before giving up. Given that this
 * condition should _rarely_ trigger even once, we should be fine with a
 * larger value.
 */
#define APOLL_MAX_RETRY
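
/*
 * Illustrative sketch with an assumed per-request retry counter (the counter
 * passed in is hypothetical): every re-arm after a failed issue consumes
 * budget, and once APOLL_MAX_RETRY re-arms have been used up the request is
 * failed instead of being polled yet again.
 */
static inline bool io_apoll_may_retry_sketch(unsigned int *retries)
{
	if (*retries >= APOLL_MAX_RETRY)
		return false;	/* likely stuck in a trigger/fail loop: give up */
	(*retries)++;
	return true;
}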

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{}

int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{}
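
/*
 * Illustrative sketch of honouring the __must_hold() annotation above: take
 * ctx->uring_lock around the sweep. This is not one of the real cancellation
 * or exit paths.
 */
static inline bool io_poll_remove_all_locked_sketch(struct io_ring_ctx *ctx,
						    struct task_struct *tsk,
						    bool cancel_all)
{
	bool found;

	mutex_lock(&ctx->uring_lock);
	found = io_poll_remove_all(ctx, tsk, cancel_all);
	mutex_unlock(&ctx->uring_lock);
	return found;
}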

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{}

static int io_poll_disarm(struct io_kiocb *req)
{}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{}