/* linux/io_uring/io_uring.h */

#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

/* Internal issue result codes (sketch; the exact set varies across kernel versions). */
enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
	/* restart/requeue the request's task_work */
	IOU_REQUEUE		= -3072,
	/* terminate a multishot run; the result is already set in req->cqe.res */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

/* CQ waiter state used by io_cqring_wait() (core fields only in this sketch). */
struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned nr_timeouts;
	ktime_t timeout;
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/* wake once enough CQEs have been posted, or if a timeout has fired */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

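/*
 * Completion-side helpers: CQE cache refill, aux CQE posting and overflow
 * handling. When the CQ ring is full, completions may be queued on the
 * internal overflow list instead of being dropped.
 */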
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

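/*
 * Resolve the struct file backing a request, either from the normal fd
 * table or from the ring's fixed (registered) file table.
 */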
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

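/*
 * Task-work infrastructure: deferred per-request work is queued to the
 * owning task's io_uring context and run from tctx_task_work().
 */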
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
				 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				     int start, int end);

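/* Submission-side entry points: poll-driven issue, SQE submission and IOPOLL reaping. */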
int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

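/* Hooks invoked by io-wq worker threads to issue and free offloaded work. */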
struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

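/*
 * Request lifecycle helpers: freeing, queueing the next linked request and
 * refilling the task-reference and request allocation caches.
 */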
void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{}

#define io_for_each_link(pos, head)					\
	for (pos = (head); pos; pos = pos->link)

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
					struct io_uring_cqe **ret,
					bool overflow)
{}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * Inline and SQPOLL submissions already hold ->uring_lock; only a
	 * detached async worker (IO_URING_F_UNLOCKED) needs to take it here.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order CQE stores with the ring tail update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.head) == READ_ONCE(r->sq.tail) + ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entries aren't read before the tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
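/*
 * Minimal usage sketch (hypothetical call site): the result must already be
 * stored in req->cqe before deferring, e.g.
 *
 *	io_req_set_res(req, res, 0);
 *	io_req_complete_defer(req);
 *
 * The CQE is then posted later by an __io_submit_flush_completions() batch.
 */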

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

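/* Slab caches backing struct io_kiocb requests and legacy provided buffers. */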
extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb,
			   comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
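/*
 * Example (sketch): SQE slot N lives at ctx->sq_sqes + N * uring_sqe_size(ctx),
 * so an IORING_SETUP_SQE128 ring simply strides through the SQ array in
 * 128-byte steps.
 */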

static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       !llist_empty(&ctx->work_llist);
}
#endif