linux/io_uring/net.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {};

struct io_accept {};

struct io_socket {};

struct io_connect {};

struct io_bind {};

struct io_listen {};

struct io_sr_msg {};

/*
 * Number of times we'll try and do receives if there's more data. If we
 * exceed this limit, then add us to the back of the queue and retry from
 * there. This helps fairness between flooding clients.
 */
#define MULTISHOT_MAX_RETRY	32
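
/*
 * Illustrative sketch only, not the in-tree implementation: shows how a
 * per-request retry counter could be checked against MULTISHOT_MAX_RETRY
 * so that a flooding socket gets pushed to the back of the queue instead
 * of monopolising the ring. The 'retry_count' field and the helper name
 * io_mshot_over_limit() are assumptions made for illustration.
 */
#if 0
static inline bool io_mshot_over_limit(struct io_sr_msg *sr)
{
	/* below the cap: keep receiving inline */
	if (++sr->retry_count < MULTISHOT_MAX_RETRY)
		return false;
	/* cap reached: reset and tell the caller to requeue the request */
	sr->retry_count = 0;
	return true;
}
#endif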

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{}

static bool io_net_retry(struct socket *sock, int flags)
{}

static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
{}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{}

/* assign new iovec to kmsg, if we need to */
static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			     struct iovec *iov)
{}
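
/*
 * Sketch of the iovec hand-over described above, assuming the async msghdr
 * caches a kmalloc'ed iovec (named 'free_iov' here) and that a request
 * owning one must be flagged for cleanup. Field names follow common
 * io_uring conventions but are assumptions; this is not the in-tree code.
 */
#if 0
static int io_net_vec_assign_sketch(struct io_kiocb *req,
				    struct io_async_msghdr *kmsg,
				    struct iovec *iov)
{
	if (iov) {
		/* drop any previously cached iovec, keep the new one */
		req->flags |= REQ_F_NEED_CLEANUP;
		kfree(kmsg->free_iov);
		kmsg->free_iov = iov;
	}
	return 0;
}
#endif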

static inline void io_mshot_prep_retry(struct io_kiocb *req,
				       struct io_async_msghdr *kmsg)
{}

#ifdef CONFIG_COMPAT
static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir)
{}
#endif

static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir)
{}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{}

static int io_send_setup(struct io_kiocb *req)
{}

static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
{}

#define SENDMSG_FLAGS

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

static void io_req_msg_cleanup(struct io_kiocb *req,
			       unsigned int issue_flags)
{}

/*
 * For bundle completions, we need to figure out how many segments we consumed.
 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
 * the segments, then it's a trivial question to answer. If we have residual
 * data in the iter, then loop the segments to figure out how much we
 * transferred.
 */
static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
{}
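
/*
 * Sketch of the segment counting described above, with the iterator state
 * flattened into plain parameters for clarity: a UBUF iter is always one
 * buffer, a fully consumed IOVEC iter contributes all of its segments, and
 * otherwise the iovec array is walked until the transferred byte count is
 * used up. Illustrative only, not the in-tree implementation.
 */
#if 0
static int io_bundle_nbufs_sketch(const struct iovec *iov, int nr_segs,
				  bool is_ubuf, size_t residual, size_t ret)
{
	int nbufs = 0;

	if (is_ubuf)
		return 1;
	if (!residual)
		return nr_segs;

	/* count segments until the transferred bytes are accounted for */
	while (ret && nbufs < nr_segs) {
		size_t this_len = min(ret, iov[nbufs].iov_len);

		ret -= this_len;
		nbufs++;
	}
	return nbufs;
}
#endif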

static inline bool io_send_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  unsigned issue_flags)
{}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{}

static int io_recvmsg_mshot_prep(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg,
				 int namelen, size_t controllen)
{}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{}

static int io_recvmsg_prep_setup(struct io_kiocb *req)
{}

#define RECVMSG_FLAGS

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  bool mshot_finished, unsigned issue_flags)
{}
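
/*
 * Illustrative caller pattern for the contract above: the receive path
 * keeps issuing until io_recv_finish() reports that the request is done.
 * The loop shape and the do_one_receive() helper are assumptions made for
 * illustration; the real control flow in io_recv()/io_recvmsg() differs.
 */
#if 0
static void io_recv_mshot_loop_sketch(struct io_kiocb *req,
				      struct io_async_msghdr *kmsg,
				      unsigned int issue_flags)
{
	bool mshot_finished;
	int ret;

	do {
		/* hypothetical single-shot receive step */
		ret = do_one_receive(req, kmsg, &mshot_finished);
	} while (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags));
}
#endif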

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{}

struct io_recvmsg_multishot_hdr {};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{}

static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      size_t *len, unsigned int issue_flags)
{}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{}

void io_send_zc_cleanup(struct io_kiocb *req)
{}

#define IO_ZC_FLAGS_COMMON
#define IO_ZC_FLAGS_VALID

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{}

static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{}

static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
{}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{}

void io_sendrecv_fail(struct io_kiocb *req)
{}

#define ACCEPT_FLAGS

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_bind(struct io_kiocb *req, unsigned int issue_flags)
{}

int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

int io_listen(struct io_kiocb *req, unsigned int issue_flags)
{}

void io_netmsg_cache_free(const void *entry)
{}
#endif