/* linux/io_uring/kbuf.c */

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

/* NOTE(review): presumably the slab cache for legacy provided-buffer entries — no kmem_cache_create() is visible in this skeleton; confirm where it is initialized. */
struct kmem_cache *io_buf_cachep;

/* NOTE(review): struct body is missing in this skeleton (an empty struct is a GNU extension, not ISO C); presumably carries the IORING_OP_PROVIDE_BUFFERS parameters parsed from the SQE — TODO fill in. */
struct io_provide_buf {};

/*
 * NOTE(review): empty stub — signature suggests a bgid -> io_buffer_list
 * lookup within @ctx, returning NULL when the group is not registered;
 * confirm once implemented. Falling off the end of a non-void function
 * is UB if the caller uses the result.
 */
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{}

/*
 * NOTE(review): empty stub — presumably registers @bl under @bgid in @ctx,
 * returning 0 or a negative errno (kernel convention) — TODO confirm.
 * Empty body: non-void fall-through is UB if the result is used.
 */
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{}

/*
 * NOTE(review): empty stub — name suggests it returns a classic (legacy)
 * provided buffer to its list so it can be selected again, reporting
 * success via the bool — TODO confirm. Empty body of a bool function is
 * UB if the result is used.
 */
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{}

/*
 * NOTE(review): empty stub — presumably releases whatever selected buffer
 * @req holds, honoring @issue_flags locking rules — TODO confirm.
 */
void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{}

/*
 * NOTE(review): empty stub — presumably picks a legacy provided buffer
 * from @bl, clamps *len to the buffer's size, and returns its user
 * address (NULL when none is available) — TODO confirm. Empty body:
 * non-void fall-through is UB if the result is used.
 */
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{}

/*
 * NOTE(review): empty stub — presumably the iovec-filling variant of
 * legacy buffer selection: selects a buffer from @bl and describes it in
 * @iov, returning a count or negative errno — TODO confirm. Empty body:
 * non-void fall-through is UB if the result is used.
 */
static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{}

/*
 * NOTE(review): empty stub — signature suggests it returns the ring entry
 * for @head, with @mask wrapping the index into the power-of-2 ring
 * (i.e. &br->bufs[head & mask]) — TODO confirm. Empty body: non-void
 * fall-through is UB if the result is used.
 */
static struct io_uring_buf *io_ring_head_to_buf(struct io_uring_buf_ring *br,
						__u16 head, __u16 mask)
{}

/*
 * NOTE(review): empty stub — presumably selects the next buffer from a
 * registered (ring-mapped) buffer group @bl, clamping *len and returning
 * the user address — TODO confirm. Empty body: non-void fall-through is
 * UB if the result is used.
 */
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{}

/*
 * NOTE(review): empty stub — public entry point; presumably dispatches to
 * the ring-mapped or legacy provided-buffer selection path for @req's
 * buffer group — TODO confirm. Empty body: non-void fall-through is UB
 * if the result is used.
 */
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT		256

/*
 * NOTE(review): empty stub — presumably peeks (without consuming) up to
 * PEEK_MAX_IMPORT buffers from ring @bl into @arg for multi-buffer
 * operations, returning a count or negative errno — TODO confirm.
 * Empty body: non-void fall-through is UB if the result is used.
 */
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{}

/*
 * NOTE(review): empty stub — presumably the multi-buffer (consuming)
 * counterpart to io_buffer_select(); returns a count or negative errno —
 * TODO confirm. Empty body: non-void fall-through is UB if used.
 */
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{}

/*
 * NOTE(review): empty stub — presumably a non-consuming multi-buffer
 * lookup into @arg; returns a count or negative errno — TODO confirm.
 * Empty body: non-void fall-through is UB if the result is used.
 */
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{}

/*
 * NOTE(review): empty stub — presumably removes up to @nbufs buffers from
 * @bl, returning how many were removed — TODO confirm. Empty body:
 * non-void fall-through is UB if the result is used.
 */
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{}

/*
 * NOTE(review): empty stub — name suggests it drops a reference on @bl,
 * freeing it when the count hits zero — TODO confirm refcounting scheme.
 */
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{}

/*
 * NOTE(review): empty stub — presumably tears down every buffer list in
 * @ctx at ring shutdown — TODO confirm it is only called on ctx exit.
 */
void io_destroy_buffers(struct io_ring_ctx *ctx)
{}

/*
 * NOTE(review): empty stub — prep handler for IORING_OP_REMOVE_BUFFERS;
 * presumably validates @sqe fields and caches them in @req, returning 0
 * or a negative errno — TODO confirm. Empty body: non-void fall-through
 * is UB if the result is used.
 */
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

/*
 * NOTE(review): empty stub — issue handler paired with
 * io_remove_buffers_prep(); presumably removes the requested buffers and
 * posts the result — TODO confirm. Empty body: non-void fall-through is
 * UB if the result is used.
 */
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{}

/*
 * NOTE(review): empty stub — prep handler for IORING_OP_PROVIDE_BUFFERS;
 * presumably validates the SQE (addr/len/nbufs/bgid/bid ranges, e.g.
 * against MAX_BIDS_PER_BGID) into struct io_provide_buf — TODO confirm.
 * Empty body: non-void fall-through is UB if the result is used.
 */
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{}

/* number of io_buffer entries allocated per cache refill; 64 matches upstream — TODO confirm */
#define IO_BUFFER_ALLOC_BATCH 64

/*
 * NOTE(review): empty stub — presumably bulk-allocates io_buffer entries
 * (likely IO_BUFFER_ALLOC_BATCH at a time, from io_buf_cachep) into the
 * ctx cache; returns 0 or a negative errno — TODO confirm. Empty body:
 * non-void fall-through is UB if the result is used.
 */
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{}

/*
 * NOTE(review): empty stub — presumably adds the buffers described by
 * @pbuf to list @bl; returns 0 or a negative errno — TODO confirm.
 * Empty body: non-void fall-through is UB if the result is used.
 */
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{}

/*
 * NOTE(review): empty stub — issue handler paired with
 * io_provide_buffers_prep(); presumably finds/creates the target buffer
 * group list and calls io_add_buffers() — TODO confirm. Empty body:
 * non-void fall-through is UB if the result is used.
 */
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{}

/*
 * NOTE(review): empty stub — presumably pins the user-supplied pbuf-ring
 * memory described by @reg into @bl (application-allocated ring case);
 * returns 0 or a negative errno — TODO confirm. Empty body: non-void
 * fall-through is UB if the result is used.
 */
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{}

/*
 * NOTE(review): empty stub — presumably the kernel-allocated counterpart
 * to io_pin_pbuf_ring() (IOU_PBUF_RING_MMAP case): allocates ring memory
 * for later mmap by userspace; returns 0 or a negative errno — TODO
 * confirm. Empty body: non-void fall-through is UB if the result is used.
 */
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{}

/*
 * NOTE(review): empty stub — handler for IORING_REGISTER_PBUF_RING;
 * presumably copies an io_uring_buf_reg from @arg, validates it, and
 * installs a ring-mapped buffer group — TODO confirm. Empty body:
 * non-void fall-through is UB if the result is used.
 */
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{}

/*
 * NOTE(review): empty stub — handler for IORING_UNREGISTER_PBUF_RING;
 * presumably tears down the buffer group identified via @arg — TODO
 * confirm. Empty body: non-void fall-through is UB if the result is used.
 */
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{}

/*
 * NOTE(review): empty stub — handler for IORING_REGISTER_PBUF_STATUS;
 * presumably copies ring state (e.g. current head) for a buffer group
 * back to userspace via @arg — TODO confirm. Empty body: non-void
 * fall-through is UB if the result is used.
 */
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{}

/*
 * NOTE(review): empty stub — presumably a reference-taking bgid lookup
 * used by the mmap path (likely paired with io_put_bl()); may return an
 * ERR_PTR — TODO confirm. Empty body: non-void fall-through is UB if the
 * result is used.
 */
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{}

/*
 * NOTE(review): empty stub — presumably the mmap handler that maps a
 * kernel-allocated pbuf ring into the calling process's @vma; returns 0
 * or a negative errno — TODO confirm. Empty body: non-void fall-through
 * is UB if the result is used.
 */
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
{}