// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#else
#define BAD_RING(_vq, fmt, args...)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {};

struct vring_desc_state_packed {};

struct vring_desc_extra {};

struct vring_virtqueue_split {};

struct vring_virtqueue_packed {};

struct vring_virtqueue {};

static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name,
					       struct device *dma_dev);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);

/*
 * Helpers.
 */

/* Convert a struct virtqueue pointer to its enclosing vring_virtqueue. */
#define to_vvq(_vq)

static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
				   unsigned int total_sg)
{}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(const struct virtio_device *vdev)
{}
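
/*
 * A minimal sketch of the decision described above, assuming the existing
 * virtio_has_dma_quirk() and xen_domain() helpers; the real
 * vring_use_dma_api() body is elided in this skeleton.
 */
static bool __maybe_unused vring_use_dma_api_sketch(const struct virtio_device *vdev)
{
	/* No bypass quirk advertised: always safe to use the DMA API. */
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Xen guests need the DMA API even when the quirk is present. */
	if (xen_domain())
		return true;

	/* Otherwise preserve the historic behavior and bypass the DMA API. */
	return false;
}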

size_t virtio_max_dma_size(const struct virtio_device *vdev)
{}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle,
			     struct device *dma_dev)
{}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{}

/* Map one sg entry. */
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
			    enum dma_data_direction direction, dma_addr_t *addr)
{}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{}

static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   const struct vring_desc *desc)
{}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{}

static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{}

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{}

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{}

static bool more_used_split(const struct vring_virtqueue *vq)
{}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{}

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{}

static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{}

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{}

static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
				       struct vring_virtqueue *vq)
{}

static void virtqueue_reinit_split(struct vring_virtqueue *vq)
{}

static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
					 struct vring_virtqueue_split *vring_split)
{}

static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
{}

static void vring_free_split(struct vring_virtqueue_split *vring_split,
			     struct virtio_device *vdev, struct device *dma_dev)
{}

static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
				   struct virtio_device *vdev,
				   u32 num,
				   unsigned int vring_align,
				   bool may_reduce_num,
				   struct device *dma_dev)
{}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{}

static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
{}


/*
 * Packed ring specific functions - *_packed().
 */
static bool packed_used_wrap_counter(u16 last_used_idx)
{}

static u16 packed_last_used(u16 last_used_idx)
{}

static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
				     const struct vring_desc_extra *extra)
{}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    const struct vring_packed_desc *desc)
{}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{}

static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{}

static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{}

static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{}

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{}

static bool more_used_packed(const struct vring_virtqueue *vq)
{}

static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  unsigned int *len,
					  void **ctx)
{}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{}

static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{}

static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{}

static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{}

static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
			      struct virtio_device *vdev,
			      struct device *dma_dev)
{}

static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
				    struct virtio_device *vdev,
				    u32 num, struct device *dma_dev)
{}

static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
{}

static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
					bool callback)
{}

static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
					  struct vring_virtqueue_packed *vring_packed)
{}

static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
{}

static struct virtqueue *vring_create_virtqueue_packed(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{}

static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
{}

static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
					 void (*recycle)(struct virtqueue *vq, void *buf))
{}

static int virtqueue_enable_after_reset(struct virtqueue *_vq)
{}

/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
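
/*
 * A hedged usage sketch for virtqueue_add_sgs(): a hypothetical driver posts
 * one device-readable header followed by one device-writable status byte as
 * a single request. my_vq, my_hdr, my_status and the token are illustrative.
 */
static int __maybe_unused example_add_request(struct virtqueue *my_vq,
					      void *my_hdr, unsigned int hdr_len,
					      u8 *my_status, void *token)
{
	struct scatterlist hdr_sg, status_sg, *sgs[2];

	sg_init_one(&hdr_sg, my_hdr, hdr_len);
	sg_init_one(&status_sg, my_status, sizeof(*my_status));
	sgs[0] = &hdr_sg;	/* readable by the device (out) */
	sgs[1] = &status_sg;	/* writable by the device (in) */

	/* One out sg list followed by one in sg list. */
	return virtqueue_add_sgs(my_vq, sgs, 1, 1, token, GFP_ATOMIC);
}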

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
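
/*
 * A hedged sketch of posting a single receive buffer with
 * virtqueue_add_inbuf(), using the buffer pointer itself as the token.
 * my_vq, my_buf and my_len are illustrative only.
 */
static int __maybe_unused example_post_rx_buffer(struct virtqueue *my_vq,
						 void *my_buf, unsigned int my_len)
{
	struct scatterlist sg;

	sg_init_one(&sg, my_buf, my_len);
	return virtqueue_add_inbuf(my_vq, &sg, 1, my_buf, GFP_KERNEL);
}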

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			void *ctx,
			gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_dma_dev - get the dma dev
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns the DMA device, which can be used with the DMA API.
 */
struct device *virtqueue_dma_dev(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_dma_dev);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
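
/*
 * A hedged sketch of the kick_prepare()/notify() split described above:
 * a hypothetical driver serializes the prepare step under its own lock and
 * issues the unserialized notify after dropping it. my_vq and my_lock are
 * illustrative only.
 */
static void __maybe_unused example_kick_outside_lock(struct virtqueue *my_vq,
						     spinlock_t *my_lock)
{
	bool needs_kick;

	spin_lock(my_lock);
	needs_kick = virtqueue_kick_prepare(my_vq);
	spin_unlock(my_lock);

	if (needs_kick)
		virtqueue_notify(my_vq);
}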

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
{}
EXPORT_SYMBOL_GPL(virtqueue_poll);
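
/*
 * A hedged sketch of the enable_cb_prepare()/poll() pairing documented above:
 * capture the opaque queue state while still serialized, then test it with
 * virtqueue_poll() to detect buffers that raced with re-enabling callbacks.
 */
static bool __maybe_unused example_reenable_and_check(struct virtqueue *my_vq)
{
	unsigned int opaque = virtqueue_enable_cb_prepare(my_vq);

	/* True means more used buffers arrived; the caller should keep polling. */
	return virtqueue_poll(my_vq, opaque);
}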

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
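
/*
 * A hedged sketch of the usual completion loop built from the calls
 * documented above: drain used buffers with callbacks disabled, then
 * re-enable and re-check to close the race window. my_complete() is a
 * hypothetical per-driver completion handler.
 */
static void __maybe_unused example_drain_used(struct virtqueue *my_vq,
					      void (*my_complete)(void *buf, unsigned int len))
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(my_vq);
		while ((buf = virtqueue_get_buf(my_vq, &len)) != NULL)
			my_complete(buf, len);
	} while (!virtqueue_enable_cb(my_vq));
}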

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful for device
 * shutdown or when resetting the queue.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
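
/*
 * A hedged shutdown sketch for virtqueue_detach_unused_buf(): with the queue
 * already stopped, return every still-unused token so the driver can free it.
 * my_free() is a hypothetical per-driver release helper.
 */
static void __maybe_unused example_drain_on_remove(struct virtqueue *my_vq,
						   void (*my_free)(void *buf))
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(my_vq)) != NULL)
		my_free(buf);
}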

static inline bool more_used(const struct vring_virtqueue *vq)
{}

/**
 * vring_interrupt - notify a virtqueue on an interrupt
 * @irq: the IRQ number (ignored)
 * @_vq: the struct virtqueue to notify
 *
 * Calls the callback function of @_vq to process the virtqueue
 * notification.
 */
irqreturn_t vring_interrupt(int irq, void *_vq)
{}
EXPORT_SYMBOL_GPL(vring_interrupt);

/* Only available for split ring */
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name,
					       struct device *dma_dev)
{}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

struct virtqueue *vring_create_virtqueue_dma(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name,
	struct device *dma_dev)
{}
EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);

/**
 * virtqueue_resize - resize the vring of vq
 * @_vq: the struct virtqueue we're talking about.
 * @num: new ring num
 * @recycle: callback to recycle unused buffers
 *
 * When it is really necessary to create a new vring, this sets the current vq
 * into the reset state and then calls the passed callback to recycle the
 * buffers that are no longer used. Only after the new vring has been
 * successfully created is the old vring released.
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -ENOMEM: Failed to allocate a new ring; fall back to the original ring size.
 *  The vq can still work normally.
 * -EBUSY: Failed to sync with the device; the vq may not work properly.
 * -ENOENT: Transport or device not supported.
 * -E2BIG/-EINVAL: @num is invalid.
 * -EPERM: Operation not permitted.
 *
 */
int virtqueue_resize(struct virtqueue *_vq, u32 num,
		     void (*recycle)(struct virtqueue *vq, void *buf))
{}
EXPORT_SYMBOL_GPL(virtqueue_resize);
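
/*
 * A hedged sketch of a recycle callback as passed to virtqueue_resize() (and
 * virtqueue_reset() below): the core hands back each unused "data" token so
 * the driver can release whatever it attached. Assumes the token was a
 * kmalloc()ed buffer; real drivers free their own structures here.
 */
static void __maybe_unused example_recycle(struct virtqueue *vq, void *buf)
{
	/* @buf is the token originally passed to virtqueue_add_*(). */
	kfree(buf);
}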

/**
 * virtqueue_set_dma_premapped - set the vring premapped mode
 * @_vq: the struct virtqueue we're talking about.
 *
 * Enable the premapped mode of the vq.
 *
 * A vring in premapped mode does not do DMA internally, so the driver must
 * do the DMA mapping in advance and pass the DMA address through the
 * dma_address field of the scatterlist. When the driver gets a used buffer
 * back from the vring, it has to unmap the DMA address itself.
 *
 * This function must be called immediately after creating the vq, or after vq
 * reset, and before adding any buffers to it.
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -EINVAL: too late to enable premapped mode, the vq already contains buffers.
 */
int virtqueue_set_dma_premapped(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
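
/*
 * A hedged ordering sketch for premapped mode as described above: the mode
 * must be enabled right after the vq is created or reset, before any buffer
 * is added, otherwise -EINVAL is returned.
 */
static int __maybe_unused example_enable_premapped(struct virtqueue *my_vq)
{
	/* No buffers may have been added to my_vq yet. */
	return virtqueue_set_dma_premapped(my_vq);
}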

/**
 * virtqueue_reset - detach and recycle all unused buffers
 * @_vq: the struct virtqueue we're talking about.
 * @recycle: callback to recycle unused buffers
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error.
 * 0: success.
 * -EBUSY: Failed to sync with the device; the vq may not work properly.
 * -ENOENT: Transport or device not supported.
 * -EPERM: Operation not permitted.
 */
int virtqueue_reset(struct virtqueue *_vq,
		    void (*recycle)(struct virtqueue *vq, void *buf))
{}
EXPORT_SYMBOL_GPL(virtqueue_reset);

/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

static void vring_free(struct virtqueue *_vq)
{}

void vring_del_virtqueue(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

u32 vring_notification_data(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(vring_notification_data);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_break(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(__virtqueue_break);

/*
 * This function should only be called by the core, not directly by the driver.
 */
void __virtqueue_unbreak(struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);

bool virtqueue_is_broken(const struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{}
EXPORT_SYMBOL_GPL(virtio_break_device);

/*
 * This should allow the device to be used by the driver. You may
 * need to grab appropriate locks to flush the write to
 * vq->broken. This should only be used in specific cases, e.g. probing
 * and restoring. This function should only be called by the core, not
 * directly by the driver.
 */
void __virtio_unbreak_device(struct virtio_device *dev)
{}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);

dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

/* Only available for split ring */
const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

/**
 * virtqueue_dma_map_single_attrs - map DMA for _vq
 * @_vq: the struct virtqueue we're talking about.
 * @ptr: the pointer of the buffer to do dma
 * @size: the size of the buffer to do dma
 * @dir: DMA direction
 * @attrs: DMA Attrs
 *
 * The caller calls this to do the DMA mapping in advance. The resulting DMA
 * address can then be passed to this _vq when it is in premapped mode.
 *
 * Returns the DMA address. The caller should check it with
 * virtqueue_dma_mapping_error().
 */
dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{}
EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
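
/*
 * A hedged sketch of premapped-mode mapping, assuming my_vq was switched to
 * premapped mode with virtqueue_set_dma_premapped(): map the buffer up front
 * and validate the address with virtqueue_dma_mapping_error(). my_buf and
 * my_len are illustrative only.
 */
static dma_addr_t __maybe_unused example_premap_buffer(struct virtqueue *my_vq,
						       void *my_buf, size_t my_len)
{
	dma_addr_t addr;

	addr = virtqueue_dma_map_single_attrs(my_vq, my_buf, my_len,
					      DMA_TO_DEVICE, 0);
	if (virtqueue_dma_mapping_error(my_vq, addr))
		return DMA_MAPPING_ERROR;

	/* The driver then hands @addr to the vq via its scatterlist. */
	return addr;
}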

/**
 * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
 * @_vq: the struct virtqueue we're talking about.
 * @addr: the dma address to unmap
 * @size: the size of the buffer
 * @dir: DMA direction
 * @attrs: DMA Attrs
 *
 * Unmap the address that is mapped by the virtqueue_dma_map_* APIs.
 *
 */
void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{}
EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);

/**
 * virtqueue_dma_mapping_error - check dma address
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 *
 * Returns 0 if the DMA address is valid; any other value means the DMA
 * address is invalid.
 */
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
{}
EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);

/**
 * virtqueue_dma_need_sync - check a dma address needs sync
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 *
 * Check whether the DMA address mapped by the virtqueue_dma_map_* APIs needs
 * to be synchronized.
 *
 * Returns true if a sync is needed, false otherwise.
 */
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
{}
EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);

/**
 * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized.
 *
 */
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
					     dma_addr_t addr,
					     unsigned long offset, size_t size,
					     enum dma_data_direction dir)
{}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
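
/*
 * A hedged sketch of the sync pattern documented above: only sync the
 * streaming mapping for the CPU when virtqueue_dma_need_sync() says the
 * platform actually requires it. addr and len are illustrative only.
 */
static void __maybe_unused example_sync_before_read(struct virtqueue *my_vq,
						    dma_addr_t addr, size_t len)
{
	if (virtqueue_dma_need_sync(my_vq, addr))
		virtqueue_dma_sync_single_range_for_cpu(my_vq, addr, 0, len,
							DMA_FROM_DEVICE);
}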

/**
 * virtqueue_dma_sync_single_range_for_device - dma sync for device
 * @_vq: the struct virtqueue we're talking about.
 * @addr: DMA address
 * @offset: DMA address offset
 * @size: buf size for sync
 * @dir: DMA direction
 *
 * Before calling this function, use virtqueue_dma_need_sync() to confirm that
 * the DMA address really needs to be synchronized.
 */
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
						dma_addr_t addr,
						unsigned long offset, size_t size,
						enum dma_data_direction dir)
{}
EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);

MODULE_DESCRIPTION("Virtio ring implementation");
MODULE_LICENSE("GPL");