#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>
#ifdef DEBUG
#define BAD_RING …
#define START_USE …
#define END_USE …
#define LAST_ADD_TIME_UPDATE …
#define LAST_ADD_TIME_CHECK …
#define LAST_ADD_TIME_INVALID …
#else
#define BAD_RING(_vq, fmt, args...) …
#define START_USE(vq) …
#define END_USE(vq) …
#define LAST_ADD_TIME_UPDATE(vq) …
#define LAST_ADD_TIME_CHECK(vq) …
#define LAST_ADD_TIME_INVALID(vq) …
#endif
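/*
 * Debug plumbing: with DEBUG set, BAD_RING() reports a driver bug via
 * dev_err() and marks the ring broken, START_USE()/END_USE() trap
 * reentrant use of a virtqueue, and the LAST_ADD_TIME_* helpers warn
 * when buffers sit un-kicked for too long. Without DEBUG they all
 * compile away to (nearly) nothing.
 */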
struct vring_desc_state_split { … };
struct vring_desc_state_packed { … };
struct vring_desc_extra { … };
struct vring_virtqueue_split { … };
struct vring_virtqueue_packed { … };
struct vring_virtqueue { … };
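/*
 * Structure layering, roughly: struct vring_virtqueue wraps the public
 * struct virtqueue and carries either split or packed ring state. The
 * desc_state arrays hold the caller's data/ctx token for each in-flight
 * descriptor, and vring_desc_extra keeps a driver-private copy of each
 * descriptor's addr/len/flags/next so the unmap/complete paths never
 * have to re-read memory the device may write.
 */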
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
struct vring_virtqueue_split *vring_split,
struct virtio_device *vdev,
bool weak_barriers,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
struct device *dma_dev);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);
#define to_vvq(_vq) …
static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
unsigned int total_sg)
{ … }
static bool vring_use_dma_api(const struct virtio_device *vdev)
{ … }
size_t virtio_max_dma_size(const struct virtio_device *vdev)
{ … }
EXPORT_SYMBOL_GPL(…);
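/*
 * virtio_max_dma_size() reports the largest contiguous buffer a driver
 * should hand to this device in one descriptor: dma_max_mapping_size()
 * of the parent when the DMA API is in use, SIZE_MAX otherwise. A
 * hedged usage sketch ("max_seg" and the queue q are illustrative):
 *
 *	u32 max_seg = min_t(size_t, virtio_max_dma_size(vdev), U32_MAX);
 *
 *	blk_queue_max_segment_size(q, max_seg);
 *
 * mirroring how a block-style driver might cap its segment size.
 */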
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct device *dma_dev)
{ … }
static void vring_free_queue(struct virtio_device *vdev, size_t size,
void *queue, dma_addr_t dma_handle,
struct device *dma_dev)
{ … }
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{ … }
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
enum dma_data_direction direction, dma_addr_t *addr)
{ … }
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{ … }
static int vring_mapping_error(const struct vring_virtqueue *vq,
dma_addr_t addr)
{ … }
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{ … }
static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
const struct vring_desc *desc)
{ … }
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
unsigned int i)
{ … }
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
unsigned int total_sg,
gfp_t gfp)
{ … }
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
struct vring_desc *desc,
unsigned int i,
dma_addr_t addr,
unsigned int len,
u16 flags,
bool indirect)
{ … }
static inline int virtqueue_add_split(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
void *ctx,
gfp_t gfp)
{ … }
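/*
 * The split-ring add path above either chains total_sg descriptors
 * directly into the ring or, when virtqueue_use_indirect() approves,
 * packs them into a single indirect table from alloc_indirect_split()
 * so the request consumes only one ring slot. Output segments are
 * mapped DMA_TO_DEVICE and laid out before the DMA_FROM_DEVICE input
 * segments, in the order the device expects.
 */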
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{ … }
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
void **ctx)
{ … }
static bool more_used_split(const struct vring_virtqueue *vq)
{ … }
static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
unsigned int *len,
void **ctx)
{ … }
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{ … }
static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{ … }
static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{ … }
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{ … }
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{ … }
static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
struct vring_virtqueue *vq)
{ … }
static void virtqueue_reinit_split(struct vring_virtqueue *vq)
{ … }
static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
struct vring_virtqueue_split *vring_split)
{ … }
static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
{ … }
static void vring_free_split(struct vring_virtqueue_split *vring_split,
struct virtio_device *vdev, struct device *dma_dev)
{ … }
static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
struct virtio_device *vdev,
u32 num,
unsigned int vring_align,
bool may_reduce_num,
struct device *dma_dev)
{ … }
static struct virtqueue *vring_create_virtqueue_split(
unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
struct device *dma_dev)
{ … }
static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
{ … }
static bool packed_used_wrap_counter(u16 last_used_idx)
{ … }
static u16 packed_last_used(u16 last_used_idx)
{ … }
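/*
 * Packed-ring bookkeeping: last_used_idx multiplexes two values. The
 * low bits are the ring offset, and bit VRING_PACKED_EVENT_F_WRAP_CTR
 * (bit 15) is the used wrap counter; the two helpers above extract the
 * counter and the index respectively.
 */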
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
const struct vring_desc_extra *extra)
{ … }
static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
const struct vring_packed_desc *desc)
{ … }
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
gfp_t gfp)
{ … }
static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
gfp_t gfp)
{ … }
static inline int virtqueue_add_packed(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
void *ctx,
gfp_t gfp)
{ … }
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{ … }
static void detach_buf_packed(struct vring_virtqueue *vq,
unsigned int id, void **ctx)
{ … }
static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
u16 idx, bool used_wrap_counter)
{ … }
static bool more_used_packed(const struct vring_virtqueue *vq)
{ … }
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
unsigned int *len,
void **ctx)
{ … }
static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{ … }
static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{ … }
static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{ … }
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{ … }
static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{ … }
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{ … }
static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
struct device *dma_dev)
{ … }
static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
u32 num, struct device *dma_dev)
{ … }
static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
{ … }
static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
bool callback)
{ … }
static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
struct vring_virtqueue_packed *vring_packed)
{ … }
static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
{ … }
static struct virtqueue *vring_create_virtqueue_packed(
unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
struct device *dma_dev)
{ … }
static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
{ … }
static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
void (*recycle)(struct virtqueue *vq, void *buf))
{ … }
static int virtqueue_enable_after_reset(struct virtqueue *_vq)
{ … }
static inline int virtqueue_add(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
void *ctx,
gfp_t gfp)
{ … }
int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
gfp_t gfp)
{ … }
EXPORT_SYMBOL_GPL(…);
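/*
 * Usage sketch for virtqueue_add_sgs(): a hypothetical driver queues a
 * request with one driver-to-device segment followed by one
 * device-to-driver segment ("req" and "resp" are illustrative):
 *
 *	struct scatterlist req_sg, resp_sg, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&req_sg, req, sizeof(*req));
 *	sg_init_one(&resp_sg, resp, sizeof(*resp));
 *	sgs[0] = &req_sg;
 *	sgs[1] = &resp_sg;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *
 * All out_sgs lists must precede the in_sgs lists in sgs[], and the
 * data token ("req" here) is what virtqueue_get_buf() later returns.
 */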
int virtqueue_add_outbuf(struct virtqueue *vq,
struct scatterlist *sg, unsigned int num,
void *data,
gfp_t gfp)
{ … }
EXPORT_SYMBOL_GPL(…);
int virtqueue_add_inbuf(struct virtqueue *vq,
struct scatterlist *sg, unsigned int num,
void *data,
gfp_t gfp)
{ … }
EXPORT_SYMBOL_GPL(…);
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
struct scatterlist *sg, unsigned int num,
void *data,
void *ctx,
gfp_t gfp)
{ … }
EXPORT_SYMBOL_GPL(…);
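/*
 * RX-side sketch for the _ctx variant: a driver that needs per-buffer
 * metadata (e.g. virtio-net mergeable buffers) posts receive buffers
 * with a ctx pointer that comes back via virtqueue_get_buf_ctx().
 * "page" and the truesize encoding are illustrative:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, page_address(page), PAGE_SIZE);
 *	err = virtqueue_add_inbuf_ctx(vq, &sg, 1, page,
 *				      (void *)(unsigned long)truesize,
 *				      GFP_ATOMIC);
 */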
struct device *virtqueue_dma_dev(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_notify(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_kick(struct virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
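/*
 * virtqueue_kick() is simply virtqueue_kick_prepare() followed by
 * virtqueue_notify(). Keeping them separate lets a driver drop its lock
 * before the (potentially slow) transport notification ("priv" is an
 * illustrative driver structure):
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */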
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
void **ctx)
{ … }
EXPORT_SYMBOL_GPL(…);
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{ … }
EXPORT_SYMBOL_GPL(…);
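/*
 * Completion side, sketched: once the device marks buffers used, the
 * driver drains them in a loop; *len is the number of bytes the device
 * wrote into the in_sgs part (my_complete_request() is illustrative):
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		my_complete_request(buf, len);
 */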
void virtqueue_disable_cb(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_enable_cb(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
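/*
 * The canonical race-free drain pattern built from the callbacks above:
 * disable callbacks while processing, then re-enable and re-check,
 * because a buffer can become used between the last get_buf and the
 * re-enable (my_complete_request() is illustrative):
 *
 *	virtqueue_disable_cb(vq);
 *	do {
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			my_complete_request(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 *
 * virtqueue_enable_cb() returns false when more used buffers are
 * already pending; virtqueue_enable_cb_delayed() is the same idea but
 * hints the device to hold the interrupt until many buffers are used.
 */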
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline bool more_used(const struct vring_virtqueue *vq)
{ … }
irqreturn_t vring_interrupt(int irq, void *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
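/*
 * vring_interrupt() is what transports wire into their IRQ handlers: it
 * returns IRQ_NONE when no new buffer is used (and the ring is not
 * broken), and otherwise invokes the virtqueue's callback and returns
 * IRQ_HANDLED.
 */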
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
struct vring_virtqueue_split *vring_split,
struct virtio_device *vdev,
bool weak_barriers,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
struct device *dma_dev)
{ … }
struct virtqueue *vring_create_virtqueue(
unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{ … }
EXPORT_SYMBOL_GPL(…);
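/*
 * Transport-side sketch: a bus implementation (PCI, MMIO, channel I/O)
 * creates a queue roughly like this, where index/num, the alignment and
 * the "vm_notify" callback are the transport's own (illustrative here):
 *
 *	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev,
 *				    true, true, has_ctx,
 *				    vm_notify, callback, name);
 *
 * The two booleans are weak_barriers and may_reduce_num. Internally the
 * packed layout is chosen when VIRTIO_F_RING_PACKED was negotiated, the
 * split layout otherwise.
 */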
struct virtqueue *vring_create_virtqueue_dma(
unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
struct device *dma_dev)
{ … }
EXPORT_SYMBOL_GPL(…);
int virtqueue_resize(struct virtqueue *_vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf))
{ … }
EXPORT_SYMBOL_GPL(…);
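/*
 * virtqueue_resize() sketch: the caller must have stopped submitting;
 * buffers still queued are handed back one at a time through the
 * recycle callback so the driver can free or repost them (my_recycle()
 * and my_free_buf() are illustrative):
 *
 *	static void my_recycle(struct virtqueue *vq, void *buf)
 *	{
 *		my_free_buf(buf);
 *	}
 *
 *	err = virtqueue_resize(vq, new_num, my_recycle);
 *
 * If allocating the new ring fails, the implementation falls back to
 * reinitializing the old one rather than leaving the queue dead.
 */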
int virtqueue_set_dma_premapped(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
int virtqueue_reset(struct virtqueue *_vq,
void (*recycle)(struct virtqueue *vq, void *buf))
{ … }
EXPORT_SYMBOL_GPL(…);
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool context,
void *pages,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name)
{ … }
EXPORT_SYMBOL_GPL(…);
static void vring_free(struct virtqueue *_vq)
{ … }
void vring_del_virtqueue(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
u32 vring_notification_data(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
void vring_transport_features(struct virtio_device *vdev)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
void __virtqueue_break(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
void __virtqueue_unbreak(struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_is_broken(const struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
void virtio_break_device(struct virtio_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
void __virtio_unbreak_device(struct virtio_device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{ … }
EXPORT_SYMBOL_GPL(…);
const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{ … }
EXPORT_SYMBOL_GPL(…);
void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{ … }
EXPORT_SYMBOL_GPL(…);
int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
{ … }
EXPORT_SYMBOL_GPL(…);
bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
{ … }
EXPORT_SYMBOL_GPL(…);
void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{ … }
EXPORT_SYMBOL_GPL(…);
void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{ … }
EXPORT_SYMBOL_GPL(…);
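/*
 * Hedged sketch of the premapped flow the helpers above enable: a
 * driver that manages its own mappings (e.g. for XDP) flips the queue
 * to premapped mode right after creation, then maps buffers itself and
 * passes DMA addresses through the sg entries:
 *
 *	dma_addr_t addr;
 *
 *	if (virtqueue_set_dma_premapped(vq))
 *		return -EINVAL;
 *	addr = virtqueue_dma_map_single_attrs(vq, buf, len,
 *					      DMA_FROM_DEVICE, 0);
 *	if (virtqueue_dma_mapping_error(vq, addr))
 *		return -ENOMEM;
 *
 * and once the device has filled the buffer:
 *
 *	if (virtqueue_dma_need_sync(vq, addr))
 *		virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
 *							DMA_FROM_DEVICE);
 *
 * virtqueue_set_dma_premapped() only succeeds while the ring is empty,
 * i.e. immediately after creation or reset.
 */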
MODULE_DESCRIPTION(…);
MODULE_LICENSE(…);