#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/vhost_task.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>
#include "vhost.h"
static ushort max_mem_regions = …;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(…);
static int max_iotlb_entries = …;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(…);
enum { … };
#define vhost_used_event(vq) …
#define vhost_avail_event(vq) …
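/*
 * Legacy cross-endian support: with CONFIG_VHOST_CROSS_ENDIAN_LEGACY,
 * userspace may declare the guest's vring byte order for legacy
 * (pre-VIRTIO_F_VERSION_1) devices via VHOST_SET_VRING_ENDIAN, and
 * vq->is_le is derived from that when the vq is initialized.  Without it,
 * those ioctls return -ENOIOCTLCMD and is_le simply follows
 * VIRTIO_F_VERSION_1 or the host's native legacy endianness.
 */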
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{ … }
static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{ … }
static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{ … }
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{ … }
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
int __user *argp)
{ … }
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{ … }
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
return -ENOIOCTLCMD;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
int __user *argp)
{
return -ENOIOCTLCMD;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{ … }
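/*
 * Deferred work and file polling.  A vhost_work is a callback queued to the
 * device's worker task; vhost_poll wires such a work to a file's wait queue,
 * so a poll wakeup (for example a guest kick arriving on an eventfd) queues
 * the work for the worker instead of running the handler from the wakeup
 * path.
 */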
struct vhost_flush_struct { … };
static void vhost_flush_work(struct vhost_work *work)
{ … }
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{ … }
static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
void *key)
{ … }
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
__poll_t mask, struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_poll_stop(struct vhost_poll *poll)
{ … }
EXPORT_SYMBOL_GPL(…);
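/*
 * Queueing onto a worker and flushing it.  Works are appended to the
 * worker's work list and executed by its vhost_task; a flush queues a
 * special work carrying a completion and waits for it, which guarantees
 * that everything queued before the flush has finished running.
 */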
static void vhost_worker_queue(struct vhost_worker *worker,
struct vhost_work *work)
{ … }
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __vhost_worker_flush(struct vhost_worker *worker)
{ … }
static void vhost_worker_flush(struct vhost_worker *worker)
{ … }
void vhost_dev_flush(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_poll_queue(struct vhost_poll *poll)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{ … }
static void vhost_vq_meta_reset(struct vhost_dev *d)
{ … }
static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{ … }
bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{ … }
static bool vhost_run_work_list(void *data)
{ … }
static void vhost_worker_killed(void *data)
{ … }
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{ … }
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{ … }
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{ … }
bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
int pkts, int total_len)
{ … }
EXPORT_SYMBOL_GPL(…);
static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
unsigned int num)
{ … }
static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
unsigned int num)
{ … }
static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
unsigned int num)
{ … }
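/*
 * Device-level initialization.  A backend (vhost-net, vhost-scsi,
 * vhost-vsock, ...) embeds a struct vhost_dev, points each virtqueue's
 * handle_kick at its own handler and calls vhost_dev_init() from its open
 * path; ownership (mm and worker) is only taken later, when userspace
 * issues VHOST_SET_OWNER.  Illustrative sketch only, names below are
 * hypothetical and not part of this file:
 *
 *	n->vqs[0] = &n->vq;
 *	n->vq.handle_kick = my_handle_kick;
 *	vhost_dev_init(&n->dev, n->vqs, 1, UIO_MAXIOV,
 *		       MY_WEIGHT, MY_BYTE_WEIGHT, true, NULL);
 */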
void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
int iov_limit, int weight, int byte_weight,
bool use_worker,
int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg))
{ … }
EXPORT_SYMBOL_GPL(…);
long vhost_dev_check_owner(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
bool vhost_dev_has_owner(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
static void vhost_attach_mm(struct vhost_dev *dev)
{ … }
static void vhost_detach_mm(struct vhost_dev *dev)
{ … }
static void vhost_worker_destroy(struct vhost_dev *dev,
struct vhost_worker *worker)
{ … }
static void vhost_workers_free(struct vhost_dev *dev)
{ … }
static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{ … }
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
struct vhost_worker *worker)
{ … }
static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
struct vhost_vring_worker *info)
{ … }
static int vhost_new_worker(struct vhost_dev *dev,
struct vhost_worker_state *info)
{ … }
static int vhost_free_worker(struct vhost_dev *dev,
struct vhost_worker_state *info)
{ … }
static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
struct vhost_virtqueue **vq, u32 *id)
{ … }
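/*
 * Worker ioctls (VHOST_NEW_WORKER, VHOST_FREE_WORKER,
 * VHOST_ATTACH_VRING_WORKER, VHOST_GET_VRING_WORKER): these let userspace
 * create additional workers and map individual virtqueues onto them instead
 * of the default per-device worker.
 */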
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
void __user *argp)
{ … }
EXPORT_SYMBOL_GPL(…);
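/* Caller should hold the device mutex: binds current->mm to the device and
 * creates the default worker unless the device was initialized with
 * use_worker == false. */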
long vhost_dev_set_owner(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
static struct vhost_iotlb *iotlb_alloc(void)
{ … }
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_dev_stop(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_clear_msg(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_dev_cleanup(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
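/*
 * Userspace access validation.  The helpers below check that the dirty log
 * bitmap, the memory map and the vring rings supplied by userspace cover
 * the address ranges they claim, so that later accesses can rely on them.
 */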
static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{ … }
static bool vhost_overflow(u64 uaddr, u64 size)
{ … }
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
int log_all)
{ … }
static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
u64 addr, unsigned int size,
int type)
{ … }
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
int log_all)
{ … }
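/*
 * Guest memory accessors.  Without a device IOTLB the vring addresses are
 * plain host virtual addresses and are accessed directly with the usual
 * uaccess primitives; with an IOTLB configured each access first tries the
 * per-vq metadata cache and otherwise falls back to translate_desc() plus
 * an iov-based copy, reporting a miss to userspace when no mapping exists.
 */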
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access);
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
const void *from, unsigned size)
{ … }
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
void __user *from, unsigned size)
{ … }
static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
void __user *addr, unsigned int size,
int type)
{ … }
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
void __user *addr, unsigned int size,
int type)
{ … }
#define vhost_put_user(vq, x, ptr) …
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{ … }
static inline int vhost_put_used(struct vhost_virtqueue *vq,
struct vring_used_elem *head, int idx,
int count)
{ … }
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{ … }
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{ … }
#define vhost_get_user(vq, x, ptr, type) …
#define vhost_get_avail(vq, x, ptr) …
#define vhost_get_used(vq, x, ptr) …
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{ … }
static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{ … }
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
{ … }
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
__virtio16 *head, int idx)
{ … }
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
__virtio16 *flags)
{ … }
static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
__virtio16 *event)
{ … }
static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
__virtio16 *idx)
{ … }
static inline int vhost_get_desc(struct vhost_virtqueue *vq,
struct vring_desc *desc, int idx)
{ … }
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
struct vhost_iotlb_msg *msg)
{ … }
static bool umem_access_ok(u64 uaddr, u64 size, int access)
{ … }
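/*
 * Device IOTLB protocol: userspace writes vhost_iotlb_msg updates and
 * invalidations into the char device (vhost_chr_write_iter below) and reads
 * miss reports queued by vhost_iotlb_miss() back out of it
 * (vhost_chr_read_iter); virtqueues that hit a miss are re-poked via
 * vhost_iotlb_notify_vq() once the matching update arrives.
 */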
static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{ … }
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct iov_iter *from)
{ … }
EXPORT_SYMBOL(…);
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait)
{ … }
EXPORT_SYMBOL(…);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
int noblock)
{ … }
EXPORT_SYMBOL_GPL(…);
static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{ … }
static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
vring_desc_t __user *desc,
vring_avail_t __user *avail,
vring_used_t __user *used)
{ … }
static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
const struct vhost_iotlb_map *map,
int type)
{ … }
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
int access, u64 addr, u64 len, int type)
{ … }
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool vhost_log_access_ok(struct vhost_dev *dev)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
void __user *log_base,
bool log_used,
u64 log_addr)
{ … }
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
void __user *log_base)
{ … }
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{ … }
static long vhost_vring_set_num(struct vhost_dev *d,
struct vhost_virtqueue *vq,
void __user *argp)
{ … }
static long vhost_vring_set_addr(struct vhost_dev *d,
struct vhost_virtqueue *vq,
void __user *argp)
{ … }
static long vhost_vring_set_num_addr(struct vhost_dev *d,
struct vhost_virtqueue *vq,
unsigned int ioctl,
void __user *argp)
{ … }
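/*
 * Per-vring ioctls: configure the ring size, guest addresses, base index
 * and the kick/call/error eventfds for one virtqueue.  All of them run
 * under the vq mutex.
 */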
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{ … }
EXPORT_SYMBOL_GPL(…);
int vhost_init_device_iotlb(struct vhost_dev *d)
{ … }
EXPORT_SYMBOL_GPL(…);
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{ … }
EXPORT_SYMBOL_GPL(…);
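/*
 * Dirty page logging for live migration: when VHOST_F_LOG_ALL is enabled,
 * every guest-visible write done by the host (used ring updates, buffer
 * data) is mirrored by setting the corresponding bit in the userspace
 * bitmap at log_base, one bit per page of guest memory.
 */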
static int set_bit_to_user(int nr, void __user *addr)
{ … }
static int log_write(void __user *log_base,
u64 write_address, u64 write_length)
{ … }
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{ … }
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{ … }
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len, struct iovec *iov, int count)
{ … }
EXPORT_SYMBOL_GPL(…);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{ … }
static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{ … }
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access)
{ … }
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{ … }
static int get_indirect(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num,
struct vring_desc *indirect)
{ … }
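/*
 * vhost_get_vq_desc() looks at the first available descriptor chain and
 * converts it into iovecs: output ("out") descriptors first, then input
 * ("in") descriptors.  It returns the head descriptor index, vq->num if
 * nothing is available (never a valid index), or a negative errno on an
 * access error or IOTLB miss.
 */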
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
{ … }
EXPORT_SYMBOL_GPL(…);
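/*
 * Typical backend use of the API above and below, greatly simplified and
 * not part of this file (a kick-handler style loop; locking, logging and
 * backend-specific processing omitted):
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;			// fault or IOTLB miss
 *		if (head == vq->num) {		// ring is empty
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				// more buffers raced in, keep going
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		len = consume_buffers(vq->iov, out, in);   // backend-specific
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *		if (unlikely(vhost_exceeds_weight(vq, ++pkts, total_len += len)))
 *			break;
 *	}
 */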
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{ … }
EXPORT_SYMBOL_GPL(…);
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
struct vring_used_elem *heads,
unsigned count)
{ … }
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
unsigned count)
{ … }
EXPORT_SYMBOL_GPL(…);
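/*
 * Guest notification: vhost_notify() decides whether an interrupt is needed,
 * honouring VIRTIO_RING_F_EVENT_IDX (used_event) or the
 * VRING_AVAIL_F_NO_INTERRUPT flag, and vhost_signal() fires the call eventfd
 * only when it says so.
 */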
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{ … }
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_add_used_and_signal(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
unsigned int head, int len)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
struct vring_used_elem *heads, unsigned count)
{ … }
EXPORT_SYMBOL_GPL(…);
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{ … }
EXPORT_SYMBOL_GPL(…);
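/*
 * vhost_msg_node helpers: allocation and queueing of the messages (IOTLB
 * miss reports in particular) that vhost_chr_read_iter() hands to userspace.
 */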
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
struct vhost_msg_node *node)
{ … }
EXPORT_SYMBOL_GPL(…);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head)
{ … }
EXPORT_SYMBOL_GPL(…);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __init vhost_init(void)
{ … }
static void __exit vhost_exit(void)
{ … }
module_init(vhost_init);
module_exit(vhost_exit);
MODULE_VERSION(…);
MODULE_LICENSE(…);
MODULE_AUTHOR(…);
MODULE_DESCRIPTION(…);