linux/drivers/vhost/vhost.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/vhost_task.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {};

#define vhost_used_event(vq)
#define vhost_avail_event(vq)
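
/*
 * Illustrative sketch (not necessarily the exact upstream definitions):
 * with VIRTIO_RING_F_EVENT_IDX negotiated, the split-ring spec places
 * used_event directly after the avail ring entries and avail_event
 * directly after the used ring entries, which is what the two collapsed
 * macros above compute.
 */
#define vhost_used_event_example(vq) \
	((__virtio16 __user *)&(vq)->avail->ring[(vq)->num])
#define vhost_avail_event_example(vq) \
	((__virtio16 __user *)&(vq)->used->ring[(vq)->num])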

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{}

struct vhost_flush_struct {};

static void vhost_flush_work(struct vhost_work *work)
{}
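
/*
 * Illustrative sketch of the flush mechanism (assumed layout, not the
 * exact upstream structure): a flush queues a work item whose only job
 * is to complete a completion; waiting on that completion guarantees
 * that every work queued before it on the same worker has finished.
 */
struct vhost_flush_struct_example {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work_example(struct vhost_work *work)
{
	struct vhost_flush_struct_example *s;

	s = container_of(work, struct vhost_flush_struct_example, work);
	complete(&s->wait_event);
}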

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{}
EXPORT_SYMBOL_GPL();

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();

/* Start polling a file. We add ourselves to the file's wait queue. The caller
 * must keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{}
EXPORT_SYMBOL_GPL();

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{}
EXPORT_SYMBOL_GPL();
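
/*
 * Illustrative only: how a backend typically drives the poll helpers.
 * backend_handle_kick and kick_file are hypothetical stand-ins, not taken
 * from a real driver. Init binds the work function and event mask, start
 * registers on the file's wait queue, stop unregisters, and a flush makes
 * sure no queued work is still running before the file reference is dropped.
 */
static int example_backend_start_polling(struct vhost_dev *dev,
					 struct vhost_virtqueue *vq,
					 struct file *kick_file,
					 vhost_work_fn_t backend_handle_kick)
{
	int ret;

	vhost_poll_init(&vq->poll, backend_handle_kick, EPOLLIN, dev, vq);

	/* Register on the file's wait queue; caller keeps the file ref. */
	ret = vhost_poll_start(&vq->poll, kick_file);
	if (ret)
		return ret;

	/* ...later, before dropping the file reference: */
	vhost_poll_stop(&vq->poll);
	vhost_dev_flush(dev);
	return 0;
}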

static void vhost_worker_queue(struct vhost_worker *worker,
			       struct vhost_work *work)
{}

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{}
EXPORT_SYMBOL_GPL();

/**
 * __vhost_worker_flush - flush a worker
 * @worker: worker to flush
 *
 * The worker's flush_mutex must be held.
 */
static void __vhost_worker_flush(struct vhost_worker *worker)
{}

static void vhost_worker_flush(struct vhost_worker *worker)
{}
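
/* Illustrative sketch: the unlocked helper above is expected to be wrapped
 * with the worker's flush mutex, roughly as below. The flush_mutex field
 * name follows the kernel-doc above and is an assumption here. */
static void vhost_worker_flush_example(struct vhost_worker *worker)
{
	mutex_lock(&worker->flush_mutex);
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->flush_mutex);
}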

void vhost_dev_flush(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

/* A lockless hint for busy polling code to exit the loop */
bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();
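
/* Illustrative sketch of how a busy-polling backend can use the hint above:
 * spin for a bounded time, but bail out as soon as the worker has pending
 * work or the scheduler wants the CPU back (timeout handling simplified). */
static void example_busy_poll(struct vhost_virtqueue *vq,
			      unsigned long endtime)
{
	while (!vhost_vq_has_work(vq) &&
	       !need_resched() &&
	       time_before(jiffies, endtime))
		cpu_relax();
}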

void vhost_poll_queue(struct vhost_poll *poll)
{}
EXPORT_SYMBOL_GPL();

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{}

bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{}

static bool vhost_run_work_list(void *data)
{}

static void vhost_worker_killed(void *data)
{}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{}
EXPORT_SYMBOL_GPL();

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{}
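
/*
 * The three helpers above compute how many bytes of guest memory each part
 * of a split ring occupies for `num` descriptors. A hedged sketch of the
 * standard layout (the event-idx field adds 2 bytes to the avail and used
 * rings when VIRTIO_RING_F_EVENT_IDX is negotiated):
 *
 *   desc:  num * sizeof(struct vring_desc)
 *   avail: sizeof(vring_avail_t) + num * sizeof(__virtio16)          [+ 2]
 *   used:  sizeof(vring_used_t)  + num * sizeof(struct vring_used_elem) [+ 2]
 */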

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg))
{}
EXPORT_SYMBOL_GPL();

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

static void vhost_attach_mm(struct vhost_dev *dev)
{}

static void vhost_detach_mm(struct vhost_dev *dev)
{}

static void vhost_worker_destroy(struct vhost_dev *dev,
				 struct vhost_worker *worker)
{}

static void vhost_workers_free(struct vhost_dev *dev)
{}

static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{}

/* Caller must have device mutex */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				     struct vhost_worker *worker)
{}

/* Caller must have device mutex */
static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				  struct vhost_vring_worker *info)
{}

/* Caller must have device mutex */
static int vhost_new_worker(struct vhost_dev *dev,
			    struct vhost_worker_state *info)
{}

/* Caller must have device mutex */
static int vhost_free_worker(struct vhost_dev *dev,
			     struct vhost_worker_state *info)
{}

static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
				  struct vhost_virtqueue **vq, u32 *id)
{}

/* Caller must have device mutex */
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp)
{}
EXPORT_SYMBOL_GPL();

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

static struct vhost_iotlb *iotlb_alloc(void)
{}
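
/* Illustrative sketch: the iotlb allocation above is expected to cap the
 * number of entries with the max_iotlb_entries module parameter, e.g. via
 * the vhost_iotlb_alloc() helper (the exact flags used are an assumption). */
static struct vhost_iotlb *iotlb_alloc_example(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}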

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{}
EXPORT_SYMBOL_GPL();

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{}
EXPORT_SYMBOL_GPL();

void vhost_dev_stop(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

void vhost_clear_msg(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

void vhost_dev_cleanup(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{}

/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{}
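
/* Illustrative sketch of such an overflow check: reject empty ranges and any
 * uaddr/size pair whose last byte would fall beyond the address space. */
static bool example_range_overflows(u64 uaddr, u64 size)
{
	if (!size)
		return true;

	return uaddr > ULONG_MAX || size > ULONG_MAX ||
	       uaddr > ULONG_MAX - size + 1;
}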

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
				int log_all)
{}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{}

/* This function should be called after iotlb
 * prefetch, which means we're sure that vq
 * can be accessed through the iotlb, so -EAGAIN should
 * not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr, unsigned int size,
					    int type)
{}
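
/* Illustrative sketch of the fast/slow split implied by the comment above:
 * try the cached meta translation first and fall back to the slow path,
 * which does a fresh iotlb lookup (example_get_user_ptr is a hypothetical
 * name). */
static inline void __user *example_get_user_ptr(struct vhost_virtqueue *vq,
						void __user *addr,
						unsigned int size, int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq, (u64)(uintptr_t)addr,
						 size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}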

#define vhost_put_user(vq, x, ptr)

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{}

#define vhost_get_user(vq, x, ptr, type)

#define vhost_get_avail(vq, x, ptr)

#define vhost_get_used(vq, x, ptr)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{}

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
{}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{}

static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
				   struct vhost_iotlb_msg *msg)
{}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{}
EXPORT_SYMBOL();

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait)
{}
EXPORT_SYMBOL();

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{}
EXPORT_SYMBOL_GPL();

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 vring_desc_t __user *desc,
			 vring_avail_t __user *avail,
			 vring_used_t __user *used)
{}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_iotlb_map *map,
				 int type)
{}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{}

int vq_meta_prefetch(struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{}
EXPORT_SYMBOL_GPL();

static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
				  void __user *log_base,
				  bool log_used,
				  u64 log_addr)
{}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{}

static long vhost_vring_set_num(struct vhost_dev *d,
				struct vhost_virtqueue *vq,
				void __user *argp)
{}

static long vhost_vring_set_addr(struct vhost_dev *d,
				 struct vhost_virtqueue *vq,
				 void __user *argp)
{}

static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{}

long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{}
EXPORT_SYMBOL_GPL();

int vhost_init_device_iotlb(struct vhost_dev *d)
{}
EXPORT_SYMBOL_GPL();

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{}
EXPORT_SYMBOL_GPL();

/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
 */
static int set_bit_to_user(int nr, void __user *addr)
{}
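
/* Illustrative sketch of the slow path the TODO above complains about:
 * pin the userspace page, map it, set the bit, then unpin with the dirty
 * flag so the update is not lost (example_set_bit_to_user is hypothetical). */
static int example_set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
	if (r != 1)
		return r < 0 ? r : -EFAULT;

	base = kmap_local_page(page);
	set_bit(bit, base);
	kunmap_local(base);
	unpin_user_pages_dirty_lock(&page, 1, true);
	return 0;
}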

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{}

static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{}

static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len, struct iovec *iov, int count)
{}
EXPORT_SYMBOL_GPL();

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{}

static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{}

int vhost_vq_init_access(struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{}

/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{}
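
/* Illustrative sketch of the chain walk described above: stop when the NEXT
 * flag is clear, otherwise return the guest-endian `next` index. */
static unsigned example_next_desc(struct vhost_virtqueue *vq,
				  struct vring_desc *desc)
{
	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	return vhost16_to_cpu(vq, desc->next);
}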

static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{}

/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{}
EXPORT_SYMBOL_GPL();
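
/*
 * Illustrative sketch of the calling convention documented above, loosely
 * modelled on how backends consume buffers (example_handle_vq and the
 * commented-out example_consume are hypothetical names):
 */
static void example_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head;

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;		/* error, e.g. -EFAULT */
		if (head == vq->num)
			break;		/* nothing available */

		/* vq->iov[0..out-1] are buffers we read from the guest,
		 * vq->iov[out..out+in-1] are buffers we write back. */
		/* example_consume(vq->iov, out, in); */

		/* 0 bytes written back in this sketch. */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
}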

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{}
EXPORT_SYMBOL_GPL();

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{}
EXPORT_SYMBOL_GPL();

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			    struct vring_used_elem *heads,
			    unsigned count)
{}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{}
EXPORT_SYMBOL_GPL();

static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();
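
/* Illustrative sketch: signalling the guest is an eventfd write on the vq's
 * call context, gated by vhost_notify() so we only interrupt when the guest
 * actually wants to know (note eventfd_signal() took a count argument on
 * older kernels). */
static void example_signal_guest(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx.ctx);
}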

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{}
EXPORT_SYMBOL_GPL();

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{}
EXPORT_SYMBOL_GPL();

/* Return true if we're sure that the available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{}
EXPORT_SYMBOL_GPL();
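
/* Illustrative sketch of the standard "re-enable then re-check" dance a
 * backend performs when it runs out of buffers, so that a guest kick racing
 * with vhost_enable_notify() is not lost (example name is hypothetical): */
static bool example_vq_is_really_empty(struct vhost_dev *dev,
				       struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(dev, vq))
		return false;

	/* Looks empty: ask to be notified, then look again. */
	if (unlikely(vhost_enable_notify(dev, vq))) {
		/* New buffers showed up in the window; keep processing. */
		vhost_disable_notify(dev, vq);
		return false;
	}

	return true;
}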

/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{}
EXPORT_SYMBOL_GPL();

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{}
EXPORT_SYMBOL_GPL();

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{}
EXPORT_SYMBOL_GPL();

void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{}
EXPORT_SYMBOL_GPL();

static int __init vhost_init(void)
{}

static void __exit vhost_exit(void)
{}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");