linux/drivers/vhost/vdpa.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <[email protected]>
 *         Jason Wang <[email protected]>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {};

#define VHOST_VDPA_DEV_MAX

#define VHOST_VDPA_IOTLB_BUCKETS

struct vhost_vdpa_as {};

struct vhost_vdpa {};
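
/*
 * Illustrative sketch of the state the "_sketch" helpers further down are
 * written against. The collapsed definitions above may differ; every field
 * named here is an assumption introduced only so the sketches below have
 * something concrete to reference.
 */
struct vhost_vdpa_as_sketch {
	struct hlist_node hash_link;	/* linkage in the per-ASID bucket array */
	struct vhost_iotlb iotlb;	/* software IOTLB for this address space */
	u32 id;				/* address space ID (ASID) */
};

struct vhost_vdpa_sketch {
	struct vhost_dev vdev;				/* generic vhost device */
	struct iommu_domain *domain;			/* platform IOMMU domain, if used */
	struct vdpa_device *vdpa;			/* parent vDPA device */
	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];	/* per-ASID address spaces */
	struct vdpa_iova_range range;			/* usable IOVA range for userspace */
	struct eventfd_ctx *config_ctx;			/* config interrupt eventfd */
	bool suspended;					/* set by the suspend/resume ioctls */
};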

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid);

static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{}

static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{}

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{}
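
/*
 * Illustrative sketch of the per-ASID lookups (not the verbatim upstream
 * bodies): an address space is found by hashing the ASID into the bucket
 * array, and the owning ASID is recovered from an iotlb by walking back to
 * its containing address-space object. Field names follow the assumptions
 * in the "_sketch" structs above.
 */
static u32 iotlb_to_asid_sketch(struct vhost_iotlb *iotlb)
{
	struct vhost_vdpa_as_sketch *as;

	as = container_of(iotlb, struct vhost_vdpa_as_sketch, iotlb);
	return as->id;
}

static struct vhost_vdpa_as_sketch *
asid_to_as_sketch(struct vhost_vdpa_sketch *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as_sketch *as;

	/* Walk the hash bucket looking for a matching address space ID. */
	hlist_for_each_entry(as, head, hash_link)
		if (as->id == asid)
			return as;

	return NULL;
}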

static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{}

static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
						      u32 asid)
{}

static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
{}

static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{}

static void handle_vq_kick(struct vhost_work *work)
{}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{}
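
/*
 * Illustrative sketch of the interrupt-style callbacks (not the verbatim
 * upstream bodies): the parent vDPA driver invokes these when a virtqueue
 * or the config space needs attention, and vhost-vdpa forwards the event to
 * the eventfd userspace registered. eventfd_signal() is called with just
 * the context here, as on recent kernels; older kernels also passed a count.
 */
static irqreturn_t vhost_vdpa_virtqueue_cb_sketch(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx);

	return IRQ_HANDLED;
}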

static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{}

static int _compat_vdpa_reset(struct vhost_vdpa *v)
{}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{}

static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
{}

static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
{}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{}
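
/*
 * Illustrative sketch of a simple "query the parent device and copy the
 * result to userspace" ioctl handler (not the verbatim upstream body).
 * The vdpa/config pointers are assumed as in the "_sketch" struct above;
 * vdpa_config_ops::get_device_id() is the vDPA callback reporting the
 * virtio device ID.
 */
static long vhost_vdpa_get_device_id_sketch(struct vhost_vdpa_sketch *v,
					    u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}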

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{}

static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{}
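
/*
 * Illustrative sketch of config-access validation (not the verbatim
 * upstream body): the (offset, length) window userspace requests in
 * struct vhost_vdpa_config must fall inside the config space size reported
 * by vdpa_config_ops::get_config_size().
 */
static int vhost_vdpa_config_validate_sketch(struct vhost_vdpa_sketch *v,
					     struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	size_t size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}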

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{}

static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
{}

static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
{}

static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
{}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{}

static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
{}

static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
{}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{}

static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{}

/* After a successful return of this ioctl the device must not process more
 * virtqueue descriptors. The device can answer reads or writes of config
 * fields as if it were not suspended. In particular, writing to "queue_enable"
 * with a value of 1 will not make the device start processing buffers.
 */
static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{}
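
/*
 * Illustrative sketch of the suspend path (not the verbatim upstream body):
 * forward the request to the parent driver's optional suspend() callback
 * and remember the state so later ioctls can take it into account. The
 * "suspended" flag is an assumed field from the "_sketch" struct above.
 */
static long vhost_vdpa_suspend_sketch(struct vhost_vdpa_sketch *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!ops->suspend)
		return -EOPNOTSUPP;

	ret = ops->suspend(vdpa);
	if (!ret)
		v->suspended = true;

	return ret;
}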

/* After a successful return of this ioctl the device resumes processing
 * virtqueue descriptors. The device becomes fully operational again, in the
 * same state it was in before it was suspended.
 */
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{}
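
/*
 * Illustrative sketch of the resume path (not the verbatim upstream body),
 * mirroring the suspend sketch above via the parent driver's optional
 * resume() callback.
 */
static long vhost_vdpa_resume_sketch(struct vhost_vdpa_sketch *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!ops->resume)
		return -EOPNOTSUPP;

	ret = ops->resume(vdpa);
	if (!ret)
		v->suspended = false;

	return ret;
}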

static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{}

static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
				     struct vhost_iotlb_map *map, u32 asid)
{}

static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid)
{}

static int perm_to_iommu_flags(u32 perm)
{}
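
/*
 * Illustrative sketch of the permission translation (not the verbatim
 * upstream body): vhost IOTLB access bits are mapped onto IOMMU protection
 * flags, with IOMMU_CACHE requested for coherent mappings.
 */
static int perm_to_iommu_flags_sketch(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}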

static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{}
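
/*
 * Illustrative sketch of the mapping path (not the verbatim upstream body):
 * the range is recorded in the software IOTLB and then pushed to whichever
 * translation backend the parent device offers, i.e. its own dma_map() or
 * set_map() callbacks or, failing that, the platform IOMMU. Callback
 * signatures follow recent kernels (per-ASID dma_map()/set_map(),
 * iommu_map() with a gfp argument); batching of set_map() updates is
 * omitted here.
 */
static int vhost_vdpa_map_sketch(struct vhost_vdpa_sketch *v,
				 struct vhost_iotlb *iotlb,
				 u64 iova, u64 size, u64 pa, u32 perm,
				 void *opaque)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid_sketch(iotlb);
	int r;

	/* Track the range in the software IOTLB first. */
	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map)
		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
	else if (ops->set_map)
		r = ops->set_map(vdpa, asid, iotlb);
	else
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags_sketch(perm), GFP_KERNEL);

	if (r)
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);

	return r;
}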

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size)
{}

static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{}

static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb *iotlb,
					   struct vhost_iotlb_msg *msg)
{}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
					struct vhost_iotlb_msg *msg)
{}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{}

static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{}

static const struct vm_operations_struct vhost_vdpa_vm_ops =;

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops =;
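
/*
 * Illustrative sketch of how the character-device entry points above are
 * typically wired together (not necessarily the exact upstream initializer
 * or member order).
 */
static const struct file_operations vhost_vdpa_fops_sketch = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
};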

static void vhost_vdpa_release_dev(struct device *device)
{}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{}

static struct vdpa_driver vhost_vdpa_driver =;
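
/*
 * Illustrative sketch of the vDPA bus driver registration (not necessarily
 * the exact upstream initializer): probe/remove above are hooked into the
 * vdpa bus under an assumed driver name.
 */
static struct vdpa_driver vhost_vdpa_driver_sketch = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};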

static int __init vhost_vdpa_init(void)
{}
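
/*
 * Illustrative sketch of module initialization (not the verbatim upstream
 * body): reserve a char-device region for VHOST_VDPA_DEV_MAX minors and
 * register the vdpa bus driver, unwinding on failure. The device name
 * string is an assumption.
 */
static int __init vhost_vdpa_init_sketch(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		return r;

	r = vdpa_register_driver(&vhost_vdpa_driver_sketch);
	if (r)
		unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);

	return r;
}
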
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{}
module_exit(vhost_vdpa_exit);

MODULE_VERSION();
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR();
MODULE_DESCRIPTION();