// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <[email protected]>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, [email protected]
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#if IS_ENABLED(CONFIG_KVM)
#include <linux/kvm_host.h>
#endif
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pseudo_fs.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/pm_runtime.h>
#include <linux/interval_tree.h>
#include <linux/iova_bitmap.h>
#include <linux/iommufd.h>
#include "vfio.h"

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <[email protected]>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

#define VFIO_MAGIC 0x5646494f /* "VFIO" */

static struct vfio {
	struct class			*device_class;
	struct ida			device_ida;
} vfio;

#ifdef CONFIG_VFIO_NOIOMMU
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode,
		 "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif
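
/*
 * Usage note (illustrative, not from this file): even with
 * CONFIG_VFIO_NOIOMMU built in, the mode must be opted into explicitly,
 * e.g. at load time:
 *
 *	modprobe vfio enable_unsafe_noiommu_mode=1
 *
 * or, since the parameter is S_IWUSR, at runtime via
 * /sys/module/vfio/parameters/enable_unsafe_noiommu_mode.
 */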

static DEFINE_XARRAY(vfio_device_set_xa);

int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{}
EXPORT_SYMBOL_GPL(vfio_assign_device_set);

static void vfio_release_device_set(struct vfio_device *device)
{}

unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
{}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);

struct vfio_device *
vfio_find_device_in_devset(struct vfio_device_set *dev_set,
			   struct device *dev)
{}
EXPORT_SYMBOL_GPL(vfio_find_device_in_devset);

/*
 * Device objects - create, release, get, put, search
 */
/* Device reference always implies a group reference */
void vfio_device_put_registration(struct vfio_device *device)
{}

bool vfio_device_try_get_registration(struct vfio_device *device)
{}

/*
 * VFIO driver API
 */
/* Release helper called by vfio_put_device() */
static void vfio_device_release(struct device *dev)
{}

static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops);

/*
 * Allocate and initialize vfio_device so it can be registered to vfio
 * core.
 *
 * Drivers should use the wrapper vfio_alloc_device() for allocation.
 * @size is the size of the structure to be allocated, including any
 * private data used by the driver.
 *
 * Driver may provide an @init callback to cover device private data.
 *
 * Use vfio_put_device() to release the structure after a successful return.
 */
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
				       const struct vfio_device_ops *ops)
{}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);
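
/*
 * Usage sketch (hypothetical driver code; "my_device", "my_ops" and
 * my_probe() are illustrative names, not part of this file).  Note that
 * the vfio_alloc_device() wrapper requires the vfio_device to be the
 * first member of the wrapper structure:
 *
 *	struct my_device {
 *		struct vfio_device vdev;	// must be at offset 0
 *		void *priv;
 *	};
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct my_device *my;
 *
 *		my = vfio_alloc_device(my_device, vdev, dev, &my_ops);
 *		if (IS_ERR(my))
 *			return PTR_ERR(my);
 *		...
 *		vfio_put_device(&my->vdev);	// on failure paths
 *	}
 */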

static int vfio_fs_init_fs_context(struct fs_context *fc)
{}

static struct file_system_type vfio_fs_type = {
	.name = "vfio",
	.owner = THIS_MODULE,
	.init_fs_context = vfio_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static struct inode *vfio_fs_inode_new(void)
{}

/*
 * Initialize a vfio_device so it can be registered to vfio core.
 */
static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops)
{}

static int __vfio_register_dev(struct vfio_device *device,
			       enum vfio_group_type type)
{}

int vfio_register_group_dev(struct vfio_device *device)
{}
EXPORT_SYMBOL_GPL(vfio_register_group_dev);

/*
 * Register a virtual device without IOMMU backing.  The user of this
 * device must not be able to directly trigger unmediated DMA.
 */
int vfio_register_emulated_iommu_dev(struct vfio_device *device)
{}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
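
/*
 * Typical pairing (sketch, hypothetical mediated driver): register during
 * probe, then tear down in reverse on remove with
 * vfio_unregister_group_dev() below:
 *
 *	ret = vfio_register_emulated_iommu_dev(&my->vdev);
 *	if (ret)
 *		goto err_put;
 *	...
 *	vfio_unregister_group_dev(&my->vdev);
 *	vfio_put_device(&my->vdev);
 */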

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void vfio_unregister_group_dev(struct vfio_device *device)
{}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);

#if IS_ENABLED(CONFIG_KVM)
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{}

void vfio_device_put_kvm(struct vfio_device *device)
{}
#endif

/* true if the vfio_device has open_device() called but not close_device() */
static bool vfio_assert_device_open(struct vfio_device *device)
{}

struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device)
{}

static int vfio_df_device_first_open(struct vfio_device_file *df)
{}

static void vfio_df_device_last_close(struct vfio_device_file *df)
{}

int vfio_df_open(struct vfio_device_file *df)
{}

void vfio_df_close(struct vfio_device_file *df)
{}

/*
 * Wrapper around pm_runtime_resume_and_get().
 * Return error code on failure or 0 on success.
 */
static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
{}

/*
 * Wrapper around pm_runtime_put().
 */
static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
{}
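
/*
 * Shape of the wrappers above (a sketch of the guarded pattern, not
 * necessarily the verbatim bodies): runtime PM is only exercised when the
 * bound driver implements it.
 *
 *	if (device->dev->driver && device->dev->driver->pm) {
 *		ret = pm_runtime_resume_and_get(device->dev);
 *		if (ret)
 *			return -EIO;
 *	}
 *	return 0;
 */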

/*
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{}

/*
 * vfio_mig_get_next_state - Compute the next step in the FSM
 * @cur_fsm - The current state the device is in
 * @new_fsm - The target state to reach
 * @next_fsm - Pointer to the next step to get to new_fsm
 *
 * Return 0 upon success, otherwise -errno
 * Upon success the next step in the state progression between cur_fsm and
 * new_fsm will be set in next_fsm.
 *
 * This breaks down requests for combination transitions into smaller steps and
 * returns the next step to get to new_fsm. The function may need to be called
 * multiple times before reaching new_fsm.
 */
int vfio_mig_get_next_state(struct vfio_device *device,
			    enum vfio_device_mig_state cur_fsm,
			    enum vfio_device_mig_state new_fsm,
			    enum vfio_device_mig_state *next_fsm)
{}
EXPORT_SYMBOL_GPL(vfio_mig_get_next_state);
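
/*
 * Usage sketch (hypothetical driver code; my_hw_set_state() is an
 * illustrative helper): walk the FSM one hop at a time until the target
 * state is reached, as the comment above describes.
 *
 *	while (cur_fsm != new_fsm) {
 *		enum vfio_device_mig_state next_fsm;
 *
 *		ret = vfio_mig_get_next_state(device, cur_fsm, new_fsm,
 *					      &next_fsm);
 *		if (ret)
 *			return ret;
 *		ret = my_hw_set_state(device, next_fsm);
 *		if (ret)
 *			return ret;
 *		cur_fsm = next_fsm;
 *	}
 */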

/*
 * Convert the driver's struct file into an FD number and return it to userspace
 */
static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg,
				   struct vfio_device_feature_mig_state *mig)
{}
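
/*
 * The usual fd-return pattern (sketch, not necessarily the verbatim body):
 * reserve an fd, copy the structure carrying it to userspace, and only then
 * call fd_install(), since an installed fd can no longer be revoked.
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		goto err_fput;
 *	mig->data_fd = fd;
 *	if (copy_to_user(arg, mig, sizeof(*mig)))
 *		goto err_put_unused;	// put_unused_fd(fd); return -EFAULT
 *	fd_install(fd, filp);
 *	return 0;
 */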

static int
vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
					   u32 flags, void __user *arg,
					   size_t argsz)
{}

static int
vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device,
					      u32 flags, void __user *arg,
					      size_t argsz)
{}

static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
					       u32 flags, void __user *arg,
					       size_t argsz)
{}

void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
			      u32 req_nodes)
{}
EXPORT_SYMBOL_GPL(vfio_combine_iova_ranges);

/* Ranges should fit into a single kernel page */
#define LOG_MAX_RANGES \
	(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))

static int
vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
					u32 flags, void __user *arg,
					size_t argsz)
{}

static int
vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
				       u32 flags, void __user *arg,
				       size_t argsz)
{}

static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
					  unsigned long iova, size_t length,
					  void *opaque)
{}

static int
vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
					 u32 flags, void __user *arg,
					 size_t argsz)
{}

static int vfio_ioctl_device_feature(struct vfio_device *device,
				     struct vfio_device_feature __user *arg)
{}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{}

const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_device_fops_mmap,
};

static struct vfio_device *vfio_device_from_file(struct file *file)
{}

/**
 * vfio_file_is_valid - True if the file is a valid vfio file
 * @file: VFIO group file or VFIO device file
 */
bool vfio_file_is_valid(struct file *file)
{}
EXPORT_SYMBOL_GPL(vfio_file_is_valid);

/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *        is always CPU cache coherent
 * @file: VFIO group file or VFIO device file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
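
/*
 * Consumer sketch (hypothetical, KVM-style logic): a return of false tells
 * the caller that non-coherent DMA is possible and cache-management
 * instructions may need to be exposed to the user.
 *
 *	if (!vfio_file_enforced_coherent(file))
 *		allow_wbinvd_emulation();	// hypothetical helper
 */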

static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
{}

/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file or VFIO device file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the file.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
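
/*
 * Consumer sketch (hypothetical hypervisor-side code): link the VM before
 * the device fd is opened, and unlink it again on detach.
 *
 *	vfio_file_set_kvm(file, kvm);	// on attach
 *	...
 *	vfio_file_set_kvm(file, NULL);	// on detach
 */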

/*
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);
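
/*
 * Build-and-copy sketch of the pattern described above (ioctl-handler
 * style; "info", "my_cap" and MY_CAP_ID are illustrative names):
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *	struct vfio_info_cap_header *header;
 *
 *	header = vfio_info_cap_add(&caps, sizeof(struct my_cap),
 *				   MY_CAP_ID, 1);
 *	if (IS_ERR(header))
 *		return PTR_ERR(header);
 *	// fill container_of(header, struct my_cap, header) ...
 *
 *	vfio_info_cap_shift(&caps, sizeof(info));  // offsets now user-relative
 *	if (copy_to_user(arg + sizeof(info), caps.buf, caps.size))
 *		ret = -EFAULT;
 *	kfree(caps.buf);
 */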

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{}
EXPORT_SYMBOL(vfio_info_cap_shift);

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{}
EXPORT_SYMBOL(vfio_info_add_capability);

int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);

/*
 * Pin contiguous user pages and return their associated host pages for local
 * domain only.
 * @device [in]  : device
 * @iova [in]    : starting IOVA of user pages to be pinned.
 * @npage [in]   : count of pages to be pinned.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @pages[out]   : array of host pages
 * Return error or number of pages pinned.
 *
 * A driver may only call this function if the vfio_device was created
 * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages().
 */
int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
		   int npage, int prot, struct page **pages)
{}
EXPORT_SYMBOL(vfio_pin_pages);
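
/*
 * Usage sketch (hypothetical emulated-IOMMU driver): pin a single page,
 * access it via the CPU, then release it with vfio_unpin_pages() below.
 *
 *	struct page *page;
 *	int ret;
 *
 *	ret = vfio_pin_pages(&my->vdev, iova, 1,
 *			     IOMMU_READ | IOMMU_WRITE, &page);
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	// map with kmap_local_page(page), copy, kunmap_local() ...
 *	vfio_unpin_pages(&my->vdev, iova, 1);
 */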

/*
 * Unpin contiguous host pages for local domain only.
 * @device [in]  : device
 * @iova [in]    : starting address of user pages to be unpinned.
 * @npage [in]   : count of pages to be unpinned.  This count should not
 *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 */
void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{}
EXPORT_SYMBOL(vfio_unpin_pages);

/*
 * This interface allows the CPUs to perform a form of virtual DMA on
 * behalf of the device.
 *
 * The CPUs read from or write to a range of IOVAs that map user space
 * memory, copying the data into or out of a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * @device [in]		: VFIO device
 * @iova [in]		: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
		size_t len, bool write)
{}
EXPORT_SYMBOL(vfio_dma_rw);
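
/*
 * Usage sketch: read a small descriptor out of the user's IOVA space into
 * a kernel buffer, then write an updated copy back ("my_desc" and MY_DONE
 * are illustrative).
 *
 *	struct my_desc desc;
 *	int ret;
 *
 *	ret = vfio_dma_rw(&my->vdev, iova, &desc, sizeof(desc), false);
 *	if (ret)
 *		return ret;
 *	desc.status = MY_DONE;
 *	ret = vfio_dma_rw(&my->vdev, iova, &desc, sizeof(desc), true);
 */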

/*
 * Module/class support
 */
static int __init vfio_init(void)
{}

static void __exit vfio_cleanup(void)
{}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_IMPORT_NS(IOMMUFD);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");