// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <[email protected]>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, [email protected]
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/aperture.h>
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>
#include <linux/iommufd.h>
#if IS_ENABLED(CONFIG_EEH)
#include <asm/eeh.h>
#endif

#include "vfio_pci_priv.h"

#define DRIVER_AUTHOR   "Alex Williamson <[email protected]>"
#define DRIVER_DESC     "core driver for VFIO based PCI devices"

static bool nointxmask;
static bool disable_vga;
static bool disable_idle_d3;

/* List of PFs that vfio_pci_core_sriov_configure() has been called on */
static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
static LIST_HEAD(vfio_pci_sriov_pfs);

struct vfio_pci_dummy_resource {};

struct vfio_pci_vf_token {};

struct vfio_pci_mmap_vma {};

static inline bool vfio_vga_disabled(void)
{}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
{}

static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
{}

struct vfio_pci_group_info;
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
				      struct vfio_pci_group_info *groups,
				      struct iommufd_ctx *iommufd_ctx);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{}
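
/*
 * Illustrative sketch only, not part of the driver: what "maintaining
 * DisINTx set on the host" in the comment above amounts to at the config
 * space level.  The helper name is hypothetical; vfio_pci_nointx() above
 * only decides *which* devices get this treatment.
 */
static void vfio_pci_force_nointx_sketch(struct pci_dev *pdev)
{
	u16 cmd;

	/* Keep the DisINTx bit set so the device cannot assert INTx. */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INTX_DISABLE))
		pci_write_config_word(pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_INTX_DISABLE);

	/*
	 * Config space emulation would additionally report the interrupt
	 * pin register as zero so the user never sees INTx at all.
	 */
}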

static void vfio_pci_probe_power_state(struct vfio_pci_core_device *vdev)
{}

/*
 * pci_set_power_state() wrapper handling devices which perform a soft reset on
 * D3->D0 transition.  Save state prior to D0/1/2->D3, stash it on the vdev,
 * restore when returned to D0.  Saved separately from pci_saved_state for use
 * by PM capability emulation and separately from pci_dev internal saved state
 * to avoid it being overwritten and consumed around other resets.
 */
int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t state)
{}
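
/*
 * Illustrative sketch only, not part of the driver: the save/restore
 * pattern described in the comment above for devices that lose state on
 * the D3hot->D0 transition.  The helper name and the @stash parameter are
 * hypothetical; the real code stashes the state on struct
 * vfio_pci_core_device instead.
 */
static int vfio_pci_set_power_state_sketch(struct pci_dev *pdev,
					   pci_power_t state,
					   struct pci_saved_state **stash)
{
	int ret;

	if (state >= PCI_D3hot && !*stash) {
		/* Going down: snapshot config space before it is lost. */
		pci_save_state(pdev);
		*stash = pci_store_saved_state(pdev);
	}

	ret = pci_set_power_state(pdev, state);

	if (!ret && state == PCI_D0 && *stash) {
		/* Back at D0: reload the snapshot and write it to the device. */
		pci_load_and_free_saved_state(pdev, stash);
		pci_restore_state(pdev);
	}

	return ret;
}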

static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev,
				     struct eventfd_ctx *efdctx)
{}

static int vfio_pci_core_pm_entry(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz)
{}

static int vfio_pci_core_pm_entry_with_wakeup(
	struct vfio_device *device, u32 flags,
	struct vfio_device_low_power_entry_with_wakeup __user *arg,
	size_t argsz)
{}

static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
{}

static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev)
{}

static int vfio_pci_core_pm_exit(struct vfio_device *device, u32 flags,
				 void __user *arg, size_t argsz)
{}

#ifdef CONFIG_PM
static int vfio_pci_core_runtime_suspend(struct device *dev)
{}

static int vfio_pci_core_runtime_resume(struct device *dev)
{}
#endif /* CONFIG_PM */

/*
 * The PCI driver core runtime PM routines always save the device state
 * before going into a suspended state.  If the device is going into a low
 * power state only through runtime PM ops, then no explicit handling is
 * needed for devices which have NoSoftRst-.
 */
static const struct dev_pm_ops vfio_pci_core_pm_ops = {
	SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend,
			   vfio_pci_core_runtime_resume,
			   NULL)
};
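
/*
 * Illustrative sketch only, not part of the driver: how NoSoftRst can be
 * probed from the PM capability.  The comment above observes that even
 * NoSoftRst- devices (which lose soft state across D3hot->D0) need no
 * extra handling on the runtime PM path, because the PM core saves and
 * restores config state around suspend.  The helper name is hypothetical.
 */
static bool vfio_pci_soft_resets_on_d3_sketch(struct pci_dev *pdev)
{
	u16 pmcsr;

	if (!pdev->pm_cap)
		return false;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* NoSoftRst- (bit clear): the device soft-resets on D3hot->D0. */
	return !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
}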

int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_enable);

void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_disable);

void vfio_pci_core_close_device(struct vfio_device *core_vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);

void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);

static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
{}

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{}

struct vfio_pci_fill_info {};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{}

struct vfio_pci_group_info {};

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{}

struct vfio_pci_walk_info {};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{}

static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
			      struct vfio_info_cap *caps)
{}

int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
				      unsigned int type, unsigned int subtype,
				      const struct vfio_pci_regops *ops,
				      size_t size, u32 flags, void *data)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region);

static int vfio_pci_info_atomic_cap(struct vfio_pci_core_device *vdev,
				    struct vfio_info_cap *caps)
{}

static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
				   struct vfio_device_info __user *arg)
{}

static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
					  struct vfio_region_info __user *arg)
{}

static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
				       struct vfio_irq_info __user *arg)
{}

static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
				   struct vfio_irq_set __user *arg)
{}

static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
				void __user *arg)
{}

static int vfio_pci_ioctl_get_pci_hot_reset_info(
	struct vfio_pci_core_device *vdev,
	struct vfio_pci_hot_reset_info __user *arg)
{}

static int
vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
				    int array_count, bool slot,
				    struct vfio_pci_hot_reset __user *arg)
{}

static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
					struct vfio_pci_hot_reset __user *arg)
{}

static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
				    struct vfio_device_ioeventfd __user *arg)
{}

long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
			 unsigned long arg)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);

static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
				       uuid_t __user *arg, size_t argsz)
{}

int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
				void __user *arg, size_t argsz)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature);

static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{}

ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
		size_t count, loff_t *ppos)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_read);

ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
		size_t count, loff_t *ppos)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_write);

static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev)
{}

void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
{}

u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
{}

void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 cmd)
{}

static unsigned long vma_to_pfn(struct vm_area_struct *vma)
{}

static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{}

static const struct vm_operations_struct vfio_pci_mmap_ops = {
	.fault = vfio_pci_mmap_fault,
};

int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);

void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_request);

static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
				      bool vf_token, uuid_t *uuid)
{}

#define VF_TOKEN_ARG "vf_token="

int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_match);

static int vfio_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{}

static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
{}

static void vfio_pci_vf_uninit(struct vfio_pci_core_device *vdev)
{}

static int vfio_pci_vga_init(struct vfio_pci_core_device *vdev)
{}

static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
{}

int vfio_pci_core_init_dev(struct vfio_device *core_vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev);

void vfio_pci_core_release_dev(struct vfio_device *core_vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev);

int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);

void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);

pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);

int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
				  int nr_virtfn)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);

const struct pci_error_handlers vfio_pci_core_err_handlers = {
	.error_detected = vfio_pci_core_aer_err_detected,
};
EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);

static bool vfio_dev_in_groups(struct vfio_device *vdev,
			       struct vfio_pci_group_info *groups)
{}

static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
{}

/*
 * vfio-core considers a group to be viable and will create a vfio_device even
 * if some devices are bound to drivers like pci-stub or pcieport. Here we
 * require all PCI devices to be inside our dev_set since that ensures they stay
 * put and that every driver controlling the device can co-ordinate with the
 * device reset.
 *
 * Returns the pci_dev to pass to pci_reset_bus() if every PCI device to be
 * reset is inside the dev_set, and pci_reset_bus() can succeed. NULL otherwise.
 */
static struct pci_dev *
vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
{}
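
/*
 * Illustrative sketch only, not part of the driver: the membership check
 * described in the comment above, expressed with this file's own walker.
 * It assumes vfio_pci_is_device_in_set() returns 0 when the device belongs
 * to @dev_set and non-zero otherwise; the sketch name is hypothetical.
 */
static struct pci_dev *
vfio_pci_dev_set_resettable_sketch(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *vdev =
		list_first_entry(&dev_set->device_list,
				 struct vfio_pci_core_device,
				 vdev.dev_set_list);
	struct pci_dev *pdev = vdev->pdev;
	bool slot = !pci_probe_reset_slot(pdev->slot);

	/* Neither a slot reset nor a bus reset is possible: give up. */
	if (!slot && pci_probe_reset_bus(pdev->bus))
		return NULL;

	/* Every device the reset would touch must be in this dev_set. */
	if (vfio_pci_for_each_slot_or_bus(pdev, vfio_pci_is_device_in_set,
					  dev_set, slot))
		return NULL;

	return pdev;
}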

static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
{}

static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
				      struct vfio_pci_group_info *groups,
				      struct iommufd_ctx *iommufd_ctx)
{}

static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
{}

/*
 * If a bus or slot reset is available for the provided dev_set and:
 *  - All of the devices affected by that bus or slot reset are unused
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset.
 */
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
{}
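
/*
 * Illustrative sketch only, not part of the driver: the policy described
 * in the comment above.  It assumes dev_set->lock is already held and that
 * vfio_pci_dev_set_needs_reset() covers both the "all affected devices are
 * unused" and the "someone is marked dirty" conditions; the sketch name is
 * hypothetical.
 */
static void vfio_pci_dev_set_try_reset_sketch(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	struct pci_dev *to_reset;

	if (!vfio_pci_dev_set_needs_reset(dev_set))
		return;

	/* All affected devices must sit inside this dev_set. */
	to_reset = vfio_pci_dev_set_resettable(dev_set);
	if (!to_reset)
		return;

	if (!pci_reset_bus(to_reset)) {
		/* Reset succeeded: clear the dirty mark on every member. */
		list_for_each_entry(cur, &dev_set->device_list,
				    vdev.dev_set_list)
			cur->needs_reset = false;
	}
}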

void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
			      bool is_disable_idle_d3)
{}
EXPORT_SYMBOL_GPL(vfio_pci_core_set_params);

static void vfio_pci_core_cleanup(void)
{}

static int __init vfio_pci_core_init(void)
{}

module_init(vfio_pci_core_init);
module_exit(vfio_pci_core_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);