/* linux/drivers/hv/vmbus_drv.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <[email protected]>
 *   Hank Janssen  <[email protected]>
 *   K. Y. Srinivasan <[email protected]>
 */
#define pr_fmt(fmt)

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {};

static struct device  *hv_dev;

static int hyperv_cpuhp_online;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * The panic notifier below is responsible solely for unloading the
 * vmbus connection, which is necessary in a panic event.
 *
 * Notice an intrincate relation of this notifier with Hyper-V
 * framebuffer panic notifier exists - we need vmbus connection alive
 * there in order to succeed, so we need to order both with each other
 * [see hvfb_on_panic()] - this is done using notifiers' priorities.
 */
static int hv_panic_vmbus_unload(struct notifier_block *nb, unsigned long val,
			      void *args)
{}
static struct notifier_block hyperv_panic_vmbus_unload_block =;

static const char *fb_mmio_name =;
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{}

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{}

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RW(driver_override);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] =;

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{}

static const struct attribute_group vmbus_dev_group =;
__ATTRIBUTE_GROUPS();

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(const struct bus_type *bus, char *buf)
{}

static BUS_ATTR_RO(hibernation);

static struct attribute *vmbus_bus_attrs[] =;
static const struct attribute_group vmbus_bus_group =;
__ATTRIBUTE_GROUPS();

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rule and the uevent generated here to load the appropriate driver
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters.
 */
static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
{}

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver *drv,
							struct hv_device *dev)
{}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{}

static void vmbus_free_dynids(struct hv_driver *drv)
{}

/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{}
static DRIVER_ATTR_WO(new_id);

/*
 * store_remove_id - remove a PCI device ID from this driver
 *
 * Removes a dynamic pci device ID to this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] =;
ATTRIBUTE_GROUPS();


/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, const struct device_driver *driver)
{}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{}

/*
 * vmbus_dma_configure -- Configure DMA coherence for VMbus device
 */
static int vmbus_dma_configure(struct device *child_device)
{}

/*
 * vmbus_remove - Remove a vmbus device
 */
static void vmbus_remove(struct device *child_device)
{}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{}
#else
#define vmbus_suspend
#define vmbus_resume
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm =;

/* The one and only one */
static const struct bus_type  hv_bus =;

struct onmessage_work_context {};

static void vmbus_onmessage_work(struct work_struct *work)
{}

void vmbus_on_msg_dpc(unsigned long data)
{}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{}

static void vmbus_isr(void)
{}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{}

static void vmbus_percpu_work(struct work_struct *work)
{}

/*
 * vmbus_bus_init -Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{}

/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{}
EXPORT_SYMBOL_GPL();

/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previous registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{}
EXPORT_SYMBOL_GPL();


/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{}

struct vmbus_chan_attribute {};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name)

static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{}

static const struct sysfs_ops vmbus_chan_sysfs_ops =;

static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{}
static VMBUS_CHAN_ATTR_RO(write_avail);

static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{}
static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);

static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static struct attribute *vmbus_chan_attrs[] =;

/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{}

static const struct attribute_group vmbus_chan_group =;

static const struct kobj_type vmbus_chan_ktype =;

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{}

/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{}

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{}
EXPORT_SYMBOL_GPL();

#ifdef CONFIG_ACPI
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{}
#endif

static void vmbus_mmio_remove(void)
{}

static void __maybe_unused vmbus_reserve_fb(void)
{}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplied a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{}
EXPORT_SYMBOL_GPL();

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be allocated
 *
 * This function releases anything requested by
 * vmbus_mmio_allocate().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{}
EXPORT_SYMBOL_GPL();

#ifdef CONFIG_ACPI
static int vmbus_acpi_add(struct platform_device *pdev)
{}
#else
/* Stub for !CONFIG_ACPI builds: no DSDT to walk, so report success. */
static int vmbus_acpi_add(struct platform_device *pdev)
{
	return 0;
}
#endif

static int vmbus_device_add(struct platform_device *pdev)
{}

static int vmbus_platform_driver_probe(struct platform_device *pdev)
{}

static void vmbus_platform_driver_remove(struct platform_device *pdev)
{}

#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{}

static int vmbus_bus_resume(struct device *dev)
{}
#else
#define vmbus_bus_suspend
#define vmbus_bus_resume
#endif /* CONFIG_PM_SLEEP */

static const __maybe_unused struct of_device_id vmbus_of_match[] =;
MODULE_DEVICE_TABLE(of, vmbus_of_match);

static const __maybe_unused struct acpi_device_id vmbus_acpi_device_ids[] =;
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

/*
 * Note: we must use the "no_irq" ops, otherwise hibernation can not work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
 * resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */

static const struct dev_pm_ops vmbus_bus_pm =;

static struct platform_driver vmbus_platform_driver =;

/*
 * Tear down VMBus state ahead of a kexec: clean up the global stimer
 * state, ask the host to unload the VMBus connection (non-crash path,
 * hence "false"), and remove the CPU hotplug state so per-CPU SynIC
 * cleanup runs on all online CPUs.
 */
static void hv_kexec_handler(void)
{
	hv_stimer_global_cleanup();
	vmbus_initiate_unload(false);
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
}

/*
 * Crash-time teardown: notify the host that VMBus is unloading (crash
 * path, hence "true") and clean up the stimer/SynIC registers for the
 * current CPU only — see the comment below for why other CPUs are
 * left alone.
 */
static void hv_crash_handler(struct pt_regs *regs)
{
	int cpu;

	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	cpu = smp_processor_id();
	hv_stimer_cleanup(cpu);
	hv_synic_disable_regs(cpu);
}

static int hv_synic_suspend(void)
{}

static void hv_synic_resume(void)
{}

/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops =;

static int __init hv_acpi_init(void)
{}

static void __exit vmbus_exit(void)
{}


MODULE_LICENSE();
MODULE_DESCRIPTION();

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);