linux/drivers/xen/events/events_base.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Each domain
 * gets 1024 event channels, but NR_IRQS is not that large, so we must
 * dynamically map IRQs to event channels.  The event channels
 * interface with the rest of the kernel by defining a Xen interrupt
 * chip.  When an event is received, it is mapped to an IRQ and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
 */
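
/*
 * A minimal usage sketch of the dynamic mapping described above
 * ("my_handler" and "my_dev" are placeholder names; the event channel
 * port would normally come from xenbus negotiation):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */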

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
#include <linux/ktime.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/idtentry.h>
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/cpuid.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/xenbus.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	struct list_head eoi_list;
	struct rcu_work rwork;
	short refcnt;
	u8 spurious_cnt;
	u8 is_accounted;
	short type;			/* type: IRQT_* */
	u8 mask_reason;			/* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04
	u8 is_active;			/* Is event just being handled? */
	unsigned irq;
	evtchn_port_t evtchn;		/* event channel */
	unsigned short cpu;		/* cpu bound */
	unsigned short eoi_cpu;		/* EOI must happen on this cpu-1 */
	unsigned int irq_epoch;		/* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;			/* Time in jiffies when to EOI. */
	raw_spinlock_t lock;
	bool is_static;			/* Is event channel static */

	union {				/* type-specific "index" */
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
		struct xenbus_device *interdomain;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)

static uint __read_mostly event_loop_timeout = 2;
module_param(event_loop_timeout, uint, 0644);

static uint __read_mostly event_eoi_delay = 10;
module_param(event_eoi_delay, uint, 0644);

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
 *   IRQ-desc lock
 *     percpu eoi_list_lock
 *       irq_info->lock
 */
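
/*
 * For example, a path that must take both the mapping lock and an
 * irq_info lock nests them in that order (sketch only; "info" and the
 * surrounding code are placeholders):
 *
 *	mutex_lock(&irq_mapping_update_lock);
 *	raw_spin_lock_irqsave(&info->lock, flags);
 *	... update mapping and info ...
 *	raw_spin_unlock_irqrestore(&info->lock, flags);
 *	mutex_unlock(&irq_mapping_update_lock);
 */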

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
/* Cache for IPI event channels - needed for hot cpu unplug (avoid RCU usage). */
static DEFINE_PER_CPU(evtchn_port_t [XEN_NR_IPIS], ipi_to_evtchn) = {[0 ... XEN_NR_IPIS-1] = 0};

/* Event channel distribution data */
static atomic_t channels_on_cpu[NR_CPUS];

static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(struct irq_info *info);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)
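
/*
 * evtchn_to_irq is a dynamically grown two-level table; ignoring the
 * NULL checks the real accessors perform, a lookup is simply (sketch):
 *
 *	irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
 */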

static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);

static DEFINE_PER_CPU(unsigned int, irq_epoch);

static void clear_evtchn_to_irq_row(int *evtchn_row)
{}

static void clear_evtchn_to_irq_all(void)
{}

static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{}

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{}

static void set_info_for_irq(unsigned int irq, struct irq_info *info)
{}

static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
{}

/* Per CPU channel accounting */
static void channels_on_cpu_dec(struct irq_info *info)
{}

static void channels_on_cpu_inc(struct irq_info *info)
{}

static void xen_irq_free_desc(unsigned int irq)
{}

static void delayed_free_irq(struct work_struct *work)
{}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     enum xen_irq_type type,
				     evtchn_port_t evtchn,
				     unsigned short cpu)
{}

static int xen_irq_info_evtchn_setup(struct irq_info *info,
				     evtchn_port_t evtchn,
				     struct xenbus_device *dev)
{}

static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
				  evtchn_port_t evtchn, enum ipi_vector ipi)
{}

static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
				   evtchn_port_t evtchn, unsigned int virq)
{}

static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
				   unsigned int pirq, unsigned int gsi,
				   uint16_t domid, unsigned char flags)
{}

static void xen_irq_info_cleanup(struct irq_info *info)
{}

/*
 * Accessors for packed IRQ information.
 */
static evtchn_port_t evtchn_from_irq(unsigned int irq)
{}

unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
			 evtchn_port_t *evtchn)
{}

static enum ipi_vector ipi_from_irq(struct irq_info *info)
{}

static unsigned int virq_from_irq(struct irq_info *info)
{}

static unsigned int pirq_from_irq(struct irq_info *info)
{}

unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{}

static void do_mask(struct irq_info *info, u8 reason)
{}

static void do_unmask(struct irq_info *info, u8 reason)
{}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(struct irq_info *info)
{}
#endif

static bool pirq_needs_eoi_flag(struct irq_info *info)
{}

static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
			       bool force_affinity)
{}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
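
/*
 * Typical producer-side usage after queuing requests on a shared ring
 * (sketch; "ring" is a placeholder front ring pointer):
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */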

struct lateeoi_work {
	struct delayed_work delayed;
	spinlock_t eoi_list_lock;
	struct list_head eoi_list;
};

static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);

static void lateeoi_list_del(struct irq_info *info)
{}

static void lateeoi_list_add(struct irq_info *info)
{}

static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
{}

static void xen_irq_lateeoi_worker(struct work_struct *work)
{}

static void xen_cpu_init_eoi(unsigned int cpu)
{}

void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);

static struct irq_info *xen_irq_init(unsigned int irq)
{}

static struct irq_info *xen_allocate_irq_dynamic(void)
{}

static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
{}

static void xen_free_irq(struct irq_info *info)
{}

/* Not called for lateeoi events. */
static void event_handler_exit(struct irq_info *info)
{}

static void pirq_query_unmask(struct irq_info *info)
{}

static void do_eoi_pirq(struct irq_info *info)
{}

static void eoi_pirq(struct irq_data *data)
{}

static void do_disable_dynirq(struct irq_info *info)
{}

static void disable_dynirq(struct irq_data *data)
{}

static void mask_ack_pirq(struct irq_data *data)
{}

static unsigned int __startup_pirq(struct irq_info *info)
{}

static unsigned int startup_pirq(struct irq_data *data)
{}

static void shutdown_pirq(struct irq_data *data)
{}

static void enable_pirq(struct irq_data *data)
{}

static void disable_pirq(struct irq_data *data)
{}

int xen_irq_from_gsi(unsigned gsi)
{}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
{}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually
 * started up.  Return an existing irq if we've already got one for the
 * gsi.
 *
 * Shareable implies level triggered; not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{}
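
/*
 * For example, binding a shareable (hence level triggered) GSI
 * (sketch; error handling elided, shareable passed as 1):
 *
 *	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, 1, "ioapic-level");
 */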

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{}
#endif

int xen_destroy_irq(int irq)
{}

int xen_pirq_from_irq(unsigned irq)
{}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
				   struct xenbus_device *dev, bool shared)
{}

int bind_evtchn_to_irq(evtchn_port_t evtchn)
{}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{}

static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
					       evtchn_port_t remote_port,
					       struct irq_chip *chip,
					       bool shared)
{}

int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
					   evtchn_port_t remote_port)
{}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);

static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
{}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{}

static void unbind_from_irq(unsigned int irq)
{}

static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname, void *dev_id,
					  struct irq_chip *chip)
{}

int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
				      irq_handler_t handler,
				      unsigned long irqflags,
				      const char *devname, void *dev_id)
{}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
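
/*
 * Unlike the plain chip, a lateeoi handler must signal EOI itself once
 * it is safe to receive the next event.  A sketch, where "my_handler"
 * and "event_was_genuine" are placeholder names:
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		unsigned int eoi_flags = 0;
 *
 *		if (!event_was_genuine(dev_id))
 *			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
 *		xen_irq_lateeoi(irq, eoi_flags);
 *		return IRQ_HANDLED;
 *	}
 */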

static int bind_interdomain_evtchn_to_irqhandler_chip(
		struct xenbus_device *dev, evtchn_port_t remote_port,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id, struct irq_chip *chip)
{}

int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
						  evtchn_port_t remote_port,
						  irq_handler_t handler,
						  unsigned long irqflags,
						  const char *devname,
						  void *dev_id)
{}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
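
/*
 * Typical usage, e.g. for a per-cpu timer VIRQ (sketch;
 * "my_timer_interrupt" is a placeholder):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_timer_interrupt,
 *				      IRQF_PERCPU, "timer", NULL);
 */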

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
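
/*
 * For example, to give an event channel the highest priority (only
 * effective with the FIFO-based event channel ABI; a sketch):
 *
 *	ret = xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 */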

int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
{}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(evtchn_port_t evtchn)
{}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(evtchn_port_t evtchn)
{}
EXPORT_SYMBOL_GPL(evtchn_put);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{}

struct evtchn_loop_ctrl {
	ktime_t timeout;
	unsigned count;
	bool defer_eoi;
};

void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{}

int xen_evtchn_do_upcall(void)
{}
EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
{}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
{}

/*
 * Find the CPU within @dest mask which has the least number of channels
 * assigned. This is not precise as the per-cpu counts can be modified
 * concurrently.
 */
static unsigned int select_target_cpu(const struct cpumask *dest)
{}
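
/*
 * Roughly (a sketch of the idea, not the exact implementation):
 *
 *	best_cpu = cpumask_first_and(dest, cpu_online_mask);
 *	for_each_cpu_and(cpu, dest, cpu_online_mask)
 *		if (atomic_read(&channels_on_cpu[cpu]) <
 *		    atomic_read(&channels_on_cpu[best_cpu]))
 *			best_cpu = cpu;
 *	return best_cpu;
 */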

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{}

static void enable_dynirq(struct irq_data *data)
{}

static void do_ack_dynirq(struct irq_info *info)
{}

static void ack_dynirq(struct irq_data *data)
{}

static void mask_ack_dynirq(struct irq_data *data)
{}

static void lateeoi_ack_dynirq(struct irq_data *data)
{}

static void lateeoi_mask_ack_dynirq(struct irq_data *data)
{}

static int retrigger_dynirq(struct irq_data *data)
{}

static void restore_pirqs(void)
{}

static void restore_cpu_virqs(unsigned int cpu)
{}

static void restore_cpu_ipis(unsigned int cpu)
{}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{}
EXPORT_SYMBOL(xen_clear_irq_pending);

bool xen_test_irq_pending(int irq)
{}

/* Poll waiting for an irq to become pending with a timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{}
EXPORT_SYMBOL(xen_poll_irq_timeout);
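/*
 * The canonical poll pattern pairs this with xen_clear_irq_pending()
 * (sketch; "condition_became_true" is a placeholder, as used by e.g.
 * the Xen spinlock slow path):
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_became_true)
 *		xen_poll_irq(irq);
 */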
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{}

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
	.name			= "xen-dyn-lateeoi",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= lateeoi_ack_dynirq,
	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

#ifdef CONFIG_X86
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts for receiving event
 * channel notifications because they can be received on any vcpu and
 * need no PCI support or APIC interactions. */
void xen_setup_callback_vector(void)
{}
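
/*
 * At its core this asks Xen, via the HVM_PARAM_CALLBACK_IRQ parameter,
 * to deliver upcalls through HYPERVISOR_CALLBACK_VECTOR (a sketch of
 * the idea; error handling elided):
 *
 *	callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
 *	xen_set_callback_via(callback_via);
 */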

/*
 * Setup per-vCPU vector-type callbacks. If this setup is unavailable,
 * fall back to the global vector-type callback.
 */
static __init void xen_init_setup_upcall_vector(void)
{}

int xen_set_upcall_vector(unsigned int cpu)
{}
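
/*
 * The per-vCPU variant boils down to one hypercall per CPU (sketch;
 * field names as in the public Xen headers):
 *
 *	xen_hvm_evtchn_upcall_vector_t op = {
 *		.vector = HYPERVISOR_CALLBACK_VECTOR,
 *		.vcpu = per_cpu(xen_vcpu_id, cpu),
 *	};
 *	HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
 */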

static __init void xen_alloc_callback_vector(void)
{}
#else
void xen_setup_callback_vector(void) {}
static inline void xen_init_setup_upcall_vector(void) {}
int xen_set_upcall_vector(unsigned int cpu) { return 0; }
static inline void xen_alloc_callback_vector(void) {}
#endif /* CONFIG_XEN_PVHVM */
#endif /* CONFIG_X86 */

bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);

static int xen_evtchn_cpu_prepare(unsigned int cpu)
{}

static int xen_evtchn_cpu_dead(unsigned int cpu)
{}

void __init xen_init_IRQ(void)
{}