// SPDX-License-Identifier: GPL-2.0
// linux/drivers/iommu/intel/irq_remapping.c

#define pr_fmt(fmt)

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/posted_intr.h>

#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "cap_audit.h"

enum irq_mode {};

struct ioapic_scope {};

struct hpet_scope {};

struct irq_2_iommu {};

struct intel_ir_data {};

#define IR_X2APIC_MODE(mode)
#define IRTE_DEST(dest)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);
static const struct msi_parent_ops dmar_msi_parent_ops;
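
/*
 * Hedged sketch of the nesting documented in the lock-ordering comment
 * above; the function below is illustrative only and not part of this
 * driver. It assumes dmar_global_lock (the rw_semaphore declared in
 * <linux/dmar.h>) is taken outermost and irq_2_ir_lock innermost.
 */
#if 0
static void example_lock_nesting(void)
{
	unsigned long flags;

	down_read(&dmar_global_lock);		/* outermost */
	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	/* ... walk or update interrupt-remapping state here ... */
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	up_read(&dmar_global_lock);
}
#endif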

static bool ir_pre_enabled(struct intel_iommu *iommu)
{}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{}

static void init_ir_status(struct intel_iommu *iommu)
{}

static int alloc_irte(struct intel_iommu *iommu,
		      struct irq_2_iommu *irq_iommu, u16 count)
{}

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{}

static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{}

static struct intel_iommu *map_hpet_to_iommu(u8 hpet_id)
{}

static struct intel_iommu *map_ioapic_to_iommu(int apic)
{}

static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
{}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{}

/*
 * source validation type
 */
#define SVT_NO_VERIFY
#define SVT_VERIFY_SID_SQ
#define SVT_VERIFY_BUS

/*
 * source-id qualifier
 */
#define SQ_ALL_16
#define SQ_13_IGNORE_1
#define SQ_13_IGNORE_2
#define SQ_13_IGNORE_3

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{}
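
/*
 * Hedged example of how the SVT/SQ/SID fields combine (illustrative only,
 * not this driver's code): restrict an IRTE so that only a single PCI
 * requester ID may signal through it. PCI_DEVID() packs the bus number
 * and devfn into the 16-bit source-id.
 */
#if 0
static void example_single_requester(struct irte *irte, struct pci_dev *dev)
{
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
		     PCI_DEVID(dev->bus->number, dev->devfn));
}
#endif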

/*
 * Set an IRTE to match only the bus number. Interrupt requests that reference
 * this IRTE must have a requester-id whose bus number lies between start_bus
 * and end_bus, inclusive.
 */
static void set_irte_verify_bus(struct irte *irte, unsigned int start_bus,
				unsigned int end_bus)
{}
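
/*
 * Hedged sketch of the bus-range match described above (illustrative
 * only): with SVT_VERIFY_BUS the 16-bit SID field is interpreted as a
 * start/end bus pair, so the range can be packed as (start << 8) | end.
 */
#if 0
static void example_verify_bus_range(struct irte *irte,
				     unsigned int start_bus,
				     unsigned int end_bus)
{
	set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
		     (start_bus << 8) | end_bus);
}
#endif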

static int set_ioapic_sid(struct irte *irte, int apic)
{}

static int set_hpet_sid(struct irte *irte, u8 id)
{}

struct set_msi_sid_data {};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{}

static int iommu_load_old_irte(struct intel_iommu *iommu)
{}


static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{}

static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{}

static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{}

static int __init dmar_x2apic_optout(void)
{}

static void __init intel_cleanup_irq_remapping(void)
{}

static int __init intel_prepare_irq_remapping(void)
{}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{}

static int __init intel_enable_irq_remapping(void)
{}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{}

/*
 * Find the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{}

static int __init ir_dev_scope_init(void)
{}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{}

static int reenable_irq_remapping(int eim)
{}

/*
 * Store the MSI remapping domain pointer in the device if enabled.
 *
 * This is called from dmar_pci_bus_add_dev() so it works even when DMA
 * remapping is disabled. Only update the pointer if the device is not
 * already handled by a non-default PCI/MSI interrupt domain. This protects
 * e.g. VMD devices.
 */
void intel_irq_remap_add_device(struct dmar_pci_notify_info *info)
{}
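
/*
 * Hedged sketch of the guard described in the comment above (illustrative
 * only; not necessarily this driver's exact logic): install the remapping
 * MSI parent domain only when the device has not already been claimed by
 * another interrupt domain.
 */
#if 0
static void example_remap_add_device(struct dmar_pci_notify_info *info)
{
	struct irq_domain *ir_domain = map_dev_to_ir(info->dev);

	/* Leave devices such as VMD children on their existing domain. */
	if (!ir_domain || dev_get_msi_domain(&info->dev->dev))
		return;

	dev_set_msi_domain(&info->dev->dev, ir_domain);
}
#endif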

static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{}

static void prepare_irte_posted(struct irte *irte)
{}

struct irq_remap_ops intel_irq_remap_ops =;

#ifdef CONFIG_X86_POSTED_MSI

static phys_addr_t get_pi_desc_addr(struct irq_data *irqd)
{}

static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd)
{}

#else
static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {}
#endif

static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
{}

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered interrupts, irq migration is a simple
 * atomic update (of vector and cpu destination) of the IRTE followed by a
 * flush of the hardware interrupt-entry cache.
 *
 * For level triggered interrupts, we eliminate the io-apic RTE modification
 * (with the updated vector information) by using a virtual vector (the
 * io-apic pin number). The real vector used to interrupt the cpu comes from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irqs in the presence of interrupt-remapping.
 */
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{}
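
/*
 * Hedged sketch of the "atomic IRTE update, then flush" step described
 * above. Illustrative only: the irte_entry/irq_2_iommu field names and
 * the assumption that modify_irte() also flushes the interrupt entry
 * cache are taken for the example, not asserted about this driver.
 */
#if 0
static int example_migrate(struct intel_ir_data *ir_data,
			   int vector, unsigned int dest)
{
	struct irte irte = ir_data->irte_entry;

	irte.vector = vector;
	irte.dest_id = IRTE_DEST(dest);

	return modify_irte(&ir_data->irq_2_iommu, &irte);
}
#endif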

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{}

static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{}

static struct irq_chip intel_ir_chip =;

/*
 * With posted MSIs, all vectors are multiplexed into a single notification
 * vector. Device MSIs are then dispatched in a demux loop where EOIs can be
 * coalesced as well.
 *
 * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack()
 * function. Instead EOI is performed by the posted interrupt notification
 * handler.
 *
 * For the example below, 3 MSIs are coalesced into one CPU notification. Only
 * one apic_eoi() is needed.
 *
 * __sysvec_posted_msi_notification()
 *	irq_enter();
 *		handle_edge_irq()
 *			irq_chip_ack_parent()
 *				dummy(); // No EOI
 *			handle_irq_event()
 *				driver_handler()
 *		handle_edge_irq()
 *			irq_chip_ack_parent()
 *				dummy(); // No EOI
 *			handle_irq_event()
 *				driver_handler()
 *		handle_edge_irq()
 *			irq_chip_ack_parent()
 *				dummy(); // No EOI
 *			handle_irq_event()
 *				driver_handler()
 *	apic_eoi()
 *	irq_exit()
 */

static void dummy_ack(struct irq_data *d) {}

static struct irq_chip intel_ir_chip_post_msi =;
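
/*
 * Hedged sketch of a posted-MSI style chip wiring (field values are
 * assumptions for illustration, not this driver's actual initializer):
 * acking is a no-op via dummy_ack(), so the single apic_eoi() in the
 * notification handler covers all coalesced MSIs.
 */
#if 0
static struct irq_chip example_ir_chip_post_msi = {
	.name			= "EXAMPLE-IR-POST",
	.irq_ack		= dummy_ack,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
};
#endif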

static void fill_msi_msg(struct msi_msg *msg, u32 index, u32 subhandle)
{}

static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{}

static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{}

static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{}

static int intel_irq_remapping_activate(struct irq_domain *domain,
					struct irq_data *irq_data, bool reserve)
{}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{}

static int intel_irq_remapping_select(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      enum irq_domain_bus_token bus_token)
{}

static const struct irq_domain_ops intel_ir_domain_ops =;

static const struct msi_parent_ops dmar_msi_parent_ops =;

/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{}
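
/*
 * Hedged sketch of the hotplug dispatch pattern (illustrative only, not
 * this driver's exact logic): on insertion set up remapping for the new
 * unit, on removal tear down its scopes and remapping table.
 */
#if 0
static int example_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	struct intel_iommu *iommu = dmaru->iommu;

	if (!iommu)
		return 0;

	if (insert)
		return dmar_ir_add(dmaru, iommu);

	ir_remove_ioapic_hpet_scope(iommu);
	intel_teardown_irq_remapping(iommu);
	return 0;
}
#endif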