/* linux/drivers/iommu/intel/dmar.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <[email protected]>
 * Author: Shaohua Li <[email protected]>
 * Author: Anil S Keshavamurthy <[email protected]>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

/*
 * Prefix all pr_*() output from this file with "DMAR: ".
 * NOTE(review): the expansion was truncated to nothing in this snapshot,
 * which would break every pr_*() call site; restored to the upstream value.
 */
#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>

#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "perf.h"
#include "trace.h"
#include "perfmon.h"

dmar_res_handler_t;
struct dmar_res_callback {};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
/*
 * Global DMAR state: dmar_global_lock serializes process-context updates
 * (per the locking rules documented above); dmar_drhd_units is the list
 * of detected DMA remapping hardware units.
 * NOTE(review): the macro arguments were truncated in this snapshot;
 * names restored from upstream -- the comment above names
 * dmar_global_lock explicitly.
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
/* Device-scope parsing status: 1 = not finished yet, 0 = done, <0 = first
 * error encountered. NOTE(review): the initializer was truncated ("=;");
 * restored to the upstream starting value -- confirm. */
static int dmar_dev_scope_status = 1;
static DEFINE_IDA(dmar_seq_ids);

/*
 * NOTE(review): every function body in this snapshot has been elided to
 * "{}"; the comments below describe intent inferred from names and
 * prototypes only -- confirm against the full upstream source.
 */
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

/* Presumably appends a parsed DRHD unit to the global unit list -- body elided. */
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{}

/* Presumably allocates a device-scope array for [start, end) entries,
 * returning the array and writing its length to *cnt -- body elided. */
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{}

/* Presumably releases a device-scope array and resets *cnt -- body elided. */
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

/* Presumably builds a notification record for a PCI add/remove event,
 * using the static buffer above when it fits -- body elided. */
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{}

/* Presumably matches a device's bus path against an ACPI DMAR scope path -- body elided. */
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void*end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{}

static void  dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{}

/* Presumably propagates the MSI domain from a PF to its VF -- body elided. */
static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
{}

/* PCI bus notifier callback dispatching add/remove events -- body elided. */
static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{}

static struct notifier_block dmar_pci_bus_nb =;

/* NOTE(review): bodies elided in this snapshot -- confirm against upstream. */
/* Presumably looks up an already-registered unit matching this DRHD entry. */
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{}

/*
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represent one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{}

/* Presumably tears down a DRHD unit and its device scopes -- body elided. */
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{}

/* Presumably parses one ACPI Namespace Device Declaration (ANDD) entry. */
static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{}
#else
/* Without CONFIG_ACPI_NUMA there is no proximity info to parse; upstream
 * maps this to the no-op resource handler so callback tables stay valid.
 * NOTE(review): the expansion was truncated in this snapshot -- confirm
 * that dmar_res_noop exists in the full file. */
#define	dmar_parse_one_rhsa	dmar_res_noop
#endif

/* NOTE(review): bodies elided in this snapshot -- confirm against upstream. */
/* Presumably logs one parsed DMAR table entry -- body elided. */
static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{}

/* Presumably walks [start, start+len) dispatching each entry through the
 * dmar_res_callback table -- body elided. */
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{}

/* Presumably checks whether @dev falls in one of the given device scopes. */
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{}

/* Presumably finds the DRHD unit whose scope covers @dev -- body elided. */
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{}

/* NOTE(review): bodies elided in this snapshot -- confirm against upstream. */
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{}

/* Presumably resolves ANDD-declared ACPI devices into device scopes. */
static int __init dmar_acpi_dev_scope_init(void)
{}

int __init dmar_dev_scope_init(void)
{}

/* Presumably registers dmar_pci_bus_nb on the PCI bus -- body elided. */
void __init dmar_register_bus_notifier(void)
{}


/* Presumably detects and parses the DMAR table once at boot. */
int __init dmar_table_init(void)
{}

static void warn_invalid_dmar(u64 addr, const char *message)
{}

/* Presumably sanity-checks a DRHD entry (e.g. register base) before use. */
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{}

void __init detect_intel_iommu(void)
{}

/* NOTE(review): bodies elided in this snapshot -- confirm against upstream. */
static void unmap_iommu(struct intel_iommu *iommu)
{}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @drhd: DMA remapping hardware definition structure
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
{}

/* Presumably allocates and initializes the intel_iommu for a DRHD unit. */
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{}

static void free_iommu(struct intel_iommu *iommu)
{}

/*
 * Reclaim all the submitted descriptors which have completed its work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{}

/* Presumably maps a QI descriptor type to a printable name -- body elided. */
static const char *qi_type_string(u8 type)
{}

static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
{}

/* Presumably checks/handles invalidation queue errors for a submission. */
static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{}

/*
 * Function to submit invalidation descriptors of all types to the queued
 * invalidation interface(QI). Multiple descriptors can be submitted at a
 * time, a wait descriptor will be appended to each submission to ensure
 * hardware has completed the invalidation before return. Wait descriptors
 * can be part of the submission but it will not be polled for completion.
 */
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options)
{}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{}

/* NOTE(review): bodies of all QI flush helpers below are elided in this
 * snapshot -- confirm semantics against the full upstream source. */
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{}

/* PASID-based IOTLB invalidation */
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih)
{}

/* PASID-based device IOTLB Invalidate */
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			      u32 pasid,  u16 qdep, u64 addr, unsigned int size_order)
{}

void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
			  u64 granu, u32 pasid)
{}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{}

/* iommu interrupt handling. Most stuff are MSI-like. */

/* Classification of a reported fault: DMA remapping, interrupt remapping,
 * or unrecognized reason code.
 * NOTE(review): the enumerators were truncated in this snapshot; restored
 * from upstream -- confirm. */
enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

/* Human-readable fault-reason tables indexed by the hardware fault reason
 * code: legacy DMA remapping, scalable-mode, and interrupt remapping.
 * NOTE(review): the string initializers were lost in this snapshot ("=;")
 * and cannot be reconstructed here -- restore them from the upstream file. */
static const char *dma_remap_fault_reasons[] =;

static const char * const dma_remap_sm_fault_reasons[] =;

static const char *irq_remap_fault_reasons[] =;

/* NOTE(review): bodies elided in this snapshot -- confirm against upstream. */
/* Presumably maps a raw fault reason code to a string from the tables above
 * and reports its fault type via *fault_type. */
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{}


/* Presumably selects the register offset for the given fault/perf IRQ. */
static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{}

void dmar_msi_unmask(struct irq_data *data)
{}

void dmar_msi_mask(struct irq_data *data)
{}

void dmar_msi_write(int irq, struct msi_msg *msg)
{}

void dmar_msi_read(int irq, struct msi_msg *msg)
{}

/* Presumably logs a single decoded fault record -- body elided. */
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u32 pasid, u16 source_id,
		unsigned long long addr)
{}

/* Byte stride of one fault recording register (each is 128 bits wide).
 * NOTE(review): the value was truncated in this snapshot; restored from
 * upstream -- confirm. */
#define PRIMARY_FAULT_REG_LEN (16)
/* NOTE(review): bodies elided in this snapshot -- confirm against upstream. */
/* Fault interrupt handler -- presumably drains and reports fault records. */
irqreturn_t dmar_fault(int irq, void *dev_id)
{}

int dmar_set_interrupt(struct intel_iommu *iommu)
{}

/* Presumably (re)arms fault handling on each DRHD during CPU hotplug. */
int enable_drhd_fault_handling(unsigned int cpu)
{}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{}

/* Check whether DMAR units are in use */
static inline bool dmar_in_use(void)
{}

/* Late initcall -- presumably releases DMAR resources when unused. */
static int __init dmar_free_unused_resources(void)
{}

late_initcall(dmar_free_unused_resources);

/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
/* ACPI _DSM GUID used to discover DMAR hotplug methods in the namespace.
 * NOTE(review): the GUID_INIT(...) initializer was lost in this snapshot
 * ("=;") and is not reconstructed here -- restore it from upstream. */
static guid_t dmar_hp_guid =;

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
/* _DSM revision and function indices for DMAR hotplug queries.
 * NOTE(review): the values were truncated in this snapshot; restored from
 * upstream -- confirm. */
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3
#define	DMAR_DSM_FUNC_SATC		4

/* NOTE(review): bodies elided in this snapshot -- confirm against upstream. */
/* Presumably checks whether the handle's _DSM supports function @func. */
static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{}

/* Presumably evaluates _DSM(@func) and walks the returned DMAR entries
 * through @handler. */
static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{}

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{}

static int dmar_hotplug_insert(acpi_handle handle)
{}

static int dmar_hotplug_remove(acpi_handle handle)
{}

/* ACPI namespace walk callback -- presumably finds the DSM-capable handle. */
static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{}

/* Presumably the common entry for insert/remove hotplug operations. */
static int dmar_device_hotplug(acpi_handle handle, bool insert)
{}

int dmar_device_add(acpi_handle handle)
{}

int dmar_device_remove(acpi_handle handle)
{}

/*
 * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
 *
 * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
 * the ACPI DMAR table. This means that the platform boot firmware has made
 * sure no device can issue DMA outside of RMRR regions.
 */
/* Body elided in this snapshot -- see the kernel-doc comment above.
 * NOTE(review): EXPORT_SYMBOL_GPL() below had its symbol argument
 * truncated; restored to the function it directly follows. */
bool dmar_platform_optin(void)
{}
EXPORT_SYMBOL_GPL(dmar_platform_optin);