linux/drivers/iommu/intel/iommu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * Authors: David Woodhouse <[email protected]>,
 *          Ashok Raj <[email protected]>,
 *          Shaohua Li <[email protected]>,
 *          Anil S Keshavamurthy <[email protected]>,
 *          Fenghua Yu <[email protected]>
 *          Joerg Roedel <[email protected]>
 */

#define pr_fmt(fmt)
#define dev_fmt(fmt)

#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
#include <linux/dmi.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <uapi/linux/iommufd.h>

#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "pasid.h"
#include "cap_audit.h"
#include "perfmon.h"

#define ROOT_SIZE
#define CONTEXT_SIZE

#define IS_GFX_DEVICE(pdev)
#define IS_USB_DEVICE(pdev)
#define IS_ISA_DEVICE(pdev)
#define IS_AZALIA(pdev)

#define IOAPIC_RANGE_START
#define IOAPIC_RANGE_END
#define IOVA_START_ADDR

#define DEFAULT_DOMAIN_ADDRESS_WIDTH

#define __DOMAIN_MAX_PFN(gaw)
#define __DOMAIN_MAX_ADDR(gaw)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)
#define DOMAIN_MAX_ADDR(gaw)
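
/*
 * Hedged sketch (not the elided macro definitions above): one way to turn a
 * guest address width into a maximum PFN and clamp it so it always fits in
 * an unsigned long, as the comment above describes. The hard-coded 4KiB page
 * shift and the helper name are assumptions for illustration only.
 */
static inline unsigned long example_domain_max_pfn(int gaw)
{
	/* Last PFN addressable with 'gaw' bits of guest address. */
	u64 max_pfn = (((u64)1) << (gaw - 12)) - 1;

	/* Clamp to ULONG_MAX so PFN arithmetic can safely use unsigned long. */
	return (unsigned long)min_t(u64, max_pfn, (unsigned long)-1);
}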

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * Set to 1 to panic the kernel if VT-d cannot be successfully enabled
 * (used when the kernel is launched with TXT).
 */
static int force_on =;
static int intel_iommu_tboot_noforce;
static int no_platform_optin;

#define ROOT_ENTRY_NR

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 * if marked present.
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{}

static int device_rid_cmp_key(const void *key, const struct rb_node *node)
{}

static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
{}

/*
 * Looks up an IOMMU-probed device using its source ID.
 *
 * Returns the pointer to the device if there is a match. Otherwise,
 * returns NULL.
 *
 * Note that this helper doesn't guarantee that the device won't be
 * released by the iommu subsystem after being returned. If that is a
 * possibility, the caller should use its own synchronization mechanism
 * to prevent the device from being released while it is in use.
 */
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
{}
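
/*
 * Hedged sketch of the lookup documented above, assuming the per-IOMMU rbtree
 * and its lock are named device_rbtree and device_rbtree_lock and that the
 * tree is keyed by PCI requester ID. The struct and field names here are
 * illustrative and not verified against struct intel_iommu.
 */
static struct device *example_rbtree_find(struct intel_iommu *iommu, u16 rid)
{
	struct device_domain_info *info = NULL;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
	node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
	if (node)
		info = rb_entry(node, struct device_domain_info, node);
	spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);

	return info ? info->dev : NULL;
}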

static int device_rbtree_insert(struct intel_iommu *iommu,
				struct device_domain_info *info)
{}

static void device_rbtree_remove(struct device_domain_info *info)
{}

/*
 * This domain is a static identity-mapping domain.
 *	1. This domain creates a static 1:1 mapping of all usable memory
 *	   (a sketch of this step follows the declarations below).
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through =;
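
/*
 * Hedged sketch of step 1 in the comment above: walk every usable memory
 * range reported by memblock and install a 1:1 (IOVA == physical) mapping
 * for it. Using memblock PFNs directly as DMA PFNs and the helper name are
 * simplifications for illustration; iommu_domain_identity_map() is only
 * declared later in this file.
 */
static int example_build_identity_map(struct dmar_domain *domain)
{
	unsigned long start_pfn, end_pfn;
	int i, ret;

	for_each_mem_pfn_range(i, NUMA_NO_NODE, &start_pfn, &end_pfn, NULL) {
		ret = iommu_domain_identity_map(domain, start_pfn, end_pfn - 1);
		if (ret)
			return ret;
	}

	return 0;
}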

struct dmar_rmrr_unit {};

struct dmar_atsr_unit {};

struct dmar_satc_unit {};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
static LIST_HEAD(dmar_satc_units);

#define for_each_rmrr_units(rmrr)

static void intel_iommu_domain_free(struct iommu_domain *domain);

int dmar_disabled = !IS_ENABLED();
int intel_iommu_sm = IS_ENABLED();

int intel_iommu_enabled =;
EXPORT_SYMBOL_GPL();

static int intel_iommu_superpage =;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;
static int disable_igfx_iommu;

#define IDENTMAP_AZALIA

const struct iommu_ops intel_iommu_ops;
static const struct iommu_dirty_ops intel_dirty_ops;

static bool translation_pre_enabled(struct intel_iommu *iommu)
{}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{}

static void init_translation_status(struct intel_iommu *iommu)
{}

static int __init intel_iommu_setup(char *str)
{}
__setup();

static int domain_type_is_si(struct dmar_domain *domain)
{}

static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{}

/*
 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
 * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
 * the returned SAGAW.
 */
static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{}

/*
 * Calculate the AGAW for each iommu.
 * "SAGAW" may be different across iommus, so use a default AGAW and
 * fall back to a smaller supported AGAW for iommus that don't support
 * the default.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{}
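
/*
 * Hedged sketch of the fallback described above: starting from the default
 * AGAW, walk downwards and pick the widest AGAW that the IOMMU reports as
 * supported. The helper name and the bit-per-AGAW encoding of the 'sagaw'
 * argument are assumptions for illustration.
 */
static int example_pick_agaw(unsigned long sagaw, int default_agaw)
{
	int agaw;

	for (agaw = default_agaw; agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			return agaw;	/* widest supported AGAW <= default */
	}

	return -1;	/* nothing at or below the default is supported */
}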

static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{}

static int domain_update_iommu_superpage(struct dmar_domain *domain,
					 struct intel_iommu *skip)
{}

static int domain_update_device_node(struct dmar_domain *domain)
{}

/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{}

/* Some capabilities may be different across iommus */
void domain_update_iommu_cap(struct dmar_domain *domain)
{}

struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc)
{}

/**
 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
 *				 sub-hierarchy of a candidate PCI-PCI bridge
 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
 * @bridge: the candidate PCI-PCI bridge
 *
 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
 */
static bool
is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
{}
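
/*
 * Hedged sketch of the test documented above: walk upstream from @pdev via
 * pci_upstream_bridge() and report whether @bridge is one of its ancestors.
 * The real helper additionally validates that both arguments are PCI
 * devices, which is omitted here.
 */
static bool example_is_downstream_of(struct pci_dev *pdev, struct pci_dev *bridge)
{
	struct pci_dev *parent = pci_upstream_bridge(pdev);

	while (parent) {
		if (parent == bridge)
			return true;
		parent = pci_upstream_bridge(parent);
	}

	return false;
}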

static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{}

static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
{}

static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
{}

static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
{}

static void free_context_table(struct intel_iommu *iommu)
{}

#ifdef CONFIG_DMAR_DEBUG
static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
			 u8 bus, u8 devfn, struct dma_pte *parent, int level)
{}

void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
			  unsigned long long addr, u32 pasid)
{}
#endif

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level,
				      gfp_t gfp)
{}

/* Return the PTE for this address at the specified level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 unsigned long pfn,
					 int level, int *large_page)
{}

/* Clear last-level PTEs; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
{}

static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       int retain_level, struct dma_pte *pte,
			       unsigned long pfn, unsigned long start_pfn,
			       unsigned long last_pfn)
{}

/*
 * clear last level (leaf) ptes and free page table pages below the
 * level we wish to keep intact.
 */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   int retain_level)
{}

/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static void dma_pte_list_pagetables(struct dmar_domain *domain,
				    int level, struct dma_pte *pte,
				    struct list_head *freelist)
{}

static void dma_pte_clear_level(struct dmar_domain *domain, int level,
				struct dma_pte *pte, unsigned long pfn,
				unsigned long start_pfn, unsigned long last_pfn,
				struct list_head *freelist)
{}

/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
			 unsigned long last_pfn, struct list_head *freelist)
{}
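
/*
 * Hedged sketch of the ordering described in the two comments above: gather
 * the page-table pages on a local freelist while unlinking them, flush the
 * IOTLB so the hardware can no longer walk them, and only then release the
 * pages. intel_flush_iotlb_all() is defined later in this file, and
 * iommu_put_pages_list() is assumed to be the list-based release helper from
 * iommu-pages.h.
 */
static void example_unmap_then_free(struct dmar_domain *domain,
				    unsigned long start_pfn,
				    unsigned long last_pfn)
{
	LIST_HEAD(freelist);

	/* 1. Unlink the PTEs and queue the now-unreferenced table pages. */
	domain_unmap(domain, start_pfn, last_pfn, &freelist);

	/* 2. Invalidate the IOTLB, including cached intermediate levels. */
	intel_flush_iotlb_all(&domain->domain);

	/* 3. Only now is it safe to hand the pages back. */
	iommu_put_pages_list(&freelist);
}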

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{}

void iommu_flush_write_buffer(struct intel_iommu *iommu)
{}

/* Return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
				  u64 type)
{}

/* Return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
{}

static struct device_domain_info *
domain_lookup_dev_info(struct dmar_domain *domain,
		       struct intel_iommu *iommu, u8 bus, u8 devfn)
{}

void domain_update_iotlb(struct dmar_domain *domain)
{}

/*
 * The extra devTLB flush quirk impacts those QAT devices with PCI device
 * IDs ranging from 0x4940 to 0x4943. It is exempted from the risky_device()
 * check because it applies only to the built-in QAT devices and it doesn't
 * grant additional privileges.
 */
#define BUGGY_QAT_DEVID_MASK
static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
{}
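
/*
 * Hedged sketch of the device-ID match described above: the four affected
 * QAT IDs 0x4940-0x4943 differ only in the two low bits, so masking those
 * off and comparing against 0x4940 covers the whole range. The literal mask
 * mirrors the range in the comment and is not necessarily how the elided
 * BUGGY_QAT_DEVID_MASK is defined.
 */
static bool example_is_buggy_qat(struct pci_dev *pdev)
{
	return pdev->vendor == PCI_VENDOR_ID_INTEL &&
	       (pdev->device & ~0x3) == 0x4940;
}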

static void iommu_enable_pci_caps(struct device_domain_info *info)
{}

static void iommu_disable_pci_caps(struct device_domain_info *info)
{}

static void intel_flush_iotlb_all(struct iommu_domain *domain)
{}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{}

static void iommu_enable_translation(struct intel_iommu *iommu)
{}

static void iommu_disable_translation(struct intel_iommu *iommu)
{}

static int iommu_init_domains(struct intel_iommu *iommu)
{}

static void disable_dmar_iommu(struct intel_iommu *iommu)
{}

static void free_dmar_iommu(struct intel_iommu *iommu)
{}

/*
 * Check and return whether first level is used by default for
 * DMA translation.
 */
static bool first_level_by_default(unsigned int type)
{}
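
/*
 * Hedged sketch of one plausible decision for the helper above: first-level
 * translation requires scalable mode and FLTS support, and is preferred for
 * kernel DMA domains while UNMANAGED domains stay on the second level. The
 * exact policy in the elided body may differ.
 */
static bool example_first_level_by_default(struct intel_iommu *iommu,
					   unsigned int type)
{
	/* Legacy (non-scalable) mode only offers second-level translation. */
	if (!sm_supported(iommu))
		return false;

	/* The hardware must support first-level translation at all. */
	if (!ecap_flts(iommu->ecap))
		return false;

	/* Prefer first level for DMA domains; UNMANAGED keeps second level. */
	return type != IOMMU_DOMAIN_UNMANAGED;
}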

static struct dmar_domain *alloc_domain(unsigned int type)
{}

int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{}

void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{}

static int guestwidth_to_adjustwidth(int gaw)
{}

static void domain_exit(struct dmar_domain *domain)
{}

static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      u8 bus, u8 devfn)
{}

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
{}

static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{}

/* Return largest possible superpage level for a given mapping */
static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
				   unsigned long phy_pfn, unsigned long pages)
{}

/*
 * Ensure that old small page tables are removed to make room for superpage(s).
 * We're going to add new large pages, so make sure we don't remove their parent
 * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
 */
static void switch_to_super_page(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long end_pfn, int level)
{}

static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
		 unsigned long phys_pfn, unsigned long nr_pages, int prot,
		 gfp_t gfp)
{}

static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{}

static int domain_setup_first_level(struct intel_iommu *iommu,
				    struct dmar_domain *domain,
				    struct device *dev,
				    u32 pasid)
{}

static bool dev_is_real_dma_subdevice(struct device *dev)
{}

static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long first_vpfn,
				     unsigned long last_vpfn)
{}

static int md_domain_init(struct dmar_domain *domain, int guest_width);

static int __init si_domain_init(int hw)
{}

static int dmar_domain_attach_device(struct dmar_domain *domain,
				     struct device *dev)
{}

/**
 * device_rmrr_is_relaxable - Test whether the RMRR of this device
 * is relaxable (i.e. is allowed to be not enforced under some conditions)
 * @dev: device handle
 *
 * We assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 *
 * Return: true if the RMRR is relaxable, false otherwise
 */
static bool device_rmrr_is_relaxable(struct device *dev)
{}
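
/*
 * Hedged sketch of the policy documented above: USB and graphics devices are
 * the two classes whose RMRRs are treated as relaxable. IS_USB_DEVICE() and
 * IS_GFX_DEVICE() are the class-test macros defined (with their values
 * elided) near the top of this file.
 */
static bool example_rmrr_is_relaxable(struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return false;

	pdev = to_pci_dev(dev);
	return IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev);
}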

static int device_def_domain_type(struct device *dev)
{}

static void intel_iommu_init_qi(struct intel_iommu *iommu)
{}

static int copy_context_table(struct intel_iommu *iommu,
			      struct root_entry *old_re,
			      struct context_entry **tbl,
			      int bus, bool ext)
{}

static int copy_translation_tables(struct intel_iommu *iommu)
{}

static int __init init_dmars(void)
{}

static void __init init_no_remapping_devices(void)
{}

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{}

static void iommu_flush_all(void)
{}

static int iommu_suspend(void)
{}

static void iommu_resume(void)
{}

static struct syscore_ops iommu_syscore_ops =;

static void __init init_iommu_pm_ops(void)
{}

#else
static inline void init_iommu_pm_ops(void) {}
#endif	/* CONFIG_SUSPEND */

static int __init rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{}

int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{}

static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{}

int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{}

int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{}

int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{}

static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
{}

int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
{}

static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{}

int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{}

static void intel_iommu_free_dmars(void)
{}

static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
{}

static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{}

int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{}

static int intel_iommu_memory_notifier(struct notifier_block *nb,
				       unsigned long val, void *v)
{}

static struct notifier_block intel_iommu_memory_nb =;

static void intel_disable_iommus(void)
{}

void intel_iommu_shutdown(void)
{}

static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{}

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(version);

static ssize_t address_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(address);

static ssize_t cap_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(cap);

static ssize_t ecap_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(ecap);

static ssize_t domains_supported_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(domains_supported);

static ssize_t domains_used_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(domains_used);

static struct attribute *intel_iommu_attrs[] =;

static struct attribute_group intel_iommu_group =;

const struct attribute_group *intel_iommu_groups[] =;

static bool has_external_pci(void)
{}

static int __init platform_optin_force_iommu(void)
{}

static int __init probe_acpi_namespace_devices(void)
{}

static __init int tboot_force_iommu(void)
{}

int __init intel_iommu_init(void)
{}

static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void domain_context_clear(struct device_domain_info *info)
{}

/*
 * Clear the page table pointer in context or pasid table entries so that
 * all DMA requests without PASID from the device are blocked. If the page
 * table has been set, clean up the data structures.
 */
void device_block_translation(struct device *dev)
{}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{}

static int blocking_domain_attach_dev(struct iommu_domain *domain,
				      struct device *dev)
{}

static struct iommu_domain blocking_domain =;

static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
{}

static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
{}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{}

static struct iommu_domain *
intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
			      struct iommu_domain *parent,
			      const struct iommu_user_data *user_data)
{}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{}

int prepare_domain_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{}

static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot, gfp_t gfp)
{}

static int intel_iommu_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount,
				 int prot, gfp_t gfp, size_t *mapped)
{}

static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size,
				struct iommu_iotlb_gather *gather)
{}

static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova,
				      size_t pgsize, size_t pgcount,
				      struct iommu_iotlb_gather *gather)
{}

static void intel_iommu_tlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{}

static bool domain_support_force_snooping(struct dmar_domain *domain)
{}

static void domain_set_force_snooping(struct dmar_domain *domain)
{}

static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{}

static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{}

static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{}

static void intel_iommu_release_device(struct device *dev)
{}

static void intel_iommu_get_resv_regions(struct device *device,
					 struct list_head *head)
{}

static struct iommu_group *intel_iommu_device_group(struct device *dev)
{}

static int intel_iommu_enable_sva(struct device *dev)
{}

static int context_flip_pri(struct device_domain_info *info, bool enable)
{}

static int intel_iommu_enable_iopf(struct device *dev)
{}

static int intel_iommu_disable_iopf(struct device *dev)
{}

static int
intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{}

static int
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{}

static bool intel_iommu_is_attach_deferred(struct device *dev)
{}

/*
 * Check that the device does not live on an external facing PCI port that is
 * marked as untrusted. Such devices should not be able to apply quirks and
 * thus not be able to bypass the IOMMU restrictions.
 */
static bool risky_device(struct pci_dev *pdev)
{}
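
/*
 * Hedged sketch of the check described above: the PCI core marks devices
 * sitting behind external-facing ports as untrusted, so the quirk exemption
 * can key off pdev->untrusted. Any logging the real helper does is omitted
 * here.
 */
static bool example_risky_device(struct pci_dev *pdev)
{
	return pdev->untrusted;
}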

static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
				      unsigned long iova, size_t size)
{}

static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
					 struct iommu_domain *domain)
{}

static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
				     struct device *dev, ioasid_t pasid)
{}

static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
{}

/*
 * Set dirty tracking for the device list of a domain. The caller must
 * hold the domain->lock when calling it.
 */
static int device_set_dirty_tracking(struct list_head *devices, bool enable)
{}
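
/*
 * Hedged sketch of the locking contract stated above: the caller takes
 * domain->lock and then walks the attached devices, flipping dirty tracking
 * on each one. example_device_enable_dirty() is a hypothetical per-device
 * helper standing in for whatever the elided body actually calls.
 */
static int example_set_dirty_tracking(struct dmar_domain *domain, bool enable)
{
	struct device_domain_info *info;
	int ret = 0;

	assert_spin_locked(&domain->lock);	/* contract from the comment above */

	list_for_each_entry(info, &domain->devices, link) {
		ret = example_device_enable_dirty(info, enable);	/* hypothetical */
		if (ret)
			break;
	}

	return ret;
}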

static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
					    bool enable)
{}

static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{}

static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
{}

static const struct iommu_dirty_ops intel_dirty_ops =;

const struct iommu_ops intel_iommu_ops =;

static void quirk_iommu_igfx(struct pci_dev *dev)
{}

/* G4x/GM45 integrated gfx dmar support is totally busted. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);

/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC
#define GGC_MEMORY_SIZE_MASK
#define GGC_MEMORY_SIZE_NONE
#define GGC_MEMORY_SIZE_1M
#define GGC_MEMORY_SIZE_2M
#define GGC_MEMORY_VT_ENABLED
#define GGC_MEMORY_SIZE_2M_VT
#define GGC_MEMORY_SIZE_3M_VT
#define GGC_MEMORY_SIZE_4M_VT

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
{}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{}

/*
 * Here we deal with a device TLB defect where the device may inadvertently
 * issue an ATS invalidation completion before posted writes that were
 * initiated with a translated address matching the invalidation address
 * range, violating the invalidation completion ordering.
 * Therefore, any use case that cannot guarantee DMA is stopped before unmap
 * is vulnerable to this defect. In other words, any dTLB invalidation that is
 * not initiated under the control of the trusted/privileged host device
 * driver must use this quirk.
 * Device TLBs are invalidated under the following six conditions:
 * 1. Device driver does DMA API unmap of an IOVA
 * 2. Device driver unbinds a PASID from a process, sva_unbind_device()
 * 3. PASID is torn down, after the PASID cache is flushed, e.g. process
 *    exit_mmap() due to a crash
 * 4. Under SVA usage, called by mmu_notifier.invalidate_range() where the
 *    VM has to free pages that were unmapped
 * 5. Userspace driver unmaps a DMA buffer
 * 6. Cache invalidation in vSVA usage (upcoming)
 *
 * For #1 and #2, device drivers are responsible for stopping DMA traffic
 * before unmap/unbind. For #3, the iommu driver gets an mmu_notifier callback
 * to invalidate the TLB the same way as a normal user unmap, which will use
 * this quirk. The dTLB invalidation after a PASID cache flush does not need
 * this quirk.
 *
 * As a reminder, #6 will *NEED* this quirk as we enable nested translation.
 */
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long mask,
			       u32 pasid, u16 qdep)
{}
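
/*
 * Hedged caller-side sketch of how the quirk above is meant to be used: the
 * normal ranged device-TLB invalidation is issued first, and the affected
 * devices then get the extra flush. The field names and the
 * qi_flush_dev_iotlb() calling convention follow common usage elsewhere in
 * the driver but are not taken from the elided bodies in this file.
 */
static void example_flush_dev_iotlb(struct device_domain_info *info,
				    u64 addr, unsigned int mask, u16 qdep)
{
	u16 sid = PCI_DEVID(info->bus, info->devfn);

	/* Normal ranged device-TLB invalidation. */
	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, qdep, addr, mask);

	/* Extra invalidation for devices with the defect described above. */
	quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
}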

#define ecmd_get_status_code(res)

/*
 * Function to submit a command to the enhanced command interface. The
 * valid enhanced command descriptions are defined in Table 47 of the
 * VT-d spec. The VT-d hardware implementation may support some but not
 * all commands, which can be determined by checking the Enhanced
 * Command Capability Register.
 *
 * Return values:
 *  - 0: Command successful without any error;
 *  - Negative: software error value;
 *  - Nonzero positive: failure status code defined in Table 48.
 */
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
{}
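
/*
 * Hedged usage sketch for the return-value convention documented above:
 * negative values are kernel error codes, zero is success, and positive
 * values are hardware status codes from Table 48. Using DMA_ECMD_FREEZE with
 * zero operands mirrors the perfmon code's usage but is an assumption here.
 */
static int example_freeze_counters(struct intel_iommu *iommu)
{
	int ret = ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);

	if (ret < 0)
		return ret;	/* software error, e.g. -EBUSY or -EOPNOTSUPP */
	if (ret > 0)
		return -EIO;	/* hardware failure status code (Table 48) */

	return 0;		/* command completed successfully */
}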