#define pr_fmt(fmt) …
#define dev_fmt(fmt) …
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
#include <linux/dmi.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <uapi/linux/iommufd.h>
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "pasid.h"
#include "cap_audit.h"
#include "perfmon.h"
#define ROOT_SIZE …
#define CONTEXT_SIZE …
#define IS_GFX_DEVICE(pdev) …
#define IS_USB_DEVICE(pdev) …
#define IS_ISA_DEVICE(pdev) …
#define IS_AZALIA(pdev) …
#define IOAPIC_RANGE_START …
#define IOAPIC_RANGE_END …
#define IOVA_START_ADDR …
#define DEFAULT_DOMAIN_ADDRESS_WIDTH …
#define __DOMAIN_MAX_PFN(gaw) …
#define __DOMAIN_MAX_ADDR(gaw) …
#define DOMAIN_MAX_PFN(gaw) …
#define DOMAIN_MAX_ADDR(gaw) …
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
static int force_on = …;
static int intel_iommu_tboot_noforce;
static int no_platform_optin;
#define ROOT_ENTRY_NR …
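/*
 * root_entry_lctp()/root_entry_uctp() return the physical address of the
 * lower/upper context-table pointer held in a root entry. Scalable mode
 * splits each root entry in two (lower half for devfn < 0x80); legacy
 * mode only uses the lower pointer.
 */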
static phys_addr_t root_entry_lctp(struct root_entry *re)
{ … }
static phys_addr_t root_entry_uctp(struct root_entry *re)
{ … }
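/*
 * Per-IOMMU rbtree keyed by PCI requester ID, so the fault handler can
 * map a source ID reported by the hardware back to a struct device
 * without scanning every probed device.
 */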
static int device_rid_cmp_key(const void *key, const struct rb_node *node)
{ … }
static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
{ … }
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
{ … }
static int device_rbtree_insert(struct intel_iommu *iommu,
struct device_domain_info *info)
{ … }
static void device_rbtree_remove(struct device_domain_info *info)
{ … }
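/*
 * si_domain is the shared static identity (1:1) domain; hw_pass_through
 * is set when every unit supports hardware pass-through (ECAP.PT), in
 * which case identity-mapped devices need no page table at all.
 */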
static struct dmar_domain *si_domain;
static int hw_pass_through = …;
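/*
 * In-memory representations of the ACPI DMAR sub-tables: RMRR (reserved
 * memory regions needing unity mappings), ATSR (root ports supporting
 * Address Translation Services) and SATC (SoC-integrated devices with
 * ATS), each kept on one of the global lists below.
 */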
struct dmar_rmrr_unit { … };
struct dmar_atsr_unit { … };
struct dmar_satc_unit { … };
static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) …
static void intel_iommu_domain_free(struct iommu_domain *domain);
int dmar_disabled = !IS_ENABLED(…);
int intel_iommu_sm = … IS_ENABLED(…);
int intel_iommu_enabled = …;
EXPORT_SYMBOL_GPL(…);
static int intel_iommu_superpage = …;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;
static int disable_igfx_iommu;
#define IDENTMAP_AZALIA …
const struct iommu_ops intel_iommu_ops;
static const struct iommu_dirty_ops intel_dirty_ops;
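/*
 * Translation may already be enabled when we take over from the firmware
 * or from a crashed kernel (kdump). These helpers latch that state so
 * initialization can copy the old translation tables instead of starting
 * from scratch while DMA is still in flight.
 */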
static bool translation_pre_enabled(struct intel_iommu *iommu)
{ … }
static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{ … }
static void init_translation_status(struct intel_iommu *iommu)
{ … }
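/* Parse the intel_iommu= command line, e.g. "intel_iommu=on,sm_on". */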
static int __init intel_iommu_setup(char *str)
{ … }
__setup(…);
static int domain_type_is_si(struct dmar_domain *domain)
{ … }
static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{ … }
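/*
 * SAGAW/AGAW handling: the capability register advertises the supported
 * page-table depths. Each level translates 9 address bits, giving an
 * adjusted guest address width of 39/48/57 bits for 3/4/5-level tables;
 * these helpers pick the widest supported width not exceeding max_gaw.
 */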
static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{ … }
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{ … }
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{ … }
int iommu_calculate_agaw(struct intel_iommu *iommu)
{ … }
static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{ … }
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{ … }
static int domain_update_iommu_superpage(struct dmar_domain *domain,
struct intel_iommu *skip)
{ … }
static int domain_update_device_node(struct dmar_domain *domain)
{ … }
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{ … }
void domain_update_iommu_cap(struct dmar_domain *domain)
{ … }
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc)
{ … }
static bool
is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
{ … }
static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{ … }
static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
{ … }
static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
{ … }
static void domain_flush_cache(struct dmar_domain *domain,
void *addr, int size)
{ … }
static void free_context_table(struct intel_iommu *iommu)
{ … }
#ifdef CONFIG_DMAR_DEBUG
static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
u8 bus, u8 devfn, struct dma_pte *parent, int level)
{ … }
void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
unsigned long long addr, u32 pasid)
{ … }
#endif
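/*
 * Second-stage page-table manipulation. pfn_to_dma_pte() walks down to
 * *target_level, allocating missing intermediate tables with @gfp; the
 * dma_pte_clear_*() and dma_pte_free_*() helpers tear mappings and page
 * tables back down, collecting freed pages on @freelist so they are only
 * released after the IOTLB has been flushed.
 */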
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn, int *target_level,
gfp_t gfp)
{ … }
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
int level, int *large_page)
{ … }
static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{ … }
static void dma_pte_free_level(struct dmar_domain *domain, int level,
int retain_level, struct dma_pte *pte,
unsigned long pfn, unsigned long start_pfn,
unsigned long last_pfn)
{ … }
static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn,
int retain_level)
{ … }
static void dma_pte_list_pagetables(struct dmar_domain *domain,
int level, struct dma_pte *pte,
struct list_head *freelist)
{ … }
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
struct dma_pte *pte, unsigned long pfn,
unsigned long start_pfn, unsigned long last_pfn,
struct list_head *freelist)
{ … }
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
unsigned long last_pfn, struct list_head *freelist)
{ … }
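/*
 * Root-table programming: allocate the root table, point RTADDR_REG at
 * it and issue the set-root-table-pointer command, then invalidate the
 * context and IOTLB caches. The explicit write-buffer flush is only
 * needed when CAP.RWBF (or the rwbf quirk below) demands it.
 */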
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{ … }
static void iommu_set_root_entry(struct intel_iommu *iommu)
{ … }
void iommu_flush_write_buffer(struct intel_iommu *iommu)
{ … }
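/*
 * Register-based invalidation, used until queued invalidation is set up:
 * spin on the command/status registers to flush context-cache or IOTLB
 * entries globally, per-domain or per-page, as @type selects.
 */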
static void __iommu_flush_context(struct intel_iommu *iommu,
u16 did, u16 source_id, u8 function_mask,
u64 type)
{ … }
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type)
{ … }
static struct device_domain_info *
domain_lookup_dev_info(struct dmar_domain *domain,
struct intel_iommu *iommu, u8 bus, u8 devfn)
{ … }
void domain_update_iotlb(struct dmar_domain *domain)
{ … }
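/*
 * Some ATS-capable devices (matched by device ID below) do not reliably
 * drop cached translations on a single device-TLB invalidation, so
 * quirk_extra_dev_tlb_flush() near the end of this file issues a second,
 * widened flush for them.
 */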
#define BUGGY_QAT_DEVID_MASK …
static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
{ … }
static void iommu_enable_pci_caps(struct device_domain_info *info)
{ … }
static void iommu_disable_pci_caps(struct device_domain_info *info)
{ … }
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{ … }
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{ … }
static void iommu_enable_translation(struct intel_iommu *iommu)
{ … }
static void iommu_disable_translation(struct intel_iommu *iommu)
{ … }
static int iommu_init_domains(struct intel_iommu *iommu)
{ … }
static void disable_dmar_iommu(struct intel_iommu *iommu)
{ … }
static void free_dmar_iommu(struct intel_iommu *iommu)
{ … }
static bool first_level_by_default(unsigned int type)
{ … }
static struct dmar_domain *alloc_domain(unsigned int type)
{ … }
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{ … }
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{ … }
static int guestwidth_to_adjustwidth(int gaw)
{ … }
static void domain_exit(struct dmar_domain *domain)
{ … }
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
u8 bus, u8 devfn)
{ … }
static int domain_context_mapping_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{ … }
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{ … }
static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phy_pfn, unsigned long pages)
{ … }
static void switch_to_super_page(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long end_pfn, int level)
{ … }
static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot,
gfp_t gfp)
{ … }
static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{ … }
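/*
 * Attach @dev's @pasid to first-stage (scalable-mode) translation by
 * programming its PASID-table entry with the domain's first-level page
 * table.
 */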
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
u32 pasid)
{ … }
static bool dev_is_real_dma_subdevice(struct device *dev)
{ … }
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long first_vpfn,
unsigned long last_vpfn)
{ … }
static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
{ … }
static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{ … }
static bool device_rmrr_is_relaxable(struct device *dev)
{ … }
static int device_def_domain_type(struct device *dev)
{ … }
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{ … }
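/*
 * kdump support: walk the previous kernel's root and context tables and
 * copy their entries into freshly allocated tables, so ongoing DMA keeps
 * working until the devices involved are reset and reattached.
 */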
static int copy_context_table(struct intel_iommu *iommu,
struct root_entry *old_re,
struct context_entry **tbl,
int bus, bool ext)
{ … }
static int copy_translation_tables(struct intel_iommu *iommu)
{ … }
static int __init init_dmars(void)
{ … }
static void __init init_no_remapping_devices(void)
{ … }
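/*
 * Suspend/resume: translation state does not survive S3, so the relevant
 * registers are saved in iommu_suspend() and restored, with translation
 * re-enabled, in iommu_resume() via syscore_ops.
 */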
#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void)
{ … }
static void iommu_flush_all(void)
{ … }
static int iommu_suspend(void)
{ … }
static void iommu_resume(void)
{ … }
static struct syscore_ops iommu_syscore_ops = …;
static void __init init_iommu_pm_ops(void)
{ … }
#else
static inline void init_iommu_pm_ops(void) {}
#endif
static int __init rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{ … }
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{ … }
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{ … }
int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{ … }
static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{ … }
int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{ … }
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{ … }
static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
{ … }
int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
{ … }
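/*
 * DRHD hotplug: intel_iommu_add() brings a hot-added unit fully up
 * (domain IDs, queued invalidation, translation); dmar_iommu_hotplug()
 * dispatches insertion and removal.
 */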
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{ … }
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ … }
static void intel_iommu_free_dmars(void)
{ … }
static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
{ … }
static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{ … }
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{ … }
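/*
 * Memory hotplug notifier: keep the identity domain's 1:1 map in sync by
 * mapping hot-added ranges and unmapping (and flushing) removed ones.
 */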
static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{ … }
static struct notifier_block intel_iommu_memory_nb = …;
static void intel_disable_iommus(void)
{ … }
void intel_iommu_shutdown(void)
{ … }
static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{ … }
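/*
 * sysfs attributes exposing the unit's version, register base address,
 * capability/extended-capability registers and domain usage, grouped
 * under "intel-iommu" on the iommu device.
 */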
static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(version);
static ssize_t address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(address);
static ssize_t cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(cap);
static ssize_t ecap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(ecap);
static ssize_t domains_supported_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(domains_supported);
static ssize_t domains_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RO(domains_used);
static struct attribute *intel_iommu_attrs[] = …;
static struct attribute_group intel_iommu_group = …;
const struct attribute_group *intel_iommu_groups[] = …;
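/*
 * Platform opt-in: if the DMAR table demands DMA protection and external
 * (e.g. Thunderbolt-attached) PCI devices are present, force the IOMMU
 * on unless the user explicitly disabled it on the command line.
 */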
static bool has_external_pci(void)
{ … }
static int __init platform_optin_force_iommu(void)
{ … }
static int __init probe_acpi_namespace_devices(void)
{ … }
static __init int tboot_force_iommu(void)
{ … }
int __init intel_iommu_init(void)
{ … }
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{ … }
static void domain_context_clear(struct device_domain_info *info)
{ … }
void device_block_translation(struct device *dev)
{ … }
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{ … }
static int blocking_domain_attach_dev(struct iommu_domain *domain,
struct device *dev)
{ … }
static struct iommu_domain blocking_domain = …;
static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
{ … }
static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
{ … }
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{ … }
static struct iommu_domain *
intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
const struct iommu_user_data *user_data)
{ … }
static void intel_iommu_domain_free(struct iommu_domain *domain)
{ … }
int prepare_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{ … }
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{ … }
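/*
 * iommu_ops mapping path: map/unmap only touch the domain's page table;
 * the IOTLB is invalidated afterwards through the iotlb_sync()/tlb_sync
 * callbacks using the ranges accumulated in the iommu_iotlb_gather.
 */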
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot, gfp_t gfp)
{ … }
static int intel_iommu_map_pages(struct iommu_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{ … }
static size_t intel_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *gather)
{ … }
static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather)
{ … }
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{ … }
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{ … }
static bool domain_support_force_snooping(struct dmar_domain *domain)
{ … }
static void domain_set_force_snooping(struct dmar_domain *domain)
{ … }
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{ … }
static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{ … }
static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{ … }
static void intel_iommu_release_device(struct device *dev)
{ … }
static void intel_iommu_get_resv_regions(struct device *device,
struct list_head *head)
{ … }
static struct iommu_group *intel_iommu_device_group(struct device *dev)
{ … }
static int intel_iommu_enable_sva(struct device *dev)
{ … }
static int context_flip_pri(struct device_domain_info *info, bool enable)
{ … }
static int intel_iommu_enable_iopf(struct device *dev)
{ … }
static int intel_iommu_disable_iopf(struct device *dev)
{ … }
static int
intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{ … }
static int
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{ … }
static bool intel_iommu_is_attach_deferred(struct device *dev)
{ … }
static bool risky_device(struct pci_dev *pdev)
{ … }
static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
unsigned long iova, size_t size)
{ … }
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
struct iommu_domain *domain)
{ … }
static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{ … }
static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
{ … }
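/*
 * Dirty-page tracking for live migration: toggle the access/dirty enable
 * bit in the PASID entries of all attached devices, then harvest and
 * clear dirty bits from the second-stage page tables.
 */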
static int device_set_dirty_tracking(struct list_head *devices, bool enable)
{ … }
static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
bool enable)
{ … }
static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{ … }
static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
struct iommu_dirty_bitmap *dirty)
{ … }
static const struct iommu_dirty_ops intel_dirty_ops = …;
const struct iommu_ops intel_iommu_ops = …;
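/*
 * Device quirks. quirk_iommu_igfx() keeps the IOMMU away from integrated
 * graphics on G4x/GM45 and Broadwell parts whose graphics DMAR support
 * is known to be broken, leaving the GPU direct mapped.
 */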
static void quirk_iommu_igfx(struct pci_dev *dev)
{ … }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
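/*
 * These chipsets need write-buffer flushing but neglect to advertise it
 * in CAP.RWBF; set rwbf_quirk so the flush happens anyway.
 */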
static void quirk_iommu_rwbf(struct pci_dev *dev)
{ … }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
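/*
 * GGC is the graphics control register on Ironlake/Calpella host
 * bridges; its GGC_MEMORY_* fields encode how much stolen memory is
 * assigned to the integrated GPU's GTT. Without GTT space that is
 * usable under VT-d, enabling graphics translation risks hangs, so
 * quirk_calpella_no_shadow_gtt() disables the graphics IOMMU in that
 * case.
 */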
#define GGC …
#define GGC_MEMORY_SIZE_MASK …
#define GGC_MEMORY_SIZE_NONE …
#define GGC_MEMORY_SIZE_1M …
#define GGC_MEMORY_SIZE_2M …
#define GGC_MEMORY_VT_ENABLED …
#define GGC_MEMORY_SIZE_2M_VT …
#define GGC_MEMORY_SIZE_3M_VT …
#define GGC_MEMORY_SIZE_4M_VT …
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{ … }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
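/*
 * The integrated GPU on some platforms keeps DMA in flight and can hang
 * the system if translation is disabled underneath it at reboot; when a
 * DMAR unit serves only such a device, leave its translation-enable bit
 * alone (iommu_skip_te_disable).
 */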
static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
{ … }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
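/*
 * Tylersburg chipsets can have a broken isochronous DMAR unit; depending
 * on what the BIOS left behind, either warn that isochronous DMA is
 * unprotected or force identity mapping for the Azalia audio device
 * (IDENTMAP_AZALIA).
 */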
static void __init check_tylersburg_isoch(void)
{ … }
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
unsigned long address, unsigned long mask,
u32 pasid, u16 qdep)
{ … }
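/*
 * Enhanced Command interface: ecmd_submit_sync() writes the operands,
 * kicks the command through the ECMD register and polls the response
 * register for a status code, serialized against concurrent commands.
 */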
#define ecmd_get_status_code(res) …
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
{ … }