/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <[email protected]>
 *          Anil S Keshavamurthy <[email protected]>
 *          David Woodhouse <[email protected]>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>
#include <linux/perf_event.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include <uapi/linux/iommufd.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT
#define VTD_PAGE_SIZE
#define VTD_PAGE_MASK
#define VTD_PAGE_ALIGN(addr)

#define IOVA_PFN(addr)
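
/*
 * Illustrative sketch (not part of the original header): how a fixed
 * 4 KiB VT-d page size is typically derived from its shift, and how an
 * address maps to an IOVA page frame number.  The EXAMPLE_ names below
 * are hypothetical and do not stand in for the real VTD_PAGE_ and
 * IOVA_PFN definitions above.
 */
#define EXAMPLE_VTD_PAGE_SHIFT	12
#define EXAMPLE_VTD_PAGE_SIZE	(1UL << EXAMPLE_VTD_PAGE_SHIFT)
#define EXAMPLE_VTD_PAGE_MASK	(((u64)-1) << EXAMPLE_VTD_PAGE_SHIFT)

/* Round an address up to the next 4 KiB boundary. */
static inline u64 example_vtd_page_align(u64 addr)
{
	return (addr + EXAMPLE_VTD_PAGE_SIZE - 1) & EXAMPLE_VTD_PAGE_MASK;
}

/* Convert an address to its 4 KiB IOVA page frame number. */
static inline unsigned long example_iova_pfn(u64 addr)
{
	return addr >> EXAMPLE_VTD_PAGE_SHIFT;
}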

#define VTD_STRIDE_SHIFT
#define VTD_STRIDE_MASK

#define DMA_PTE_READ
#define DMA_PTE_WRITE
#define DMA_PTE_LARGE_PAGE
#define DMA_PTE_SNP

#define DMA_FL_PTE_PRESENT
#define DMA_FL_PTE_US
#define DMA_FL_PTE_ACCESS
#define DMA_FL_PTE_DIRTY

#define DMA_SL_PTE_DIRTY_BIT
#define DMA_SL_PTE_DIRTY

#define ADDR_WIDTH_5LEVEL
#define ADDR_WIDTH_4LEVEL

#define CONTEXT_TT_MULTI_LEVEL
#define CONTEXT_TT_DEV_IOTLB
#define CONTEXT_TT_PASS_THROUGH
#define CONTEXT_PASIDE

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define DMAR_VER_REG
#define DMAR_CAP_REG
#define DMAR_ECAP_REG
#define DMAR_GCMD_REG
#define DMAR_GSTS_REG
#define DMAR_RTADDR_REG
#define DMAR_CCMD_REG
#define DMAR_FSTS_REG
#define DMAR_FECTL_REG
#define DMAR_FEDATA_REG
#define DMAR_FEADDR_REG
#define DMAR_FEUADDR_REG
#define DMAR_AFLOG_REG
#define DMAR_PMEN_REG
#define DMAR_PLMBASE_REG
#define DMAR_PLMLIMIT_REG
#define DMAR_PHMBASE_REG
#define DMAR_PHMLIMIT_REG
#define DMAR_IQH_REG
#define DMAR_IQT_REG
#define DMAR_IQ_SHIFT
#define DMAR_IQA_REG
#define DMAR_ICS_REG
#define DMAR_IQER_REG
#define DMAR_IRTA_REG
#define DMAR_PQH_REG
#define DMAR_PQT_REG
#define DMAR_PQA_REG
#define DMAR_PRS_REG
#define DMAR_PECTL_REG
#define DMAR_PEDATA_REG
#define DMAR_PEADDR_REG
#define DMAR_PEUADDR_REG
#define DMAR_MTRRCAP_REG
#define DMAR_MTRRDEF_REG
#define DMAR_MTRR_FIX64K_00000_REG
#define DMAR_MTRR_FIX16K_80000_REG
#define DMAR_MTRR_FIX16K_A0000_REG
#define DMAR_MTRR_FIX4K_C0000_REG
#define DMAR_MTRR_FIX4K_C8000_REG
#define DMAR_MTRR_FIX4K_D0000_REG
#define DMAR_MTRR_FIX4K_D8000_REG
#define DMAR_MTRR_FIX4K_E0000_REG
#define DMAR_MTRR_FIX4K_E8000_REG
#define DMAR_MTRR_FIX4K_F0000_REG
#define DMAR_MTRR_FIX4K_F8000_REG
#define DMAR_MTRR_PHYSBASE0_REG
#define DMAR_MTRR_PHYSMASK0_REG
#define DMAR_MTRR_PHYSBASE1_REG
#define DMAR_MTRR_PHYSMASK1_REG
#define DMAR_MTRR_PHYSBASE2_REG
#define DMAR_MTRR_PHYSMASK2_REG
#define DMAR_MTRR_PHYSBASE3_REG
#define DMAR_MTRR_PHYSMASK3_REG
#define DMAR_MTRR_PHYSBASE4_REG
#define DMAR_MTRR_PHYSMASK4_REG
#define DMAR_MTRR_PHYSBASE5_REG
#define DMAR_MTRR_PHYSMASK5_REG
#define DMAR_MTRR_PHYSBASE6_REG
#define DMAR_MTRR_PHYSMASK6_REG
#define DMAR_MTRR_PHYSBASE7_REG
#define DMAR_MTRR_PHYSMASK7_REG
#define DMAR_MTRR_PHYSBASE8_REG
#define DMAR_MTRR_PHYSMASK8_REG
#define DMAR_MTRR_PHYSBASE9_REG
#define DMAR_MTRR_PHYSMASK9_REG
#define DMAR_PERFCAP_REG
#define DMAR_PERFCFGOFF_REG
#define DMAR_PERFOVFOFF_REG
#define DMAR_PERFCNTROFF_REG
#define DMAR_PERFINTRSTS_REG
#define DMAR_PERFINTRCTL_REG
#define DMAR_PERFEVNTCAP_REG
#define DMAR_ECMD_REG
#define DMAR_ECEO_REG
#define DMAR_ECRSP_REG
#define DMAR_ECCAP_REG

#define DMAR_IQER_REG_IQEI(reg)
#define DMAR_IQER_REG_ITESID(reg)
#define DMAR_IQER_REG_ICESID(reg)

#define OFFSET_STRIDE

#define dmar_readq(a)
#define dmar_writeq(a,v)
#define dmar_readl(a)
#define dmar_writel(a, v)

#define DMAR_VER_MAJOR(v)
#define DMAR_VER_MINOR(v)
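
/*
 * Hedged example (not part of the original header): the Version register
 * encodes the major revision in bits 7:4 and the minor revision in bits
 * 3:0, so a raw value of 0x10 reads back as version 1.0.  The helper name
 * below is hypothetical.
 */
static inline void example_dmar_version(u32 ver, u32 *major, u32 *minor)
{
	*major = (ver >> 4) & 0xf;
	*minor = ver & 0xf;
}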

/*
 * Decoding Capability Register
 */
#define cap_esrtps(c)
#define cap_esirtps(c)
#define cap_ecmds(c)
#define cap_fl5lp_support(c)
#define cap_pi_support(c)
#define cap_fl1gp_support(c)
#define cap_read_drain(c)
#define cap_write_drain(c)
#define cap_max_amask_val(c)
#define cap_num_fault_regs(c)
#define cap_pgsel_inv(c)

#define cap_super_page_val(c)
#define cap_super_offset(c)

#define cap_fault_reg_offset(c)
#define cap_max_fault_reg_offset(c)

#define cap_zlr(c)
#define cap_isoch(c)
#define cap_mgaw(c)
#define cap_sagaw(c)
#define cap_caching_mode(c)
#define cap_phmr(c)
#define cap_plmr(c)
#define cap_rwbf(c)
#define cap_afl(c)
#define cap_ndoms(c)
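
/*
 * Illustrative sketch (not part of the original header): the cap_ helpers
 * above are shift-and-mask accessors over the 64-bit Capability register
 * value.  As one hedged example, the Maximum Guest Address Width field is
 * encoded as "width - 1"; the hypothetical helper below shows the pattern
 * (field position assumed per the public VT-d specification).
 */
static inline unsigned int example_cap_mgaw(u64 cap)
{
	return ((cap >> 16) & 0x3f) + 1;	/* MGAW field, bits 21:16 */
}
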
/*
 * Extended Capability Register
 */

#define ecap_pms(e)
#define ecap_rps(e)
#define ecap_smpwc(e)
#define ecap_flts(e)
#define ecap_slts(e)
#define ecap_slads(e)
#define ecap_smts(e)
#define ecap_dit(e)
#define ecap_pds(e)
#define ecap_pasid(e)
#define ecap_pss(e)
#define ecap_eafs(e)
#define ecap_nwfs(e)
#define ecap_srs(e)
#define ecap_ers(e)
#define ecap_prs(e)
#define ecap_broken_pasid(e)
#define ecap_dis(e)
#define ecap_nest(e)
#define ecap_mts(e)
#define ecap_iotlb_offset(e)
#define ecap_max_iotlb_offset(e)
#define ecap_coherent(e)
#define ecap_qis(e)
#define ecap_pass_through(e)
#define ecap_eim_support(e)
#define ecap_ir_support(e)
#define ecap_dev_iotlb_support(e)
#define ecap_max_handle_mask(e)
#define ecap_sc_support(e)

/*
 * Decoding Perf Capability Register
 */
#define pcap_num_cntr(p)
#define pcap_cntr_width(p)
#define pcap_num_event_group(p)
#define pcap_filters_mask(p)
#define pcap_interrupt(p)
/* The counter stride is calculated as 2 ^ (x+10) bytes */
#define pcap_cntr_stride(p)
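
/*
 * Worked example (not part of the original header) of the "2 ^ (x+10)
 * bytes" stride formula above: a stride field of 0 means successive
 * counter register blocks are 1 KiB apart, 1 means 2 KiB, 2 means 4 KiB,
 * and so on.  The helper name below is hypothetical.
 */
static inline unsigned long example_pcap_cntr_stride_bytes(unsigned int x)
{
	return 1UL << (x + 10);		/* e.g. x = 2  ->  4096 bytes */
}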

/*
 * Decoding Perf Event Capability Register
 */
#define pecap_es(p)

/* Virtual command interface capability */
#define vccap_pasid(v)

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET
#define DMA_TLB_GLOBAL_FLUSH
#define DMA_TLB_DSI_FLUSH
#define DMA_TLB_PSI_FLUSH
#define DMA_TLB_IIRG(type)
#define DMA_TLB_IAIG(val)
#define DMA_TLB_READ_DRAIN
#define DMA_TLB_WRITE_DRAIN
#define DMA_TLB_DID(id)
#define DMA_TLB_IVT
#define DMA_TLB_IH_NONLEAF
#define DMA_TLB_MAX_SIZE

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET
#define DMA_ID_TLB_GLOBAL_FLUSH
#define DMA_ID_TLB_DSI_FLUSH
#define DMA_ID_TLB_PSI_FLUSH
#define DMA_ID_TLB_READ_DRAIN
#define DMA_ID_TLB_WRITE_DRAIN
#define DMA_ID_TLB_DID(id)
#define DMA_ID_TLB_IH_NONLEAF
#define DMA_ID_TLB_ADDR(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM
#define DMA_PMEN_PRS

/* GCMD_REG */
#define DMA_GCMD_TE
#define DMA_GCMD_SRTP
#define DMA_GCMD_SFL
#define DMA_GCMD_EAFL
#define DMA_GCMD_WBF
#define DMA_GCMD_QIE
#define DMA_GCMD_SIRTP
#define DMA_GCMD_IRE
#define DMA_GCMD_CFI

/* GSTS_REG */
#define DMA_GSTS_TES
#define DMA_GSTS_RTPS
#define DMA_GSTS_FLS
#define DMA_GSTS_AFLS
#define DMA_GSTS_WBFS
#define DMA_GSTS_QIES
#define DMA_GSTS_IRTPS
#define DMA_GSTS_IRES
#define DMA_GSTS_CFIS

/* DMA_RTADDR_REG */
#define DMA_RTADDR_SMT

/* CCMD_REG */
#define DMA_CCMD_ICC
#define DMA_CCMD_GLOBAL_INVL
#define DMA_CCMD_DOMAIN_INVL
#define DMA_CCMD_DEVICE_INVL
#define DMA_CCMD_FM(m)
#define DMA_CCMD_MASK_NOBIT
#define DMA_CCMD_MASK_1BIT
#define DMA_CCMD_MASK_2BIT
#define DMA_CCMD_MASK_3BIT
#define DMA_CCMD_SID(s)
#define DMA_CCMD_DID(d)

/* ECMD_REG */
#define DMA_MAX_NUM_ECMD
#define DMA_MAX_NUM_ECMDCAP
#define DMA_ECMD_REG_STEP
#define DMA_ECMD_ENABLE
#define DMA_ECMD_DISABLE
#define DMA_ECMD_FREEZE
#define DMA_ECMD_UNFREEZE
#define DMA_ECMD_OA_SHIFT
#define DMA_ECMD_ECRSP_IP
#define DMA_ECMD_ECCAP3
#define DMA_ECMD_ECCAP3_ECNTS
#define DMA_ECMD_ECCAP3_DCNTS
#define DMA_ECMD_ECCAP3_FCNTS
#define DMA_ECMD_ECCAP3_UFCNTS
#define DMA_ECMD_ECCAP3_ESSENTIAL

/* FECTL_REG */
#define DMA_FECTL_IM

/* FSTS_REG */
#define DMA_FSTS_PFO
#define DMA_FSTS_PPF
#define DMA_FSTS_IQE
#define DMA_FSTS_ICE
#define DMA_FSTS_ITE
#define DMA_FSTS_PRO
#define dma_fsts_fault_record_index(s)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F
#define dma_frcd_type(d)
#define dma_frcd_fault_reason(c)
#define dma_frcd_source_id(c)
#define dma_frcd_pasid_value(c)
#define dma_frcd_pasid_present(c)
/* low 64 bits */
#define dma_frcd_page_addr(d)

/* PRS_REG */
#define DMA_PRS_PPR
#define DMA_PRS_PRO

#define DMA_VCS_PAS

/* PERFINTRSTS_REG */
#define DMA_PERFINTRSTS_PIS

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)
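
/*
 * Illustrative sketch (not part of the original header): IOMMU_WAIT_OP()
 * is a poll-until-condition helper for operations whose completion shows
 * up in a status register.  The sketch below shows the general shape --
 * re-read the status register, spinning with cpu_relax(), until the
 * condition holds or a bounded number of retries is exhausted.  The name,
 * the retry bound and the int return convention are assumptions made only
 * for illustration.
 */
static inline int example_wait_op(void __iomem *sts_reg, u32 cond_mask,
				  unsigned long max_retries)
{
	unsigned long tries;

	for (tries = 0; tries < max_retries; tries++) {
		if (readl(sts_reg) & cond_mask)
			return 0;	/* condition satisfied */
		cpu_relax();
	}

	return -ETIMEDOUT;
}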

#define QI_LENGTH

enum {};

#define QI_CC_TYPE
#define QI_IOTLB_TYPE
#define QI_DIOTLB_TYPE
#define QI_IEC_TYPE
#define QI_IWD_TYPE
#define QI_EIOTLB_TYPE
#define QI_PC_TYPE
#define QI_DEIOTLB_TYPE
#define QI_PGRP_RESP_TYPE
#define QI_PSTRM_RESP_TYPE

#define QI_IEC_SELECTIVE
#define QI_IEC_IIDEX(idx)
#define QI_IEC_IM(m)

#define QI_IWD_STATUS_DATA(d)
#define QI_IWD_STATUS_WRITE
#define QI_IWD_FENCE
#define QI_IWD_PRQ_DRAIN

#define QI_IOTLB_DID(did)
#define QI_IOTLB_DR(dr)
#define QI_IOTLB_DW(dw)
#define QI_IOTLB_GRAN(gran)
#define QI_IOTLB_ADDR(addr)
#define QI_IOTLB_IH(ih)
#define QI_IOTLB_AM(am)

#define QI_CC_FM(fm)
#define QI_CC_SID(sid)
#define QI_CC_DID(did)
#define QI_CC_GRAN(gran)

#define QI_DEV_IOTLB_SID(sid)
#define QI_DEV_IOTLB_QDEP(qdep)
#define QI_DEV_IOTLB_ADDR(addr)
#define QI_DEV_IOTLB_PFSID(pfsid)
#define QI_DEV_IOTLB_SIZE
#define QI_DEV_IOTLB_MAX_INVS

#define QI_PC_PASID(pasid)
#define QI_PC_DID(did)
#define QI_PC_GRAN(gran)

/* PASID cache invalidation granu */
#define QI_PC_ALL_PASIDS
#define QI_PC_PASID_SEL
#define QI_PC_GLOBAL

#define QI_EIOTLB_ADDR(addr)
#define QI_EIOTLB_IH(ih)
#define QI_EIOTLB_AM(am)
#define QI_EIOTLB_PASID(pasid)
#define QI_EIOTLB_DID(did)
#define QI_EIOTLB_GRAN(gran)

/* QI Dev-IOTLB inv granu */
#define QI_DEV_IOTLB_GRAN_ALL
#define QI_DEV_IOTLB_GRAN_PASID_SEL

#define QI_DEV_EIOTLB_ADDR(a)
#define QI_DEV_EIOTLB_SIZE
#define QI_DEV_EIOTLB_PASID(p)
#define QI_DEV_EIOTLB_SID(sid)
#define QI_DEV_EIOTLB_QDEP(qd)
#define QI_DEV_EIOTLB_PFSID(pfsid)
#define QI_DEV_EIOTLB_MAX_INVS

/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p)
#define QI_PGRP_RESP_CODE(res)
#define QI_PGRP_DID(rid)
#define QI_PGRP_PASID(pasid)

/* Page group response descriptor QW1 */
#define QI_PGRP_LPIG(x)
#define QI_PGRP_IDX(idx)


#define QI_RESP_SUCCESS
#define QI_RESP_INVALID
#define QI_RESP_FAILURE

#define QI_GRAN_NONG_PASID
#define QI_GRAN_PSI_PASID

#define qi_shift(iommu)

struct qi_desc {};

struct q_inval {};

/* Page Request Queue depth */
#define PRQ_ORDER
#define PRQ_RING_MASK
#define PRQ_DEPTH
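
/*
 * Illustrative sketch (not part of the original header): the queue depth
 * follows from the allocation order.  With a hypothetical order of 4
 * (sixteen 4 KiB pages) and 32-byte page request descriptors, the queue
 * holds (16 * 4096) / 32 = 2048 entries, and the ring mask is depth - 1
 * so head/tail arithmetic can wrap with a bitwise AND.  The EXAMPLE_
 * names are assumptions, not the real PRQ_ values.
 */
#define EXAMPLE_PRQ_ORDER	4
#define EXAMPLE_PRQ_DEPTH	(((1UL << EXAMPLE_PRQ_ORDER) * 4096) / 32)
#define EXAMPLE_PRQ_RING_MASK	(EXAMPLE_PRQ_DEPTH - 1)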

struct dmar_pci_notify_info;

#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER
#define INTR_REMAP_TABLE_REG_SIZE
#define INTR_REMAP_TABLE_REG_SIZE_MASK

#define INTR_REMAP_TABLE_ENTRIES
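
/*
 * Worked example (not part of the original header): the 1 MiB limit above
 * follows from the table format -- each interrupt remapping table entry is
 * 16 bytes and the table can hold at most 64K entries, so
 * 65536 * 16 bytes = 1 MiB.  The EXAMPLE_ names exist only for this
 * arithmetic.
 */
#define EXAMPLE_IRTE_SIZE		16
#define EXAMPLE_INTR_REMAP_MAX_BYTES	(65536 * EXAMPLE_IRTE_SIZE)	/* 1 MiB */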

struct irq_domain;

struct ir_table {};

void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
#else
static inline void
intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
#endif

struct iommu_flush {};

enum {};

#define VTD_FLAG_TRANS_PRE_ENABLED
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED
#define VTD_FLAG_SVM_CAPABLE

#define sm_supported(iommu)
#define pasid_supported(iommu)
#define ssads_supported(iommu)
#define nested_supported(iommu)

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {};
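
/*
 * Illustrative sketch (not part of the original header): given the layout
 * documented above, the present bit and the 4 KiB-aligned context-table
 * pointer are recovered from the low quadword with a shift-and-mask.  The
 * struct and helper names below are hypothetical stand-ins for the real
 * root_entry accessors.
 */
struct example_root_entry {
	u64 lo;
	u64 hi;
};

static inline bool example_root_present(const struct example_root_entry *re)
{
	return re->lo & 1;		/* bit 0: present */
}

static inline u64 example_root_ctx_ptr(const struct example_root_entry *re)
{
	return re->lo & ~0xfffULL;	/* bits 12-63: context-table pointer */
}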

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: available
 * 8-23: domain id
 */
struct context_entry {};
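
/*
 * Hedged example (not part of the original header): per the layout above,
 * the domain identifier occupies bits 8-23 of the high quadword, so
 * reading it back is a shift-and-mask over that word.  The struct and
 * helper names are hypothetical stand-ins for the real accessors.
 */
struct example_context_entry {
	u64 lo;
	u64 hi;
};

static inline u16 example_context_domain_id(const struct example_context_entry *c)
{
	return (c->hi >> 8) & 0xffff;	/* bits 8-23: domain id */
}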

struct iommu_domain_info {};

/*
 * We start with a fixed size for the batched descriptors, which is
 * sufficient for current needs. A future improvement could allocate the
 * batch buffer dynamically, sizing it to actual demand for better
 * performance in different scenarios.
 */
#define QI_MAX_BATCHED_DESC_COUNT
struct qi_batch {};
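
/*
 * Illustrative sketch (not part of the original header): a fixed-size
 * batch as described above is just a small array of descriptors plus a
 * fill index; once the array is full (or the caller flushes), the whole
 * batch is submitted at once.  The names, the 16-entry size and the
 * 256-bit descriptor stand-in are assumptions, not the real qi_batch
 * layout.
 */
#define EXAMPLE_QI_BATCH_SIZE	16

struct example_qi_desc {
	u64 qw[4];		/* hypothetical 256-bit descriptor stand-in */
};

struct example_qi_batch {
	struct example_qi_desc descs[EXAMPLE_QI_BATCH_SIZE];
	unsigned int index;	/* next free slot; 0 means the batch is empty */
};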

struct dmar_domain {};

/*
 * In theory, the VT-d 4.0 spec can support up to 2^16 counters, but in
 * practice existing platforms implement only 14. Capping the number of
 * counters at 64 should be good enough for a long time; supporting more
 * than 64 would also require extra freeze and overflow registers, which
 * is not necessary for now.
 */
#define IOMMU_PMU_IDX_MAX

struct iommu_pmu {};

#define IOMMU_IRQ_ID_OFFSET_PRQ
#define IOMMU_IRQ_ID_OFFSET_PERF

struct intel_iommu {};

/* PCI domain-device relationship */
struct device_domain_info {};

struct dev_pasid_info {};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{}

/* Convert generic struct iommu_domain to private struct dmar_domain */
static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{}
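
/*
 * Illustrative sketch (not part of the original header): the conversion
 * above is the usual container_of() pattern -- the driver-private domain
 * embeds the generic struct iommu_domain, so the outer structure is
 * recovered from a pointer to the embedded member.  The struct, member
 * and helper names below are hypothetical.
 */
struct example_private_domain {
	int private_state;
	struct iommu_domain domain;	/* embedded generic domain */
};

static inline struct example_private_domain *
example_to_private_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct example_private_domain, domain);
}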

/* Retrieve the domain ID that this IOMMU has allocated to the domain */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {};
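
/*
 * Illustrative sketch (not part of the original header): with the layout
 * documented above, a PTE grants access when either the read or the write
 * bit is set, and the host physical address occupies bits 12-63.  The
 * struct and helper names are hypothetical stand-ins for the real dma_pte
 * accessors.
 */
struct example_dma_pte {
	u64 val;
};

static inline bool example_pte_present(const struct example_dma_pte *pte)
{
	return (pte->val & 3) != 0;	/* bit 0: read, bit 1: write */
}

static inline u64 example_pte_addr(const struct example_dma_pte *pte)
{
	return pte->val & ~0xfffULL;	/* bits 12-63: host physical address */
}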

static inline void dma_clear_pte(struct dma_pte *pte)
{}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{}

static inline bool dma_pte_present(struct dma_pte *pte)
{}

static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte,
						   unsigned long flags)
{}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{}

static inline bool first_pte_in_page(struct dma_pte *pte)
{}

static inline int nr_pte_to_next_page(struct dma_pte *pte)
{}

static inline bool context_present(struct context_entry *context)
{}

#define LEVEL_STRIDE
#define LEVEL_MASK
#define MAX_AGAW_WIDTH
#define MAX_AGAW_PFN_WIDTH

static inline int agaw_to_level(int agaw)
{}

static inline int agaw_to_width(int agaw)
{}

static inline int width_to_agaw(int width)
{}

static inline unsigned int level_to_offset_bits(int level)
{}

static inline int pfn_level_offset(u64 pfn, int level)
{}

static inline u64 level_mask(int level)
{}

static inline u64 level_size(int level)
{}

static inline u64 align_to_level(u64 pfn, int level)
{}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{}
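
/*
 * Worked example (not part of the original header) of the page-table walk
 * arithmetic behind the helpers above.  Assuming the usual 9-bit stride
 * per level, level N indexes bits [9*N - 1 : 9*(N - 1)] of the VT-d page
 * frame number, e.g. level 1 uses pfn bits 8:0 and level 2 uses bits 17:9.
 * The EXAMPLE_ names below are hypothetical.
 */
#define EXAMPLE_LEVEL_STRIDE	9
#define EXAMPLE_LEVEL_MASK	((1UL << EXAMPLE_LEVEL_STRIDE) - 1)

static inline unsigned int example_pfn_level_offset(u64 pfn, int level)
{
	return (pfn >> ((level - 1) * EXAMPLE_LEVEL_STRIDE)) & EXAMPLE_LEVEL_MASK;
}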

/*
 * VT-d pages must always be _smaller_ than MM pages. Otherwise things
 * are never going to work.
 */
static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{}
static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
{}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{}
static inline unsigned long virt_to_dma_pfn(void *p)
{}
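
/*
 * Hedged example (not part of the original header): because an MM page is
 * never smaller than a VT-d page, converting an MM PFN to a VT-d ("DMA")
 * PFN is a left shift by the difference of the two page shifts; with 4 KiB
 * MM pages the shift is zero and the PFNs coincide.  The helper name and
 * the hard-coded 12 are assumptions for illustration.
 */
static inline unsigned long example_mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - 12);	/* 12 == 4 KiB VT-d page shift */
}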

static inline void context_set_present(struct context_entry *context)
{}

static inline void context_set_fault_enable(struct context_entry *context)
{}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{}

static inline void context_set_pasid(struct context_entry *context)
{}

static inline int context_domain_id(struct context_entry *c)
{}

static inline void context_clear_entry(struct context_entry *context)
{}

#ifdef CONFIG_INTEL_IOMMU
static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{}

static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{}

static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{}
#endif /* CONFIG_INTEL_IOMMU */

/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * DMA translations of DMA requests without PASID.
 */
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{}

/*
 * Set the DTE(Device-TLB Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_dte(struct context_entry *context)
{}

/*
 * Set the PRE(Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_pre(struct context_entry *context)
{}

/*
 * Clear the PRE(Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_clear_sm_pre(struct context_entry *context)
{}

/* Returns the number of VT-d pages, aligned to the MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{}
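
/*
 * Illustrative sketch (not part of the original header): counting VT-d
 * pages for a host buffer must also cover the sub-page offset of the
 * start address, so the offset is added to the size and the total is
 * rounded up to an MM page before converting to 4 KiB units.  The helper
 * name and the explicit shift are assumptions for illustration.
 */
static inline unsigned long example_aligned_nrpages(unsigned long host_addr,
						    size_t size)
{
	unsigned long offset = host_addr & ~PAGE_MASK;	/* offset within the MM page */

	return ALIGN(offset + size, PAGE_SIZE) >> 12;	/* 4 KiB VT-d pages */
}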

/* Return the size in bytes for a given number of VT-d pages. */
static inline unsigned long nrpages_to_size(unsigned long npages)
{}

static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
				 unsigned int size_order, u64 type,
				 struct qi_desc *desc)
{}

static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
				     unsigned int mask, struct qi_desc *desc)
{}

static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
				  unsigned long npages, bool ih,
				  struct qi_desc *desc)
{}

static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
					   u16 qdep, u64 addr,
					   unsigned int size_order,
					   struct qi_desc *desc)
{}

/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds)

struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);

int dmar_enable_qi(struct intel_iommu *iommu);
void dmar_disable_qi(struct intel_iommu *iommu);
int dmar_reenable_qi(struct intel_iommu *iommu);
void qi_global_iec(struct intel_iommu *iommu);

void qi_flush_context(struct intel_iommu *iommu, u16 did,
		      u16 sid, u8 fm, u64 type);
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type);
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask);

void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih);

void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			      u32 pasid, u16 qdep, u64 addr,
			      unsigned int size_order);
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long pages,
			       u32 pasid, u16 qdep);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
			  u32 pasid);

int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options);

void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			 unsigned int size_order, u64 type);
/*
 * Options used in qi_submit_sync:
 * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
 */
#define QI_OPT_WAIT_DRAIN

int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
int prepare_domain_attach_device(struct iommu_domain *domain,
				 struct device *dev);
void domain_update_iommu_cap(struct dmar_domain *domain);

int dmar_ir_support(void);

void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
					       const struct iommu_user_data *user_data);
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);

enum cache_tag_type {};

struct cache_tag {};

int cache_tag_assign_domain(struct dmar_domain *domain,
			    struct device *dev, ioasid_t pasid);
void cache_tag_unassign_domain(struct dmar_domain *domain,
			       struct device *dev, ioasid_t pasid);
void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
			   unsigned long end, int ih);
void cache_tag_flush_all(struct dmar_domain *domain);
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
			      unsigned long end);

void intel_context_flush_present(struct device_domain_info *info,
				 struct context_entry *context,
				 u16 did, bool affect_domains);

#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
int intel_svm_finish_prq(struct intel_iommu *iommu);
void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
			     struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
							  struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}
#endif

#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
void intel_iommu_debugfs_create_dev(struct device_domain_info *info);
void intel_iommu_debugfs_remove_dev(struct device_domain_info *info);
void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid);
void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid);
#else
static inline void intel_iommu_debugfs_init(void) {}
static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {}
static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {}
static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */

extern const struct attribute_group *intel_iommu_groups[];
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);

extern const struct iommu_ops intel_iommu_ops;

#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
int iommu_calculate_agaw(struct intel_iommu *iommu);
int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob);

static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu)
{}

extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled
#define intel_iommu_enabled
#define intel_iommu_sm
#endif

static inline const char *decode_prq_descriptor(char *str, size_t size,
		u64 dw0, u64 dw1, u64 dw2, u64 dw3)
{}

#endif