#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <trace/events/swiotlb.h>
#include "dma-iommu.h"
#include "iommu-pages.h"
/* Bookkeeping for one remapped MSI doorbell page (body elided in this view). */
struct iommu_dma_msi_page { … };
/* Discriminates the flavours of iommu_dma_cookie (members elided in this view). */
enum iommu_dma_cookie_type { … };
/* Selects the IOVA flush-queue strategy in use (members elided in this view). */
enum iommu_dma_queue_type { … };
/* Per-device tuning derived in iommu_dma_init_options() (fields elided in this view). */
struct iommu_dma_options { … };
/* Per-domain DMA state attached via iommu_get_dma_cookie()/iommu_get_msi_cookie()
 * (fields elided in this view).
 */
struct iommu_dma_cookie { … };
/* Static key: when enabled, device attach is deferred to first DMA-API use
 * (presumably set on the kdump path — body of the setter not visible here).
 */
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
/* Command-line tunable (see iommu_dma_forcedac_setup()): force 64-bit DAC
 * addressing rather than preferring the 32-bit IOVA space first.
 */
bool iommu_dma_forcedac __read_mostly;
/* Early-param parser for the forcedac option; presumably sets
 * iommu_dma_forcedac from @str (body elided — confirm against upstream).
 */
static int __init iommu_dma_forcedac_setup(char *str)
{ … }
/* Registers iommu_dma_forcedac_setup() as an early boot parameter handler. */
early_param(…);
/* Flush-queue sizing and timeout constants: the DEFAULT_* pair applies to the
 * per-CPU queues, the SINGLE_* pair to the single shared queue (values elided
 * in this view).
 */
#define IOVA_DEFAULT_FQ_SIZE …
#define IOVA_SINGLE_FQ_SIZE …
#define IOVA_DEFAULT_FQ_TIMEOUT …
#define IOVA_SINGLE_FQ_TIMEOUT …
/* One deferred-free record in an IOVA flush queue (fields elided in this view). */
struct iova_fq_entry { … };
/* Ring buffer of iova_fq_entry records awaiting an IOTLB flush (fields elided). */
struct iova_fq { … };
/* Iterates index @i over the occupied slots of flush queue @fq (expansion elided). */
#define fq_ring_for_each(i, fq) …
/* Returns true when @fq's ring has no free slot for another entry. */
static inline bool fq_full(struct iova_fq *fq)
{ … }
/* Claims the next ring slot in @fq and returns its index (body elided). */
static inline unsigned int fq_ring_add(struct iova_fq *fq)
{ … }
/* Frees entries from @fq back to @cookie's IOVA space; the "_locked" suffix
 * indicates the caller already holds the queue lock (see fq_ring_free()).
 */
static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{ … }
/* Locking wrapper around fq_ring_free_locked() (body elided). */
static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{ … }
/* Issues the IOTLB invalidation that makes queued IOVA frees safe to reuse. */
static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{ … }
/* Timer callback: forces a flush so queued IOVAs are not held indefinitely
 * when the queue never fills (timer set up elsewhere — body elided).
 */
static void fq_flush_timeout(struct timer_list *t)
{ … }
/* Defers the free of [@pfn, @pfn + @pages) and its pagetable @freelist to the
 * flush queue instead of freeing immediately (body elided).
 */
static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{ … }
/* Tears down a single (non-percpu) flush queue. */
static void iommu_dma_free_fq_single(struct iova_fq *fq)
{ … }
/* Tears down the per-CPU flush queues (note the __percpu pointer annotation). */
static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
{ … }
/* Dispatches to the single or percpu free path based on @cookie's queue type. */
static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{ … }
/* Initializes one flush-queue instance of capacity @fq_size. */
static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
{ … }
/* Allocates and installs a single shared flush queue; returns 0 or -errno
 * (exact error conventions not visible — body elided).
 */
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
{ … }
/* Allocates and installs per-CPU flush queues; returns 0 or -errno. */
static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
{ … }
/* Public entry (declared in dma-iommu.h): enables deferred-flush ("lazy")
 * IOVA freeing for @domain by setting up its flush queue(s).
 */
int iommu_dma_init_fq(struct iommu_domain *domain)
{ … }
/* Returns the allocation granule used for MSI mappings under @cookie. */
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{ … }
/* Allocates a cookie of @type; returns NULL on allocation failure
 * (presumed from pointer return — body elided).
 */
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{ … }
/* Attaches a full DMA-API cookie (IOVA allocator state) to @domain. */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{ … }
/* Attaches a minimal cookie to @domain for MSI mappings only, with the MSI
 * window starting at @base. Exported for IRQ-chip/VFIO-style users.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{ … }
EXPORT_SYMBOL(…);
/* Releases @domain's cookie and all resources it owns (counterpart of the
 * iommu_get_*_cookie() functions above).
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{ … }
/* Appends @dev's firmware-described reserved regions to @list
 * (likely via IORT/OF helpers per the includes — body elided). Exported.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{ … }
EXPORT_SYMBOL(…);
/* Pre-populates MSI pages for a hardware MSI region spanning [@start, @end). */
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{ … }
/* list_sort() comparator for resource/range lists (ordering key elided). */
static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{ … }
/* Reserves IOVA ranges overlapping @dev's PCI host-bridge windows so the
 * allocator never hands them out.
 */
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{ … }
/* Reserves IOVAs for all IOMMU reserved regions reported for @dev on @domain. */
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{ … }
/* True for devices the platform marks untrusted (e.g. external-facing PCI —
 * exact check elided); such devices get bounce-buffered DMA.
 */
static bool dev_is_untrusted(struct device *dev)
{ … }
/* Decides whether a single mapping of @size/@dir for @dev must bounce
 * through swiotlb (body elided).
 */
static bool dev_use_swiotlb(struct device *dev, size_t size,
			    enum dma_data_direction dir)
{ … }
/* Scatterlist variant of dev_use_swiotlb(): examines @nents entries of @sg. */
static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir)
{ … }
/* Fills @options with per-device queue/tuning choices derived from @dev. */
static void iommu_dma_init_options(struct iommu_dma_options *options,
				   struct device *dev)
{ … }
/* One-time initialization of @domain's DMA state for first use by @dev
 * (IOVA domain setup, reserved regions, etc. — body elided, do not assume
 * specifics beyond what callers require).
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
{ … }
/* Translates DMA direction/coherency/@attrs into IOMMU_* mapping prot flags. */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{ … }
/* Allocates an IOVA range of @size for @dev, constrained by @dma_limit;
 * presumably returns 0 / DMA_MAPPING_ERROR on failure (body elided — verify).
 */
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{ … }
/* Returns [@iova, @iova + @size) to the allocator; @gather carries the
 * pending-invalidation context from the unmap that freed it.
 */
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{ … }
/* Core unmap: removes the IOMMU mapping at @dma_addr/@size and releases the
 * IOVA (immediately or via the flush queue — body elided).
 */
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{ … }
/* Core map: allocates an IOVA within @dma_mask and maps @phys/@size with
 * @prot; returns the DMA address (error convention elided).
 */
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{ … }
/* Frees @count pages plus the @pages array from __iommu_dma_alloc_pages(). */
static void __iommu_dma_free_pages(struct page **pages, int count)
{ … }
/* Allocates @count pages for @dev, attempting the block orders permitted by
 * @order_mask; returns a page array or NULL (body elided).
 */
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{ … }
/* Allocates scattered pages, maps them IOVA-contiguously, and fills @sgt;
 * shared backend for alloc_remap/alloc_noncontiguous (body elided).
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, unsigned long attrs)
{ … }
/* Coherent-alloc backend: noncontiguous pages remapped into a vmalloc-style
 * CPU mapping; DMA address returned via @dma_handle.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{ … }
/* dma_map_ops .alloc_noncontiguous: returns an sg_table describing the
 * allocation; freed by iommu_dma_free_noncontiguous().
 */
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{ … }
/* Counterpart of iommu_dma_alloc_noncontiguous(): unmaps and frees @sgt. */
static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{ … }
/* Cache maintenance (and swiotlb copy-back if bounced) before CPU access. */
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{ … }
/* Cache maintenance (and swiotlb copy-out if bounced) before device access. */
static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{ … }
/* Scatterlist variant of sync_single_for_cpu over @nelems entries of @sgl. */
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{ … }
/* Scatterlist variant of sync_single_for_device over @nelems entries of @sgl. */
static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{ … }
/* dma_map_ops .map_page: maps @page + @offset for @size bytes, bouncing via
 * swiotlb when dev_use_swiotlb() says so (body elided).
 */
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{ … }
/* dma_map_ops .unmap_page: reverses iommu_dma_map_page(), including any
 * swiotlb bounce-buffer teardown.
 */
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{ … }
/* Rewrites @sg's dma_address/dma_length fields after a bulk mapping at
 * @dma_addr; returns the number of DMA segments (body elided).
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{ … }
/* Undoes __finalise_sg()'s field rewriting on the error path. */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{ … }
/* Unmap path for scatterlists that were mapped entry-by-entry via swiotlb. */
static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{ … }
/* Fallback scatterlist map: bounces each entry individually through swiotlb
 * (used when dev_use_sg_swiotlb() is true).
 */
static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{ … }
/* dma_map_ops .map_sg: maps @nents entries; likely coalesces them into one
 * IOVA-contiguous range then calls __finalise_sg() — body elided, confirm
 * against upstream before relying on segment-merging behavior.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{ … }
/* dma_map_ops .unmap_sg: reverses iommu_dma_map_sg() (either path). */
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{ … }
/* dma_map_ops .map_resource: maps MMIO at @phys (no struct page, no cache
 * maintenance expected for resources).
 */
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{ … }
/* dma_map_ops .unmap_resource: reverses iommu_dma_map_resource(). */
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{ … }
/* Frees the CPU-side buffer behind @cpu_addr, handling the different backing
 * types (remapped pages vs. contiguous allocation — body elided).
 */
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{ … }
/* dma_map_ops .free: unmaps @handle then frees @cpu_addr via __iommu_dma_free(). */
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{ … }
/* Allocates a physically contiguous buffer, returning both the kernel address
 * and (via @pagep) the backing page (body elided).
 */
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{ … }
/* dma_map_ops .alloc: coherent allocation — picks the remap or contiguous
 * backend, maps it, and returns the DMA address via @handle.
 */
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{ … }
/* dma_map_ops .mmap: maps a coherent allocation into userspace @vma. */
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{ … }
/* dma_map_ops .get_sgtable: builds @sgt describing a coherent allocation. */
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{ … }
/* dma_map_ops .get_merge_boundary: segment-merge boundary for @dev's domain. */
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{ … }
/* dma_map_ops .opt_mapping_size: size above which IOVA allocation gets
 * less efficient (value source elided).
 */
static size_t iommu_dma_opt_mapping_size(void)
{ … }
/* dma_map_ops .max_mapping_size: cap for @dev, presumably constrained by
 * swiotlb when bouncing applies (body elided — confirm).
 */
static size_t iommu_dma_max_mapping_size(struct device *dev)
{ … }
/* The dma_map_ops table wiring the iommu_dma_* callbacks above into the
 * DMA API (initializer elided in this view).
 */
static const struct dma_map_ops iommu_dma_ops = …;
/* Installs iommu_dma_ops (or not) for @dev depending on its current domain
 * type (decision logic elided). Called from IOMMU core probe paths.
 */
void iommu_setup_dma_ops(struct device *dev)
{ … }
/* Finds or creates the iommu_dma_msi_page mapping @msi_addr in @domain;
 * returns NULL on failure (presumed from pointer return — body elided).
 */
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{ … }
/* Pre-maps the MSI doorbell at @msi_addr for @desc's device so that
 * iommu_dma_compose_msi_msg() cannot fail at message-write time.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{ … }
/* Rewrites @msg's doorbell address to the IOVA established by
 * iommu_dma_prepare_msi() (no-op details elided).
 */
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{ … }
/* Boot-time init for this layer, registered as an arch_initcall
 * (body elided; likely enables deferred attach under kdump — confirm).
 */
static int iommu_dma_init(void)
{ … }
arch_initcall(iommu_dma_init);