// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <trace/events/swiotlb.h>

#include "dma-iommu.h"
#include "iommu-pages.h"

struct iommu_dma_msi_page {};

enum iommu_dma_cookie_type {};

enum iommu_dma_queue_type {};

struct iommu_dma_options {};

struct iommu_dma_cookie {};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
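
/*
 * Usage note: booting with "iommu.forcedac=1" makes dma-iommu allocate
 * straight from the full usable IOVA range for 64-bit capable devices,
 * rather than trying the 32-bit space first.
 */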

/* Number of entries per flush queue */
#define IOVA_DEFAULT_FQ_SIZE	256
#define IOVA_SINGLE_FQ_SIZE	32768

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_DEFAULT_FQ_TIMEOUT	10
#define IOVA_SINGLE_FQ_TIMEOUT	1000

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {};

/* Per-CPU flush queue structure */
struct iova_fq {};
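
/*
 * Minimal sketch of the (elided) flush-queue layout above, for orientation
 * only; the field names are assumptions inferred from how the ring helpers
 * below use the queue.
 */
#if 0
struct iova_fq_entry {
	unsigned long iova_pfn;		/* first page frame of the freed range */
	unsigned long pages;		/* number of pages in the range */
	struct list_head freelist;	/* page-table pages queued for freeing */
	u64 counter;			/* flush count when the entry was queued */
};

struct iova_fq {
	spinlock_t lock;
	unsigned int head, tail;	/* ring indices: entries live in [head, tail) */
	unsigned int mod_mask;		/* queue size - 1, for cheap index wrapping */
	struct iova_fq_entry entries[];
};
#endif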

#define fq_ring_for_each(i, fq)

static inline bool fq_full(struct iova_fq *fq)
{}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{}
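
/*
 * Sketch of the ring arithmetic the two helpers above perform, assuming
 * the head/tail/mod_mask fields from the sketch further up: the ring is
 * full when advancing tail would make it collide with head.
 */
#if 0
static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) & fq->mod_mask) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);
	fq->tail = (idx + 1) & fq->mod_mask;
	return idx;
}
#endif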

static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{}

static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{}

static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{}

static void fq_flush_timeout(struct timer_list *t)
{}

static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{}

static void iommu_dma_free_fq_single(struct iova_fq *fq)
{}

static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
{}

static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{}

static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
{}

static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
{}

static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
{}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{}
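
/*
 * For example, a flush queue can be enabled after boot by writing "DMA-FQ"
 * to /sys/kernel/iommu_groups/<N>/type; the group code ends up here under
 * the mutex mentioned above.
 */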

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{}
EXPORT_SYMBOL(iommu_get_msi_cookie);
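
/*
 * Hypothetical caller sketch (not from this file): a user such as VFIO,
 * which owns its IOVA space but still wants MSIs remapped, reserves a
 * doorbell window and installs the cookie. MSI_IOVA_BASE stands in for
 * whatever base address the caller reserved.
 */
#if 0
static int example_enable_msi_remap(struct iommu_domain *domain)
{
	return iommu_get_msi_cookie(domain, MSI_IOVA_BASE);
}
#endif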

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers the
 * GICv3 ITS region reservation on ACPI-based ARM platforms that may require
 * a HW MSI reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
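
/*
 * Sketch of a driver-side .get_resv_regions callback built on this helper,
 * loosely modelled on the Arm SMMU drivers; the software MSI region and
 * the MSI_IOVA_BASE/MSI_IOVA_LENGTH constants are the driver's choice.
 */
#if 0
static void example_get_resv_regions(struct device *dev, struct list_head *head)
{
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
	if (!region)
		return;
	list_add_tail(&region->list, head);

	/* add the generic (e.g. HW MSI) reservations on top */
	iommu_dma_get_resv_regions(dev, head);
}
#endif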

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{}

static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{}

static bool dev_is_untrusted(struct device *dev)
{}

static bool dev_use_swiotlb(struct device *dev, size_t size,
			    enum dma_data_direction dir)
{}

static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir)
{}

/**
 * iommu_dma_init_options - Initialize dma-iommu options
 * @options: The options to be initialized
 * @dev: Device the options are set for
 *
 * This allows dma-iommu behaviour to be tuned to the properties of @dev.
 */
static void iommu_dma_init_options(struct iommu_dma_options *options,
				   struct device *dev)
{}
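
/*
 * Sketch of the tuning performed here, assuming the only device-specific
 * knob is the IOMMU's shadow-on-flush behaviour (e.g. s390), which does
 * better with a single large flush queue than with per-CPU ones.
 */
#if 0
	if (dev->iommu->shadow_on_flush) {
		options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
		options->fq_size = IOVA_SINGLE_FQ_SIZE;
		options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
	} else {
		options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
		options->fq_size = IOVA_DEFAULT_FQ_SIZE;
		options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
	}
#endif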

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @dev: Device the domain is being initialised for
 *
 * If the geometry and dma_range_map include address 0, we reserve that page
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
{}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{}
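
/*
 * The translation described in the kernel-doc above boils down to the
 * following sketch (standard IOMMU_READ/WRITE/CACHE/PRIV flags assumed).
 */
#if 0
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
#endif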

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{}
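
/*
 * Sketch of the core map pattern: align the request to IOVA granules,
 * grab an IOVA range, install the mapping, then hand back the caller's
 * sub-granule offset on top. The deferred-attach check is elided and
 * iovad/cookie are assumed to be the function's usual locals.
 */
#if 0
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);
	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
#endif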

static void __iommu_dma_free_pages(struct page **pages, int count)
{}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{}

/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, unsigned long attrs)
{}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{}

static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{}

/*
 * If mapping failed, just restore the original list, but make sure the
 * DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{}
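
/*
 * Sketch of the restore-and-poison loop described above: undo the
 * offset/length adjustments stashed in the DMA fields, then mark those
 * fields invalid.
 */
#if 0
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
#endif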

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{}
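
/*
 * Simplified conceptual sketch (not the exact body) of the alignment pass
 * described above: each segment is grown out to IOVA granule boundaries,
 * with the original offset/length stashed in the DMA fields so that
 * __finalise_sg() can restore the caller's view afterwards.
 */
#if 0
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);

		sg_dma_address(s) = s_iova_off;	/* stash original offset */
		sg_dma_len(s) = s->length;	/* stash original length */
		s->offset -= s_iova_off;
		s->length = iova_align(iovad, s->length + s_iova_off);
		iova_len += s->length;
	}
#endif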

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{}

static size_t iommu_dma_opt_mapping_size(void)
{}

static size_t iommu_dma_max_mapping_size(struct device *dev)
{}

static const struct dma_map_ops iommu_dma_ops = {};

void iommu_setup_dma_ops(struct device *dev)
{}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{}

/**
 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
 * @desc: MSI descriptor, will store the MSI page
 * @msi_addr: MSI target address to be mapped
 *
 * Return: 0 on success or negative error code if the mapping failed.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{}
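
/*
 * Hypothetical irqchip-side usage (not from this file): an MSI controller
 * maps its doorbell before composing messages, e.g. targeting the GICv3
 * ITS translator register; its_phys_base is an illustrative name.
 */
#if 0
	err = iommu_dma_prepare_msi(desc, its_phys_base + GITS_TRANSLATER);
	if (err)
		return err;
#endif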

/**
 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
 * @msg: MSI message containing target physical address
 */
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{}
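
/*
 * Hypothetical continuation of the sketch above: once the irqchip has put
 * the physical doorbell address into @msg, this hook rewrites it with the
 * IOVA set up by iommu_dma_prepare_msi().
 */
#if 0
	/* in the irqchip's .irq_compose_msi_msg callback */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
#endif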

static int iommu_dma_init(void)
{}
arch_initcall(iommu_dma_init);