/* linux/drivers/iommu/iommu.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 */

/* Prefix all pr_*() messages from this file; an empty expansion would leave printk() with no format string. */
#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>

#include "dma-iommu.h"
#include "iommu-priv.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);

/* Default domain type requested on the command line (0 = no override). */
static unsigned int iommu_def_domain_type __read_mostly;
/* IS_ENABLED() requires a CONFIG_ symbol; strict invalidation default comes from Kconfig. */
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
/* IOMMU_CMD_LINE_* flags recording which settings came from the command line. */
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {};

struct group_device {};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos)

struct iommu_group_attribute {};

static const char * const iommu_group_resv_type_string[] =;

/* Bits for iommu_cmd_line; empty defines would make the flags indistinguishable (both 0). */
#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static struct iommu_domain *
__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);

enum {};

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{}

static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev);
static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)
#define to_iommu_group(_kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static const struct bus_type * const iommu_buses[] =;

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{}

static int __init iommu_subsys_init(void)
{}
subsys_initcall(iommu_subsys_init);

static int remove_iommu_group(struct device *dev, void *data)
{}

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops:   IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
{}
EXPORT_SYMBOL_GPL(iommu_device_register);

/* Counterpart of iommu_device_register(). */
void iommu_device_unregister(struct iommu_device *iommu)
{}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
/* Counterpart of iommu_device_register_bus(); used only by the iommufd selftest. */
void iommu_device_unregister_bus(struct iommu_device *iommu,
				 const struct bus_type *bus,
				 struct notifier_block *nb)
{}
EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);

/*
 * Register an iommu driver against a single bus. This is only used by iommufd
 * selftest to create a mock iommu driver. The caller must provide
 * some memory to hold a notifier_block.
 */
int iommu_device_register_bus(struct iommu_device *iommu,
			      const struct iommu_ops *ops,
			      const struct bus_type *bus,
			      struct notifier_block *nb)
{}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
#endif

static struct dev_iommu *dev_iommu_get(struct device *dev)
{}

static void dev_iommu_free(struct device *dev)
{}

/*
 * Internal equivalent of device_iommu_mapped() for when we care that a device
 * actually has API ops, and don't want false positives from VFIO-only groups.
 */
static bool dev_has_iommu(struct device *dev)
{}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{}

/* Stash IOMMU-driver private data in @dev. */
void dev_iommu_priv_set(struct device *dev, void *priv)
{}
EXPORT_SYMBOL_GPL(dev_iommu_priv_set);

/*
 * Init the dev->iommu and dev->iommu_group in the struct device and get the
 * driver probed
 */
static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
{}

static void iommu_deinit_device(struct device *dev)
{}

/* DEFINE_MUTEX() requires a name; serializes device probe against release. */
DEFINE_MUTEX(iommu_probe_device_lock);

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{}

int iommu_probe_device(struct device *dev)
{}

static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev)
{}

/* Remove the iommu_group from the struct device. */
static void __iommu_group_remove_device(struct device *dev)
{}

static void iommu_release_device(struct device *dev)
{}

static int __init iommu_set_def_domain_type(char *str)
{}
early_param();

static int __init iommu_dma_setup(char *str)
{}
early_param();

void iommu_set_dma_strict(void)
{}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{}

static const struct sysfs_ops iommu_group_sysfs_ops =;

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{}

/* Collect the reserved regions of all devices in @group into @head. */
int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{}

static const struct kobj_type iommu_group_ktype =;

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev)
{}

/* This is undone by __iommu_group_free_device() */
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev)
{}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from it's current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * it's current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
/**
 * iommu_group_mutex_assert - Check device group mutex lock
 * @dev: the device that has group param set
 *
 * This function is called by an iommu driver to check whether it holds
 * group mutex lock for the given device or not.
 *
 * Note that this function must be called after device group param is set.
 */
void iommu_group_mutex_assert(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
#endif

static struct device *iommu_group_first_dev(struct iommu_group *group)
{}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference in incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
/* Per the comment above: SV, RR, CC and UF must all be supported; an empty mask would treat every device as isolated. */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CC | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at pcie
 * device, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{}

struct group_for_pci_data {};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per iommu driver instance shared by every device
 * probed by that iommu driver.
 */
struct iommu_group *generic_single_device_group(struct device *dev)
{}
EXPORT_SYMBOL_GPL(generic_single_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static struct iommu_domain *
__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{}

/*
 * req_type of 0 means "auto" which means to select a domain based on
 * iommu_def_domain_type or what the driver actually supports.
 */
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{}

static int probe_iommu_group(struct device *dev, void *data)
{}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{}

/*
 * Combine the driver's chosen def_domain_type across all the devices in a
 * group. Drivers must give a consistent result.
 */
static int iommu_get_def_domain_type(struct iommu_group *group,
				     struct device *dev, int cur_type)
{}

/*
 * A target_type of 0 will select the best domain type. 0 can be returned in
 * this case meaning the global default should be used.
 */
static int iommu_get_default_domain_type(struct iommu_group *group,
					 int target_type)
{}

static void iommu_group_do_probe_finalize(struct device *dev)
{}

int bus_iommu_probe(const struct bus_type *bus)
{}

/**
 * iommu_present() - make platform-specific assumptions about an IOMMU
 * @bus: bus to check
 *
 * Do not use this function. You want device_iommu_mapped() instead.
 *
 * Return: true if some IOMMU is present and aware of devices on the given bus;
 * in general it may not be the only IOMMU, and it may not have anything to do
 * with whatever device you are ultimately interested in.
 */
bool iommu_present(const struct bus_type *bus)
{}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{}
EXPORT_SYMBOL_GPL(device_iommu_capable);

/**
 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
 *       for a group
 * @group: Group to query
 *
 * IOMMU groups should not have differing values of
 * msi_device_has_isolated_msi() for devices in a group. However nothing
 * directly prevents this, so ensure mistakes don't result in isolation failures
 * by checking that all the devices are the same.
 */
bool iommu_group_has_isolated_msi(struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
						 struct device *dev,
						 unsigned int type)
{}

static struct iommu_domain *
__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
{}

static int __iommu_domain_alloc_dev(struct device *dev, void *data)
{}

/*
 * The iommu ops in bus has been retired. Do not use this interface in
 * new drivers.
 */
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

/**
 * iommu_paging_domain_alloc() - Allocate a paging domain
 * @dev: device for which the domain is allocated
 *
 * Allocate a paging domain which will be managed by a kernel driver. Return
 * allocated domain if successful, or a ERR pointer for failure.
 */
struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc);

/* Release a domain obtained from one of the domain allocation interfaces. */
void iommu_domain_free(struct iommu_domain *domain)
{}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{}

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{}

/**
 * iommu_attach_device - Attach an IOMMU domain to a device
 * @domain: IOMMU domain to attach
 * @dev: Device that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the device. In this case attaching a different domain to the
 * device may succeed.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{}

/* Pairs with iommu_attach_device(). */
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_detach_device);

/* Return the iommu domain currently in use for @dev. */
struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{}

/**
 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the group. In this case attaching a different domain to the
 * group may succeed.
 */
int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_attach_group);

/**
 * iommu_group_replace_domain - replace the domain that a group is attached to
 * @new_domain: new IOMMU domain to replace with
 * @group: IOMMU group that will be attached to the new domain
 *
 * This API allows the group to switch domains without being forced to go to
 * the blocking domain in-between.
 *
 * If the currently attached domain is a core domain (e.g. a default_domain),
 * it will act just like the iommu_attach_group().
 */
int iommu_group_replace_domain(struct iommu_group *group,
			       struct iommu_domain *new_domain)
{}
EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, IOMMUFD_INTERNAL);

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags)
{}

/*
 * If 0 is returned the group's domain is new_domain. If an error is returned
 * then the group's domain will be set back to the existing domain unless
 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's
 * domains is left inconsistent. This is a driver bug to fail attach with a
 * previously good domain. We try to avoid a kernel UAF because of this.
 *
 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
 * API works on domains and devices.  Bridge that gap by iterating over the
 * devices in a group.  Ideally we'd have a single device which represents the
 * requestor ID of the group, but we also allow IOMMU drivers to create policy
 * defined minimum sets, where the physical hardware may be able to distiguish
 * members, but we wish to group them at a higher level (ex. untrusted
 * multi-function PCI devices).  Thus we attach each device.
 */
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags)
{}

/* Pairs with iommu_attach_group(). */
void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_detach_group);

/* Translate an IOVA in @domain to the physical address it maps to. */
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, size_t *count)
{}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{}

/* Map [iova, iova + size) to @paddr in @domain with @prot permissions. */
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(iommu_map);

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{}

/* Unmap [iova, iova + size) from @domain. */
size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{}
EXPORT_SYMBOL_GPL(iommu_unmap);

/* Variant of iommu_unmap() taking a caller-managed iotlb_gather. */
size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

/* Map a scatterlist starting at @iova. */
ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		     struct scatterlist *sg, unsigned int nents, int prot,
		     gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(iommu_map_sg);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{}
core_initcall(iommu_init);

/* Opt @domain in to nested translation, where the driver supports it. */
int iommu_enable_nesting(struct iommu_domain *domain)
{}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);

/* Apply a page-table quirk flag to @domain. */
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirk)
{}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);

/**
 * iommu_get_resv_regions - get reserved regions
 * @dev: device for which to get reserved regions
 * @list: reserved region list for device
 *
 * This returns a list of reserved IOVA regions specific to this device.
 * A domain user should not map IOVA in these ranges.
 */
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{}
EXPORT_SYMBOL_GPL(iommu_get_resv_regions);

/**
 * iommu_put_resv_regions - release reserved regions
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * This releases a reserved region list acquired by iommu_get_resv_regions().
 */
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{}
EXPORT_SYMBOL(iommu_put_resv_regions);

/* Allocate and initialize a struct iommu_resv_region. */
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type,
						  gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

void iommu_set_default_passthrough(bool cmd_line)
{}

void iommu_set_default_translated(bool cmd_line)
{}

/* Report whether the system default domain type is passthrough. */
bool iommu_default_passthrough(void)
{}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{}

/* Initialize @dev's iommu_fwspec for the IOMMU identified by @iommu_fwnode. */
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

/* Counterpart of iommu_fwspec_init(). */
void iommu_fwspec_free(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

/* Append @num_ids firmware-provided IDs to @dev's iommu_fwspec. */
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
{}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

/*
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

/**
 * iommu_setup_default_domain - Set the default_domain for the group
 * @group: Group to change
 * @target_type: Domain type to set as the default_domain
 *
 * Allocate a default domain and set it as the current domain on the group. If
 * the group already has a default domain it will be changed to the target_type.
 * When target_type is 0 the default domain is selected based on driver and
 * system preferences.
 */
static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type)
{}

/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
 * transition. Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * group->mutex is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{}

/**
 * iommu_device_use_default_domain() - Device driver wants to handle device
 *                                     DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver about to bind @dev wants to do DMA through the kernel
 * DMA API. Return 0 if it is allowed, otherwise an error.
 */
int iommu_device_use_default_domain(struct device *dev)
{}

/**
 * iommu_device_unuse_default_domain() - Device driver stops handling device
 *                                       DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver doesn't want to do DMA through kernel DMA API anymore.
 * It must be called after iommu_device_use_default_domain().
 */
void iommu_device_unuse_default_domain(struct device *dev)
{}

static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{}

static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
{}

/**
 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
 * @group: The group.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * This is to support backward compatibility for vfio which manages the dma
 * ownership in iommu_group level. New invocations on this interface should be
 * prohibited. Only a single owner may exist for a group.
 */
int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{}
EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);

/**
 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
 * @dev: The device.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * Claim the DMA ownership of a device. Multiple devices in the same group may
 * concurrently claim ownership if they present the same owner value. Returns 0
 * on success and error code on failure
 */
int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{}
EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);

static void __iommu_release_dma_ownership(struct iommu_group *group)
{}

/**
 * iommu_group_release_dma_owner() - Release DMA ownership of a group
 * @group: The group
 *
 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
 */
void iommu_group_release_dma_owner(struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);

/**
 * iommu_device_release_dma_owner() - Release DMA ownership of a device
 * @dev: The device.
 *
 * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
 */
void iommu_device_release_dma_owner(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);

/**
 * iommu_group_dma_owner_claimed() - Query group dma ownership status
 * @group: The group.
 *
 * This provides status query on a given group. It is racy and only for
 * non-binding status reporting.
 */
bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);

static int __iommu_set_group_pasid(struct iommu_domain *domain,
				   struct iommu_group *group, ioasid_t pasid)
{}

static void __iommu_remove_group_pasid(struct iommu_group *group,
				       ioasid_t pasid,
				       struct iommu_domain *domain)
{}

/*
 * iommu_attach_device_pasid() - Attach a domain to pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 * @handle: the attach handle.
 *
 * Return: 0 on success, or an error.
 */
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle)
{}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);

/*
 * iommu_detach_device_pasid() - Detach the domain from pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * The @domain must have been attached to @pasid of the @dev with
 * iommu_attach_device_pasid().
 */
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
			       ioasid_t pasid)
{}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);

/* Allocate a system-wide PASID usable for @dev. */
ioasid_t iommu_alloc_global_pasid(struct device *dev)
{}
EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);

/* Counterpart of iommu_alloc_global_pasid(). */
void iommu_free_global_pasid(ioasid_t pasid)
{}
EXPORT_SYMBOL_GPL(iommu_free_global_pasid);

/**
 * iommu_attach_handle_get - Return the attach handle
 * @group: the iommu group that domain was attached to
 * @pasid: the pasid within the group
 * @type: matched domain type, 0 for any match
 *
 * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch.
 *
 * Return the attach handle to the caller. The life cycle of an iommu attach
 * handle is from the time when the domain is attached to the time when the
 * domain is detached. Callers are required to synchronize the call of
 * iommu_attach_handle_get() with domain attachment and detachment. The attach
 * handle can only be used during its life cycle.
 */
struct iommu_attach_handle *
iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
{}
EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, IOMMUFD_INTERNAL);

/**
 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 * @handle: attach handle
 *
 * Returns 0 on success and error code on failure.
 *
 * This is a variant of iommu_attach_group(). It allows the caller to provide
 * an attach handle and use it when the domain is attached. This is currently
 * used by IOMMUFD to deliver the I/O page faults.
 */
int iommu_attach_group_handle(struct iommu_domain *domain,
			      struct iommu_group *group,
			      struct iommu_attach_handle *handle)
{}
EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, IOMMUFD_INTERNAL);

/**
 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 *
 * Detach the specified IOMMU domain from the specified IOMMU group.
 * It must be used in conjunction with iommu_attach_group_handle().
 */
void iommu_detach_group_handle(struct iommu_domain *domain,
			       struct iommu_group *group)
{}
EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, IOMMUFD_INTERNAL);

/**
 * iommu_replace_group_handle - replace the domain that a group is attached to
 * @group: IOMMU group that will be attached to the new domain
 * @new_domain: new IOMMU domain to replace with
 * @handle: attach handle
 *
 * This is a variant of iommu_group_replace_domain(). It allows the caller to
 * provide an attach handle for the new domain and use it when the domain is
 * attached.
 */
int iommu_replace_group_handle(struct iommu_group *group,
			       struct iommu_domain *new_domain,
			       struct iommu_attach_handle *handle)
{}
EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, IOMMUFD_INTERNAL);