// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will@kernel.org>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include <linux/fsl/mc.h>

#include "arm-smmu.h"
#include "../../dma-iommu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1
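
/*
 * Illustrative only: a sync-register kick written as
 *
 *	writel_relaxed(QCOM_DUMMY_VAL, base + ARM_SMMU_GR0_sTLBGSYNC);
 *
 * cannot be compiled into a store sourced from XZR/WZR, whereas writing
 * a literal 0 could be, which is exactly what trips the broken hypervisor
 * trap code described above.
 */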

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
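
/*
 * When the driver is built in, both parameters can also be set on the
 * kernel command line, e.g. "arm_smmu.disable_bypass=0 arm_smmu.force_stage=2".
 */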

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

static bool using_legacy_binding, using_generic_binding;

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_resume_and_get(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put_autosuspend(smmu->dev);
}

static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
{
	/*
	 * Set an autosuspend delay so that bursts of map/unmap activity
	 * don't bounce the device in and out of runtime suspend for
	 * every individual operation.
	 */
	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
	pm_runtime_use_autosuspend(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

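/*
 * The legacy DT binding describes masters via "mmu-masters" phandle lists
 * on the SMMU node itself, so resolving a device's SMMU and stream IDs
 * means walking every SMMU node's properties rather than following the
 * generic "iommus" property on the master.
 */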
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static struct device_node *dev_get_dev_node(struct device *dev)
{}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{}

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{}
#else
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	return -ENODEV;
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{}
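
/*
 * A minimal sketch of the sync sequence, assuming no implementation-specific
 * tlb_sync hook overrides it and using the driver's spin/timeout constants:
 * kick the sync register, then poll the status register with a
 * progressively longer back-off.
 *
 *	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
 *	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
 *		for (spin = TLB_SPIN_COUNT; spin > 0; spin--) {
 *			if (!(arm_smmu_readl(smmu, page, status) &
 *			      ARM_SMMU_sTLBGSTATUS_GSACTIVE))
 *				return;
 *			cpu_relax();
 *		}
 *		udelay(delay);
 *	}
 */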

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{}

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{}

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, void *cookie, int reg)
{}

static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{}

static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{}

static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{}

static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie)
{}

static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
					size_t granule, void *cookie)
{}
/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
 */
static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
					unsigned long iova, size_t granule,
					void *cookie)
{}

static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s1,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2,
};

static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
	.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2_v1,
	.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
};

void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
				      struct arm_smmu_context_fault_info *cfi)
{}

void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
				       const struct arm_smmu_context_fault_info *cfi)
{}

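/*
 * Context fault handler: snapshot the fault record (FSR, FAR, FSYNR0,
 * CBFRSYNRA) via arm_smmu_read_context_fault_info(), give any registered
 * handler a chance via report_iommu_fault(), print a ratelimited report
 * otherwise, and write the FSR back to clear the fault.
 */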
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{}

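/*
 * Global fault handler: read sGFSR and the sGFSYNR registers, print a
 * ratelimited report (unmatched stream IDs land here when bypass is
 * disabled), then write sGFSR back to clear the fault.
 */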
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{}

void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{}

static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_device *smmu,
				       struct device *dev, unsigned int start)
{}

static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
					struct arm_smmu_device *smmu,
					struct device *dev)
{}

static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
{}

static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{}
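
/*
 * A sketch of the probing approach, using the accessors from arm-smmu.h:
 * program SMR0 with every ID (and mask) bit set, read it back, and keep
 * whatever bits the hardware actually implemented.
 *
 *	u32 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
 *
 *	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
 *	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
 *	smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
 */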

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{}

static int arm_smmu_master_alloc_smes(struct device *dev)
{}

static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
				      struct iommu_fwspec *fwspec)
{}

static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
					  enum arm_smmu_s2cr_type type,
					  u8 cbndx, struct iommu_fwspec *fwspec)
{}

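/*
 * Attaching a device: look up the device's SMMU from its fwspec,
 * initialise (or reuse) the domain's context bank on that SMMU, then point
 * the device's stream mapping entries at that context bank through its
 * S2CRs. Runtime PM is held around the register writes.
 */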
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{}

static int arm_smmu_attach_dev_type(struct device *dev,
				    enum arm_smmu_s2cr_type type)
{}

static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
					struct device *dev)
{}

static const struct iommu_domain_ops arm_smmu_identity_ops = {
	.attach_dev = arm_smmu_attach_dev_identity,
};

static struct iommu_domain arm_smmu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &arm_smmu_identity_ops,
};

static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
				       struct device *dev)
{}

static const struct iommu_domain_ops arm_smmu_blocked_ops = {
	.attach_dev = arm_smmu_attach_dev_blocked,
};

static struct iommu_domain arm_smmu_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &arm_smmu_blocked_ops,
};

static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{}

static size_t arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *iotlb_gather)
{}

static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{}

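/*
 * For stage-1 domains on hardware with ARM_SMMU_FEAT_TRANS_OPS, the hard
 * variant above uses the ATS1PR address translation operation; otherwise
 * the software io-pgtable is walked.
 */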
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					dma_addr_t iova)
{}

static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{}

static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{}

static void arm_smmu_release_device(struct device *dev)
{}

static void arm_smmu_probe_finalize(struct device *dev)
{}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{}

static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{}

static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{}

static int arm_smmu_of_xlate(struct device *dev,
			     const struct of_phandle_args *args)
{}

static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{}

static int arm_smmu_def_domain_type(struct device *dev)
{}

static struct iommu_ops arm_smmu_ops = {
	.identity_domain	= &arm_smmu_identity_domain,
	.blocked_domain		= &arm_smmu_blocked_domain,
	.capable		= arm_smmu_capable,
	.domain_alloc_paging	= arm_smmu_domain_alloc_paging,
	.probe_device		= arm_smmu_probe_device,
	.release_device		= arm_smmu_release_device,
	.probe_finalize		= arm_smmu_probe_finalize,
	.device_group		= arm_smmu_device_group,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.def_domain_type	= arm_smmu_def_domain_type,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
	.owner			= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev		= arm_smmu_attach_dev,
		.map_pages		= arm_smmu_map_pages,
		.unmap_pages		= arm_smmu_unmap_pages,
		.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
		.iotlb_sync		= arm_smmu_iotlb_sync,
		.iova_to_phys		= arm_smmu_iova_to_phys,
		.enable_nesting		= arm_smmu_enable_nesting,
		.set_pgtable_quirks	= arm_smmu_set_pgtable_quirks,
		.free			= arm_smmu_domain_free,
	}
};

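/*
 * Bring the SMMU to a known state: clear the global FSR, reset every
 * stream mapping entry and context bank, invalidate the TLBs, then
 * configure sCR0 (fault reporting, bypass behaviour) and sync.
 */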
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{}

static int arm_smmu_id_size_to_bits(int size)
{}

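/*
 * Read the global ID registers to discover what this implementation
 * supports: translation stages, stream matching vs. indexing, the number
 * of context banks and SMRs, and the IPA/PA/VA sizes and page sizes.
 */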
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static const struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{}

static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
				      u32 *global_irqs, u32 *pmu_irqs)
{}
#else
static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
					     u32 *global_irqs, u32 *pmu_irqs)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
				    u32 *global_irqs, u32 *pmu_irqs)
{}

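/*
 * Install bypass SMEs for any reserved memory regions (e.g. IORT RMR
 * nodes) so that traffic set up by firmware keeps flowing once the SMMU
 * is enabled with bypass otherwise disabled.
 */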
static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
{}

static int arm_smmu_device_probe(struct platform_device *pdev)
{}

static void arm_smmu_device_shutdown(struct platform_device *pdev)
{}

static void arm_smmu_device_remove(struct platform_device *pdev)
{}

static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{}

static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{}

static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu",
		.of_match_table		= arm_smmu_of_match,
		.pm			= &arm_smmu_pm_ops,
		.suppress_bind_attrs    = true,
	},
	.probe	= arm_smmu_device_probe,
	.remove_new = arm_smmu_device_remove,
	.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will@kernel.org>");
MODULE_ALIAS("platform:arm-smmu");
MODULE_LICENSE("GPL v2");