// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 *         Leo Duran <[email protected]>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/io-pgtable.h>
#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>
#include <uapi/linux/iommufd.h>

#include "amd_iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

/* Reserved IOVA ranges */
#define MSI_RANGE_START         (0xfee00000)
#define MSI_RANGE_END           (0xfeefffff)
#define HT_RANGE_START          (0xfd00000000ULL)
#define HT_RANGE_END            (0xffffffffffULL)

#define DEFAULT_PGTABLE_LEVEL   PAGE_MODE_3_LEVEL

static DEFINE_SPINLOCK(pd_bitmap_lock);

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);

const struct iommu_ops amd_iommu_ops;
static const struct iommu_dirty_ops amd_dirty_ops;

int amd_iommu_max_glx_val = -1;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {};
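
/*
 * Illustrative sketch only (the struct and helper below are hypothetical,
 * not this driver's definitions): per the AMD IOMMU specification a command
 * is 128 bits wide, i.e. four 32-bit words, with the command type encoded
 * in the upper bits of the second word (CMD[63:60]).
 */
struct example_iommu_cmd {
        u32 data[4];                    /* one 16-byte command slot */
};

static inline void example_cmd_set_type(struct example_iommu_cmd *cmd, u32 type)
{
        cmd->data[1] |= (type << 28);   /* opcode lives in CMD[63:60] */
}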

struct kmem_cache *amd_iommu_irq_cache;

static void detach_device(struct device *dev);

static void set_dte_entry(struct amd_iommu *iommu,
			  struct iommu_dev_data *dev_data);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{}

static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
{}

/*
 * We cannot support PASID with an existing v1 page table in the same
 * domain, since it would have to be nested. However, an existing domain
 * with a v2 page table, or one in passthrough mode, can be used for PASID.
 */
static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
{}
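
/*
 * Minimal restatement of the rule above as code (illustrative only; the
 * helper below is hypothetical and not part of this driver): PASID/SVA can
 * be enabled only when the domain is in passthrough mode or already uses
 * the v2 page table format.
 */
static inline bool example_pdom_allows_pasid(bool pt_mode, bool v2_pgtbl)
{
        return pt_mode || v2_pgtbl;
}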

static inline int get_acpihid_device_id(struct device *dev,
					struct acpihid_map_entry **entry)
{}

static inline int get_device_sbdf_id(struct device *dev)
{}

struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
{}

static inline u16 get_device_segment(struct device *dev)
{}

/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
{}
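
/*
 * Illustrative sketch only (hypothetical layout, not this driver's data
 * structures): each PCI segment keeps a per-device-ID array that maps a
 * device ID back to the IOMMU serving it, so the reverse lookup is a
 * simple array index.
 */
static inline void example_set_rlookup(struct amd_iommu **rlookup_table,
                                       u16 devid, struct amd_iommu *iommu)
{
        rlookup_table[devid] = iommu;   /* devid -> owning IOMMU */
}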

static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
{}

static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
{}

static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{}

static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{}

static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{}

static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{}

static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{}

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{}

/*
 * Find or create an IOMMU group for an acpihid device.
 */
static struct iommu_group *acpihid_device_group(struct device *dev)
{}

static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
{}

static u32 pdev_get_caps(struct pci_dev *pdev)
{}

static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
{}

static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
{}

static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
{}

static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
{}

static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
{}

static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
{}

static void pdev_enable_caps(struct pci_dev *pdev)
{}

static void pdev_disable_caps(struct pci_dev *pdev)
{}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{}

static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{}

static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{}

static void amd_iommu_uninit_device(struct device *dev)
{}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{}

static void dump_command(unsigned long phys_addr)
{}

static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{}

static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{}

#define IS_IOMMU_MEM_TRANSACTION(flags)         \
        (((flags) & EVENT_FLAG_I) == 0)

#define IS_WRITE_REQUEST(flags)                 \
        ((flags) & EVENT_FLAG_RW)

static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
					u16 devid, u16 domain_id,
					u64 address, int flags)
{}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{}

static void iommu_poll_events(struct amd_iommu *iommu)
{}

#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{}

static void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
{}

#else /* CONFIG_IRQ_REMAP */
static inline void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */

static void amd_iommu_handle_irq(void *data, const char *evt_type,
				 u32 int_mask, u32 overflow_mask,
				 void (*int_handler)(struct amd_iommu *),
				 void (*overflow_handler)(struct amd_iommu *))
{}

irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
{}

irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
{}

irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
{}

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{}

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd)
{}

static void build_completion_wait(struct iommu_cmd *cmd,
				  struct amd_iommu *iommu,
				  u64 data)
{}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{}

/*
 * Builds an invalidation address which is suitable for one page or multiple
 * pages. Sets the size bit (S) if more than one page is flushed.
 */
static inline u64 build_inv_address(u64 address, size_t size)
{}
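
/*
 * Illustrative sketch only (hypothetical helper, not this driver's code) of
 * the range encoding described above: a single page is addressed directly.
 * For a larger range, every address bit below the highest bit that differs
 * between the first and last byte of the range is set, bits 11:0 are then
 * cleared, and bit 0 (the S bit) is set, which tells the IOMMU to flush the
 * whole naturally aligned window covering the range.
 */
static inline u64 example_inv_address(u64 address, size_t size)
{
        u64 end = address + size - 1;
        u64 msb_diff;

        if ((address >> 12) == (end >> 12))     /* range fits in one 4K page */
                return address & ~0xfffULL;

        msb_diff = fls64(end ^ address) - 1;    /* highest differing bit     */
        address |= (1ULL << msb_diff) - 1;      /* mark the window size      */
        address &= ~0xfffULL;                   /* clear bits 11:0           */

        return address | 1;                     /* S=1: more than one page   */
}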

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid,
				  ioasid_t pasid, bool gn)
{}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size,
				  ioasid_t pasid, bool gn)
{}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
			       int status, int tag, u8 gn)
{}

static void build_inv_all(struct iommu_cmd *cmd)
{}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{}

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
 */
static int __iommu_queue_command_sync(struct amd_iommu *iommu,
				      struct iommu_cmd *cmd,
				      bool sync)
{}
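
/*
 * Illustrative sketch only (hypothetical buffer/register handling, not this
 * driver's code): the command buffer is a ring.  The driver copies the
 * 16-byte command to its software tail offset, advances the tail with
 * wrap-around, and finally writes the new tail to an MMIO register so the
 * IOMMU starts fetching the command.
 */
static inline void example_queue_command(u8 *cmd_buf, u32 *sw_tail, u32 buf_size,
                                         const u32 cmd[4], void __iomem *tail_reg)
{
        memcpy(cmd_buf + *sw_tail, cmd, 16);    /* place the command    */
        *sw_tail = (*sw_tail + 16) % buf_size;  /* advance tail, wrap   */
        writel(*sw_tail, tail_reg);             /* tell the hardware    */
}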

static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{}
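
/*
 * Illustrative sketch only (hypothetical helper, not this driver's code) of
 * the completion-wait handshake: a COMPLETION_WAIT command queued behind the
 * commands of interest makes the IOMMU store a cookie to a semaphore in
 * memory once everything before it has finished; the CPU then polls that
 * semaphore until the cookie appears or a timeout expires.
 */
static inline int example_wait_on_sem(u64 *sem, u64 cookie)
{
        int i;

        for (i = 0; i < 1000000; i++) {         /* ~1s worst case at 1us/loop */
                if (READ_ONCE(*sem) == cookie)
                        return 0;
                udelay(1);
        }

        return -ETIMEDOUT;
}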

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{}

static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{}

/*
 * This function uses heavy locking and may disable irqs for some time, but
 * that is not an issue because it is only called during resume.
 */
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{}

static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
{}

static void amd_iommu_flush_all(struct amd_iommu *iommu)
{}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{}

static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{}

void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
			      size_t size, ioasid_t pasid, bool gn)
{}

static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
{}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{}

static int domain_flush_pages_v2(struct protection_domain *pdom,
				 u64 address, size_t size)
{}

static int domain_flush_pages_v1(struct protection_domain *pdom,
				 u64 address, size_t size)
{}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It flushes a range of PTEs of the domain.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size)
{}

void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size)
{}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{}

void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
				     ioasid_t pasid, u64 address, size_t size)
{}

void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
				   ioasid_t pasid)
{}

void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{}

/* Flush the not-present cache if it exists */
static void domain_flush_np_cache(struct protection_domain *domain,
		dma_addr_t iova, size_t size)
{}


/*
 * This function flushes the DTEs for all devices in the domain.
 */
static void domain_flush_devices(struct protection_domain *domain)
{}

static void update_device_table(struct protection_domain *domain)
{}

void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{}

void amd_iommu_domain_update(struct protection_domain *domain)
{}

int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{}

/****************************************************************************
 *
 * The next functions belong to domain allocation. A domain is allocated
 * for every IOMMU as the default domain. If device isolation is enabled,
 * every device gets its own domain. The most important thing about
 * domains is the page table mapping the DMA address space they contain.
 *
 ****************************************************************************/

static u16 domain_id_alloc(void)
{}
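
/*
 * Illustrative sketch only (hypothetical bitmap and lock, not this driver's
 * data): protection-domain IDs are small integers handed out from a bitmap
 * under a spinlock, with ID 0 kept reserved.
 */
static inline u16 example_domain_id_alloc(unsigned long *id_bitmap,
                                          spinlock_t *lock, unsigned int max_ids)
{
        unsigned long flags;
        unsigned int id;

        spin_lock_irqsave(lock, flags);
        id = find_next_zero_bit(id_bitmap, max_ids, 1); /* skip reserved ID 0 */
        if (id < max_ids)
                __set_bit(id, id_bitmap);               /* claim it           */
        else
                id = 0;                                 /* 0 signals failure  */
        spin_unlock_irqrestore(lock, flags);

        return id;
}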

static void domain_id_free(int id)
{}

static void free_gcr3_tbl_level1(u64 *tbl)
{}

static void free_gcr3_tbl_level2(u64 *tbl)
{}

static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
{}

/*
 * Number of GCR3 table levels required. Each level is one 4-Kbyte page and
 * can contain up to 512 entries.
 */
static int get_gcr3_levels(int pasids)
{}
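
/*
 * Illustrative example only (hypothetical helper, not this driver's code):
 * with 512 entries per 4-Kbyte level, each level resolves 9 bits of PASID,
 * so the number of levels is the number of 9-bit chunks in the PASID width.
 * For 16-bit PASIDs that is DIV_ROUND_UP(16, 9) = 2 levels.
 */
static inline int example_gcr3_levels(unsigned int pasid_bits)
{
        return DIV_ROUND_UP(pasid_bits, 9);
}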

static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
			    struct amd_iommu *iommu, int pasids)
{}

static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
			   ioasid_t pasid, bool alloc)
{}

static int update_gcr3(struct iommu_dev_data *dev_data,
		       ioasid_t pasid, unsigned long gcr3, bool set)
{}

int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
		       unsigned long gcr3)
{}

int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
{}

static void set_dte_entry(struct amd_iommu *iommu,
			  struct iommu_dev_data *dev_data)
{}

static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
{}

/* Update and flush DTE for the given device */
void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set)
{}

/*
 * If the domain is SVA-capable then initialize the GCR3 table. Also, if the
 * domain is in v2 page table mode, update GCR3[0].
 */
static int init_gcr3_table(struct iommu_dev_data *dev_data,
			   struct protection_domain *pdom)
{}

static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
			       struct protection_domain *pdom)
{}

static int do_attach(struct iommu_dev_data *dev_data,
		     struct protection_domain *domain)
{}

static void do_detach(struct iommu_dev_data *dev_data)
{}

/*
 * If a device is not yet associated with a domain, this function makes the
 * device visible in the domain
 */
static int attach_device(struct device *dev,
			 struct protection_domain *domain)
{}

/*
 * Removes a device from a protection domain (with devtable_lock held)
 */
static void detach_device(struct device *dev)
{}

static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{}

static void amd_iommu_release_device(struct device *dev)
{}

static struct iommu_group *amd_iommu_device_group(struct device *dev)
{}

/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/

static void cleanup_domain(struct protection_domain *domain)
{}

void protection_domain_free(struct protection_domain *domain)
{}

static int protection_domain_init_v1(struct protection_domain *domain, int mode)
{}

static int protection_domain_init_v2(struct protection_domain *pdom)
{}

struct protection_domain *protection_domain_alloc(unsigned int type)
{}

static inline u64 dma_max_address(void)
{}

static bool amd_iommu_hd_support(struct amd_iommu *iommu)
{}

static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
						  struct device *dev, u32 flags)
{}

static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
{}

static struct iommu_domain *
amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
			    struct iommu_domain *parent,
			    const struct iommu_user_data *user_data)
{}

void amd_iommu_domain_free(struct iommu_domain *dom)
{}

static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{}

static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
				    unsigned long iova, size_t size)
{}

static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
			       phys_addr_t paddr, size_t pgsize, size_t pgcount,
			       int iommu_prot, gfp_t gfp, size_t *mapped)
{}

static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					    struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t size)
{}

static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
				    size_t pgsize, size_t pgcount,
				    struct iommu_iotlb_gather *gather)
{}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  dma_addr_t iova)
{}

static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
{}

static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
					bool enable)
{}

static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
					  unsigned long iova, size_t size,
					  unsigned long flags,
					  struct iommu_dirty_bitmap *dirty)
{}

static void amd_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{}

bool amd_iommu_is_attach_deferred(struct device *dev)
{}

static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
{}

static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{}

static int amd_iommu_def_domain_type(struct device *dev)
{}

static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{}

static const struct iommu_dirty_ops amd_dirty_ops = {
	.set_dirty_tracking = amd_iommu_set_dirty_tracking,
	.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};

static int amd_iommu_dev_enable_feature(struct device *dev,
					enum iommu_dev_features feat)
{}

static int amd_iommu_dev_disable_feature(struct device *dev,
					 enum iommu_dev_features feat)
{}

const struct iommu_ops amd_iommu_ops =;

#ifdef CONFIG_IRQ_REMAP

/*****************************************************************************
 *
 * Interrupt Remapping Implementation
 *
 *****************************************************************************/

static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);

static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
{}

static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
			      struct irq_remap_table *table)
{}

static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
{}

static struct irq_remap_table *__alloc_irq_table(void)
{}

static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
				  struct irq_remap_table *table)
{}

static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
				       void *data)
{}

static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
					       u16 devid, struct pci_dev *pdev)
{}

static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
			   bool align, struct pci_dev *pdev)
{}

static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
			    struct irte_ga *irte)
{}

static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
			  struct irte_ga *irte)
{}

static int modify_irte(struct amd_iommu *iommu,
		       u16 devid, int index, union irte *irte)
{}

static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
{}

static void irte_prepare(void *entry,
			 u32 delivery_mode, bool dest_mode,
			 u8 vector, u32 dest_apicid, int devid)
{}

static void irte_ga_prepare(void *entry,
			    u32 delivery_mode, bool dest_mode,
			    u8 vector, u32 dest_apicid, int devid)
{}

static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{}

static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{}

static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{}

static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{}

static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
			      u8 vector, u32 dest_apicid)
{}

static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
				 u8 vector, u32 dest_apicid)
{}

#define IRTE_ALLOCATED (~1U)
static void irte_set_allocated(struct irq_remap_table *table, int index)
{}

static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
{}

static bool irte_is_allocated(struct irq_remap_table *table, int index)
{}

static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
{}

static void irte_clear_allocated(struct irq_remap_table *table, int index)
{}

static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
{}

static int get_devid(struct irq_alloc_info *info)
{}

struct irq_remap_ops amd_iommu_irq_ops =;

static void fill_msi_msg(struct msi_msg *msg, u32 index)
{}

static void irq_remapping_prepare_irte(struct amd_ir_data *data,
				       struct irq_cfg *irq_cfg,
				       struct irq_alloc_info *info,
				       int devid, int index, int sub_handle)
{}

struct amd_irte_ops irte_32_ops =;

struct amd_irte_ops irte_128_ops =;

static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{}

static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs)
{}

static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
			       struct amd_ir_data *ir_data,
			       struct irq_2_irte *irte_info,
			       struct irq_cfg *cfg);

static int irq_remapping_activate(struct irq_domain *domain,
				  struct irq_data *irq_data, bool reserve)
{}

static void irq_remapping_deactivate(struct irq_domain *domain,
				     struct irq_data *irq_data)
{}

static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
				enum irq_domain_bus_token bus_token)
{}

static const struct irq_domain_ops amd_ir_domain_ops =;

int amd_iommu_activate_guest_mode(void *data)
{}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);

int amd_iommu_deactivate_guest_mode(void *data)
{}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);

static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{}


static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
			       struct amd_ir_data *ir_data,
			       struct irq_2_irte *irte_info,
			       struct irq_cfg *cfg)
{}

static int amd_ir_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{}

static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{}

static struct irq_chip amd_ir_chip =;

static const struct msi_parent_ops amdvi_msi_parent_ops =;

int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{}

int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif