/* linux/drivers/iommu/amd/init.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 *         Leo Duran <[email protected]>
 */

/*
 * NOTE(review): log-prefix macros — the expansions were stripped by this
 * extraction.  As written they discard "fmt" entirely, which cannot be
 * intended (upstream prefixes every message, e.g. "AMD-Vi: " — confirm
 * against the upstream driver before building).
 */
#define pr_fmt(fmt)
#define dev_fmt(fmt)

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>
#include <asm/sev.h>

#include <linux/crash_dump.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"

/*
 * definitions for the ACPI scanning code
 */
/*
 * NOTE(review): the value of every macro below was stripped by this
 * extraction — only the names survive.  As written they all expand to
 * nothing, which cannot be the intended IVRS/IVHD/IVMD encodings; the
 * numeric values must be restored from the upstream driver (they mirror
 * the AMD IOMMU specification's IVRS table layout).
 */
#define IVRS_HEADER_LENGTH

#define ACPI_IVHD_TYPE_MAX_SUPPORTED
#define ACPI_IVMD_TYPE_ALL
#define ACPI_IVMD_TYPE
#define ACPI_IVMD_TYPE_RANGE

/* IVHD device-entry type codes (values elided). */
#define IVHD_DEV_ALL
#define IVHD_DEV_SELECT
#define IVHD_DEV_SELECT_RANGE_START
#define IVHD_DEV_RANGE_END
#define IVHD_DEV_ALIAS
#define IVHD_DEV_ALIAS_RANGE
#define IVHD_DEV_EXT_SELECT
#define IVHD_DEV_EXT_SELECT_RANGE
#define IVHD_DEV_SPECIAL
#define IVHD_DEV_ACPI_HID

/* UID format codes for ACPI HID entries (values elided). */
#define UID_NOT_PRESENT
#define UID_IS_INTEGER
#define UID_IS_CHARACTER

#define IVHD_SPECIAL_IOAPIC
#define IVHD_SPECIAL_HPET

/* IVHD flag masks (values elided). */
#define IVHD_FLAG_HT_TUN_EN_MASK
#define IVHD_FLAG_PASSPW_EN_MASK
#define IVHD_FLAG_RESPASSPW_EN_MASK
#define IVHD_FLAG_ISOC_EN_MASK

/* IVMD flag bits (values elided). */
#define IVMD_FLAG_EXCL_RANGE
#define IVMD_FLAG_IW
#define IVMD_FLAG_IR
#define IVMD_FLAG_UNITY_MAP

/* ACPI device flags (values elided). */
#define ACPI_DEVFLAG_INITPASS
#define ACPI_DEVFLAG_EXTINT
#define ACPI_DEVFLAG_NMI
#define ACPI_DEVFLAG_SYSMGT1
#define ACPI_DEVFLAG_SYSMGT2
#define ACPI_DEVFLAG_LINT0
#define ACPI_DEVFLAG_LINT1
#define ACPI_DEVFLAG_ATSDIS

/* NOTE(review): expansion elided — presumably packs seg/bus/dev/fn into a
 * single segment:BDF identifier; confirm upstream. */
#define IVRS_GET_SBDF_ID(seg, bus, dev, fn)

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
/* NOTE(review): member list elided by this extraction ("{}"); the layout
 * must match the IVRS IVHD header byte-for-byte, hence "packed". */
struct ivhd_header {} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
/* NOTE(review): member list elided by this extraction. */
struct ivhd_entry {} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
/* NOTE(review): member list elided by this extraction. */
struct ivmd_header {} __attribute__((packed));

/*
 * NOTE(review): throughout this region the extraction dropped the
 * right-hand side of initializers (leaving invalid "=;"), the arguments of
 * EXPORT_SYMBOL()/LIST_HEAD(), and the EARLY_MAP_SIZE value.  None of this
 * compiles as written; restore from upstream.
 */
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

/* Page-table format used for DMA-API domains (initializer elided). */
enum io_pgtable_fmt amd_iommu_pgtable =;
/* Guest page table level */
int amd_iommu_gpt_level =;

/* Guest-interrupt mode and x2APIC mode selection (initializers elided). */
int amd_iommu_guest_ir =;
static int amd_iommu_xt_mode =;

static bool amd_iommu_detected;
static bool amd_iommu_disabled __initdata;
static bool amd_iommu_force_enable __initdata;
static bool amd_iommu_irtcachedis;
static int amd_iommu_target_ivhd_type;

/* Global EFR and EFR2 registers */
u64 amd_iommu_efr;
u64 amd_iommu_efr2;

/* SNP is enabled on the system? */
bool amd_iommu_snp_en;
EXPORT_SYMBOL();

LIST_HEAD();	/* list of all PCI segments */
LIST_HEAD();		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs*/
struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
static int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly =;

static bool amd_iommu_pc_present __read_mostly;
bool amdr_ivrs_remap_support __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

/* NOTE(review): enumerator list elided — states of the init state machine
 * driven by state_next()/iommu_go_to_state() below. */
enum iommu_init_state {};

/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];

static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static int __initdata early_acpihid_map_size;

static bool __initdata cmdline_maps;

static enum iommu_init_state init_state =;

/* Forward declarations for the init state machine. */
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);

static bool amd_iommu_pre_enabled =;

static u32 amd_iommu_ivinfo __initdata;

/*
 * NOTE(review): every function body from here on is elided ("{}") in this
 * snapshot; behavior cannot be verified from SOURCE.  Purpose notes are
 * inferred from names and pre-existing comments — confirm against upstream.
 */
bool translation_pre_enabled(struct amd_iommu *iommu)
{}

/* Presumably clears the flag recorded by init_translation_status() — confirm. */
static void clear_translation_pre_enabled(struct amd_iommu *iommu)
{}

/* Presumably records whether firmware/kdump left translation enabled — confirm. */
static void init_translation_status(struct amd_iommu *iommu)
{}

/* Presumably computes an allocation size for a table indexed up to last_bdf. */
static inline unsigned long tbl_size(int entry_size, int last_bdf)
{}

int amd_iommu_get_num_iommus(void)
{}

/*
 * Iterate through all the IOMMUs to get common EFR
 * masks among all IOMMUs and warn if found inconsistency.
 */
static __init void get_global_efr(void)
{}

/*
 * For IVHD type 0x11/0x40, EFR is also available via IVHD.
 * Default to IVHD EFR since it is available sooner
 * (i.e. before PCI init).
 */
static void __init early_iommu_features_init(struct amd_iommu *iommu,
					     struct ivhd_header *h)
{}

/* Access to l1 and l2 indexed register spaces */
/* NOTE(review): bodies elided — presumably index/data accesses through the
 * IOMMU's PCI config space; confirm upstream. */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function set the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{}

/* NOTE(review): presumably programs the completion-wait write-back (CWWB)
 * semaphore range — body elided; confirm upstream. */
static void iommu_set_cwwb_range(struct amd_iommu *iommu)
{}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{}

/* Generic functions to enable/disable certain features of the IOMMU. */
void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{}

/* Presumably sets the invalidation timeout control field — body elided. */
static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{}

static void iommu_disable(struct amd_iommu *iommu)
{}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{}

/* Presumably derives the header size from the IVHD type field — body elided. */
static inline u32 get_ivhd_header_size(struct ivhd_header *h)
{}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{}

/* NOTE(review): presumably validates the IVRS table checksum; returns 0 on
 * success per kernel convention — body elided, confirm upstream. */
static int __init check_ivrs_checksum(struct acpi_table_header *table)
{}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
{}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the per PCI segment device/alias/rlookup table
 * and also basically initialize the hardware.
 *
 ****************************************************************************/

/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{}

static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{}

/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{}

static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{}

/* Presumably allocates the per-segment IRQ remap lookup table — body elided. */
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{}

static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{}

/* Presumably allocates the per-segment requestor-id alias table — body elided. */
static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{}

static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{}

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{}

/*
 * Interrupt handler has processed all pending events and adjusted head
 * and tail pointer. Reset overflow mask and restart logging again.
 */
void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
			   u8 cntrl_intr, u8 cntrl_log,
			   u32 status_run_mask, u32 status_overflow_mask)
{}

/*
 * This function restarts event logging in case the IOMMU experienced
 * an event log buffer overflow.
 */
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{}

/*
 * This function restarts event logging in case the IOMMU experienced
 * GA log overflow.
 */
void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
{}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{}

/*
 * This function disables the command buffer
 */
static void iommu_disable_command_buffer(struct amd_iommu *iommu)
{}

static void __init free_command_buffer(struct amd_iommu *iommu)
{}

/* NOTE(review): body elided — presumably a page allocator helper used by
 * the buffer-allocation routines above; confirm upstream. */
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
				  size_t size)
{}

/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{}

/*
 * This function disables the event log buffer
 */
static void iommu_disable_event_buffer(struct amd_iommu *iommu)
{}

static void __init free_event_buffer(struct amd_iommu *iommu)
{}

static void free_ga_log(struct amd_iommu *iommu)
{}

/* Guest-APIC (GA) log support is only built with interrupt remapping. */
#ifdef CONFIG_IRQ_REMAP
static int iommu_ga_log_enable(struct amd_iommu *iommu)
{}

static int iommu_init_ga_log(struct amd_iommu *iommu)
{}
#endif /* CONFIG_IRQ_REMAP */

/* Completion-wait write-back semaphore alloc/free — bodies elided. */
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{}

static void __init free_cwwb_sem(struct amd_iommu *iommu)
{}

static void iommu_enable_xt(struct amd_iommu *iommu)
{}

static void iommu_enable_gt(struct amd_iommu *iommu)
{}

/* sets a specific bit in the device table entry. */
static void __set_dev_entry_bit(struct dev_table_entry *dev_table,
				u16 devid, u8 bit)
{}

static void set_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{}

static int __get_dev_entry_bit(struct dev_table_entry *dev_table,
			       u16 devid, u8 bit)
{}

static int get_dev_entry_bit(struct amd_iommu *iommu, u16 devid, u8 bit)
{}

/* Presumably copies the old (pre-enabled/kdump) device table — bodies elided. */
static bool __copy_device_table(struct amd_iommu *iommu)
{}

static bool copy_device_table(void)
{}

void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid)
{}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{}

int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line)
{}

static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid,
				      bool cmd_line)
{}

/* Presumably merges early_*_map entries from the command line — body elided. */
static int __init add_early_maps(void)
{}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{}

/* Allocate PCI segment data structure */
static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
					  struct acpi_table_header *ivrs_base)
{}

/* Presumably looks up an existing segment or allocates one — body elided. */
static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
					struct acpi_table_header *ivrs_base)
{}

static void __init free_pci_segments(void)
{}

static void __init free_sysfs(struct amd_iommu *iommu)
{}

static void __init free_iommu_one(struct amd_iommu *iommu)
{}

static void __init free_iommu_all(void)
{}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B micellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{}

/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
				 struct acpi_table_header *ivrs_base)
{}

/* Presumably the post-PCI part of per-IOMMU init — body elided, confirm. */
static int __init init_iommu_one_late(struct amd_iommu *iommu)
{}

/**
 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
 * @ivrs: Pointer to the IVRS header
 *
 * This function search through all IVDB of the maximum supported IVHD
 */
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
{}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{}

/* Presumably probes the performance-counter bank/counter layout — body elided. */
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{}

/* sysfs "cap" attribute: presumably prints the IOMMU capability register. */
static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

/* sysfs "features" attribute: presumably prints the EFR feature bits. */
static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

/* NOTE(review): initializers for the three sysfs tables below were
 * stripped by this extraction ("=;") — restore from upstream. */
static struct attribute *amd_iommu_attrs[] =;

static struct attribute_group amd_iommu_group =;

static const struct attribute_group *amd_iommu_groups[] =;

/*
 * Note: IVHD 0x11 and 0x40 also contains exact copy
 * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
 * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
 */
static void __init late_iommu_features_init(struct amd_iommu *iommu)
{}

static int __init iommu_init_pci(struct amd_iommu *iommu)
{}

/* Presumably logs a per-IOMMU feature summary at boot — body elided. */
static void print_iommu_info(void)
{}

static int __init amd_iommu_init_pci(void)
{}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

/* Sets up the legacy MSI interrupt path for one IOMMU — body elided here. */
static int iommu_setup_msi(struct amd_iommu *iommu)
{}

/*
 * NOTE(review): this declaration was garbled by the extraction — the
 * "struct" keyword and the member list were lost, leaving a bare
 * identifier.  Restored the struct tag to match the file's other packed
 * struct stubs (see ivhd_header above); it presumably describes the
 * IntCapXt interrupt message layout used by the intcapxt irqchip below —
 * members must be restored from upstream.
 */
struct intcapxt {} __attribute__ ((packed));


static struct irq_chip intcapxt_controller;

/* irqdomain activate/deactivate callbacks for IntCapXt — bodies elided. */
static int intcapxt_irqdomain_activate(struct irq_domain *domain,
				       struct irq_data *irqd, bool reserve)
{}

static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
					  struct irq_data *irqd)
{}


/* irqdomain alloc/free callbacks — bodies elided. */
static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{}

static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{}


/* irqchip mask/unmask callbacks — bodies elided. */
static void intcapxt_unmask_irq(struct irq_data *irqd)
{}

static void intcapxt_mask_irq(struct irq_data *irqd)
{}


static int intcapxt_set_affinity(struct irq_data *irqd,
				 const struct cpumask *mask, bool force)
{}

static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
{}

/* NOTE(review): initializers stripped ("=;") — restore from upstream. */
static struct irq_chip intcapxt_controller =;

static const struct irq_domain_ops intcapxt_domain_ops =;


static struct irq_domain *iommu_irqdomain;

/* Presumably lazily creates/returns the shared IOMMU irqdomain — body elided. */
static struct irq_domain *iommu_get_irqdomain(void)
{}

static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
				  int hwirq, irq_handler_t thread_fn)
{}

static int iommu_setup_intcapxt(struct amd_iommu *iommu)
{}

/* Presumably dispatches to MSI or IntCapXt setup per IOMMU — body elided. */
static int iommu_init_irq(struct amd_iommu *iommu)
{}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m,
				       struct acpi_table_header *ivrs_base)
{}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{}

/*
 * Init the device table to not allow DMA access for devices
 */
static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{}

static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
{}

static void init_device_table(void)
{}

/* Presumably applies the IVHD_FLAG_* control bits to the hardware — elided. */
static void iommu_init_flags(struct amd_iommu *iommu)
{}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{}

/* Guest-APIC / IRT-cache-disable enable/disable helpers — bodies elided. */
static void iommu_enable_ga(struct amd_iommu *iommu)
{}

static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
{}

static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
{}

static void early_enable_iommu(struct amd_iommu *iommu)
{}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized.
 *
 * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy
 * the old content of device table entries. Not this case or copy failed,
 * just continue as normal kernel does.
 */
static void early_enable_iommus(void)
{}

static void enable_iommus_ppr(void)
{}

static void enable_iommus_vapic(void)
{}

static void enable_iommus(void)
{}

static void disable_iommus(void)
{}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{}

static int amd_iommu_suspend(void)
{}

/* NOTE(review): initializer stripped ("=;") — restore from upstream. */
static struct syscore_ops amd_iommu_syscore_ops =;

static void __init free_iommu_resources(void)
{}

/* SB IOAPIC is always on this device in AMD systems */
/* NOTE(review): value elided by this extraction. */
#define IOAPIC_SB_DEVID

static bool __init check_ioapic_information(void)
{}

static void __init free_dma_resources(void)
{}

/* Presumably caches IVinfo fields from the IVRS header — body elided. */
static void __init ivinfo_init(void *ivrs)
{}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * four times:
 *
 *	1 pass) Discover the most comprehensive IVHD type to use.
 *
 *	2 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	3 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	4 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
/* Four-pass IVRS parse and hardware bring-up — see block comment above;
 * body elided in this snapshot. */
static int __init early_amd_iommu_init(void)
{}

static int amd_iommu_enable_interrupts(void)
{}

/* Presumably checks for an IVRS ACPI table and cmdline disables — elided. */
static bool __init detect_ivrs(void)
{}

static void iommu_snp_enable(void)
{}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/

/* Advances init_state by one step; returns an errno-style int — elided. */
static int __init state_next(void)
{}

static int __init iommu_go_to_state(enum iommu_init_state state)
{}

/* Entry points used by the x86 interrupt-remapping layer. */
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{}

int __init amd_iommu_enable(void)
{}

void amd_iommu_disable(void)
{}

int amd_iommu_reenable(int mode)
{}

int amd_iommu_enable_faulting(unsigned int cpu)
{}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{}

/* Presumably verifies SME/SEV compatibility constraints — body elided. */
static bool amd_iommu_sme_check(void)
{}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
/* NOTE(review): all parser bodies are elided; the __setup() registrations
 * below also lost their arguments to the extraction. */

static int __init parse_amd_iommu_dump(char *str)
{}

static int __init parse_amd_iommu_intr(char *str)
{}

static int __init parse_amd_iommu_options(char *str)
{}

static int __init parse_ivrs_ioapic(char *str)
{}

static int __init parse_ivrs_hpet(char *str)
{}

/* NOTE(review): value elided by this extraction. */
#define ACPIID_LEN

static int __init parse_ivrs_acpihid(char *str)
{}

__setup();
__setup();
__setup();
__setup();
__setup();
__setup();

bool amd_iommu_pasid_supported(void)
{}

/* Presumably returns the idx'th entry of amd_iommus[], bounds-checked. */
struct amd_iommu *get_amd_iommu(unsigned int idx)
{}

/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/

/* Perf-counter bank count for IOMMU idx — body elided in this snapshot. */
u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{}

bool amd_iommu_pc_supported(void)
{}

u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{}

/* Shared read/write helper for PC registers (is_write selects) — elided. */
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
				u8 fxn, u64 *value, bool is_write)
{}

int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{}

int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{}

/* SNP teardown path: share pages back to the host — bodies elided. */
#ifdef CONFIG_KVM_AMD_SEV
static int iommu_page_make_shared(void *page)
{}

static int iommu_make_shared(void *va, size_t size)
{}

int amd_iommu_snp_disable(void)
{}
/* NOTE(review): EXPORT_SYMBOL_GPL argument elided by this extraction. */
EXPORT_SYMBOL_GPL();
#endif