linux/arch/x86/kernel/amd_gart_64.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/core-api/dma-api-howto.rst for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <asm/mtrr.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is set the GART is
 * flushed for every mapping. The problem is that the lazy flush seems to
 * trigger bugs with some popular PCI cards, in particular 3ware (but it
 * has also been seen with at least QLogic).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID	1
#define GPTE_COHERENT	2

/*
 * A GART PTE keeps physical address bits 31..12 in place, stuffs bits
 * 39..32 into bits 11..4, and carries the valid/coherent flags in the
 * low bits.
 */
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state; set on each GART wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{}

static void free_iommu(unsigned long offset, int size)
{}
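
/*
 * A sketch of what the two stub bodies above might contain, modeled on
 * the mainline implementation (helper names such as iommu_area_alloc()
 * and dma_get_seg_boundary_nr_pages() vary slightly across kernel
 * versions): alloc_iommu() carves a run of GART pages out of the bitmap,
 * wrapping around and requesting a flush when the area is exhausted;
 * free_iommu() simply clears the bits again.
 */
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		/* Wrapped around: stale entries may be cached, so flush. */
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}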

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{}
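
/*
 * A sketch of the flush path, assuming amd_flush_garts() from
 * <asm/amd_nb.h> performs the actual per-northbridge GART TLB flush:
 * need_flush is consumed under the bitmap lock so that concurrent
 * mappers do not issue redundant flushes.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}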

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{}
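
/*
 * A sketch of the overflow report: running out of IOMMU space is fatal
 * for the mapping and drivers generally cannot recover from it, so all
 * that can be done here is to log it loudly.
 */
static void iommu_full(struct device *dev, size_t size, int dir)
{
	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	/* Help track down the driver that never freed its mappings. */
	dump_leak();
#endif
}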

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{}
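
/*
 * Possible bodies for the two predicates above: force_iommu comes from
 * <asm/iommu.h>, and dma_capable() (<linux/dma-direct.h>) reports
 * whether the device can reach the physical range without remapping.
 */
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size, true);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size, true);
}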

/* Map a single contiguous physical area into the IOMMU.
 * The caller must check whether the IOMMU is needed, and flush afterwards.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{}
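
/*
 * A sketch of dma_map_area(), following the upstream logic: allocate a
 * run of GART pages, point each GATT entry at the physical page with
 * GPTE_ENCODE(), and return a bus address inside the aperture. On
 * allocation failure the area is either passed through unmapped (if the
 * device could address it anyway) or reported via iommu_full();
 * panic_on_overflow comes from <asm/iommu.h>.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	unsigned long i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	/* The in-page offset survives the PAGE_SIZE increments above. */
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}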

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{}
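
/*
 * A sketch of the map_page callback: addresses the device can reach
 * directly are passed through untouched; everything else is remapped
 * through the aperture, and the GART TLB is flushed before returning.
 */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}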

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{}
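
/*
 * A sketch of the unmap path: bus addresses outside the aperture were
 * never remapped, so only aperture hits have their GATT entries reset
 * to gart_unmapped_entry and their bitmap range freed.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr == DMA_MAPPING_ERROR ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++)
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	free_iommu(iommu_page, npages);
}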

/*
 * Scatterlist wrapper around the single-page unmap above.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{}
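
/*
 * One plausible body for gart_unmap_sg(): walk the list and undo each
 * entry with gart_unmap_page(), stopping at the first unused slot.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}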

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{}
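
/*
 * A plausible body for the fallback above: map each entry individually,
 * remapping only those the device cannot reach directly; on failure,
 * undo whatever was mapped so far and report zero entries.
 */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}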

/* Map multiple scatterlist entries contiguously into the first one. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{}
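
/*
 * A sketch of the merge step, following the upstream logic: allocate one
 * run of GART pages large enough for every entry, emit a GATT entry per
 * physical page, and report the run as a single dma_address/dma_length
 * pair in *sout. At this point each entry's ->dma_address still holds
 * the physical address, as pre-filled by the scatterlist mapper.
 */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -ENOMEM;

	for_each_sg(start, s, nelems, i) {
		unsigned long n, addr = s->dma_address;

		/* Only the first entry may carry an offset. */
		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base +
				iommu_page * PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		n = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (n--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}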

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{}
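
/*
 * A sketch of the dma_map_cont() wrapper: when no remapping is needed,
 * the single entry is passed through as-is; otherwise __dma_map_cont()
 * builds one merged aperture mapping covering all the entries.
 */
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length  = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}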

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into one contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{}
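
/*
 * Sketches of the coherent-allocation pair, assuming dma_direct_alloc()
 * and dma_direct_free() from <linux/dma-direct.h> provide the backing
 * memory: the buffer is only routed through the aperture when the IOMMU
 * is forced and the device's coherent mask actually needs remapping.
 */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
				 DMA_BIDIRECTIONAL,
				 (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}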

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{}

static void enable_gart_translations(void)
{}
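
/*
 * A sketch of enable_gart_translations(), using the amd_nb helpers from
 * <asm/amd_nb.h> and enable_gart_translation() from <asm/gart.h>: point
 * every northbridge at the shared GATT, then flush stale TLB entries.
 */
static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART TLB to remove stale entries. */
	amd_flush_garts();
}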

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{}

static void gart_fixup_northbridges(void)
{}

static void gart_resume(void)
{}
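
/*
 * Plausible bodies for the resume hooks above, modeled on the mainline
 * code: boot code records the aperture it allocated, resume re-applies
 * it to every northbridge and then re-enables translation. The register
 * accessors (gart_set_size_and_enable(), AMD64_GARTAPERTUREBASE) are
 * assumed to come from <asm/gart.h> and <asm/amd_nb.h>.
 */
void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translation yet; that is the next step.
		 * Only restore the pre-suspend aperture settings here.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
				       aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}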

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{}

/*
 * Wire the GART callbacks into the DMA API; the remaining helpers come
 * from the common dma-direct code.
 */
static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mmap				= dma_common_mmap,
	.get_sgtable			= dma_common_get_sgtable,
	.dma_supported			= dma_direct_supported,
	.get_required_mask		= dma_direct_get_required_mask,
};

static void gart_iommu_shutdown(void)
{}

int __init gart_iommu_init(void)
{}

void __init gart_parse_options(char *p)
{}
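
/*
 * A partial sketch of the boot-option parser, covering only the knobs
 * declared in this file (the real parser also handles aperture and
 * debugging options):
 */
void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
}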