linux/kernel/dma/direct.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
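
/*
 * Hedged example (not part of this file): an architecture whose DMA zone
 * does not match the usual first 16 MiB would raise the limit from its
 * early init code, before any dma-direct allocation can happen. The hook
 * name below is hypothetical; the assignment is the relevant part.
 *
 *	void __init hypothetical_arch_dma_init(void)
 *	{
 *		zone_dma_bits = 30;	// DMA zone covers the first 1 GiB
 *	}
 */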

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{}
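
/*
 * Hedged sketch of the helper above (an assumption, not copied from this
 * file): in the direct-mapped case the DMA address is the physical address
 * run through phys_to_dma(), using the unencrypted alias when the device
 * must see memory with the encryption bit cleared:
 *
 *	if (force_dma_unencrypted(dev))
 *		return phys_to_dma_unencrypted(dev, phys);
 *	return phys_to_dma(dev, phys);
 */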

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{}

u64 dma_direct_get_required_mask(struct device *dev)
{}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{}

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
				    size_t size)
{}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{}
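
/*
 * Hedged sketch (assumption): the check above typically reduces to "the
 * caller may not block and the device has no restricted swiotlb pool of
 * its own to allocate from", i.e. something along the lines of:
 *
 *	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
 */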

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{}

/*
 * Unmaps segments, except for those marked as pci_p2pdma, which need no
 * further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{}
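
/*
 * Hedged sketch of the unmap loop (an assumption; helper names may differ
 * between kernel versions): walk the list with for_each_sg(), skip the
 * teardown for P2PDMA bus-address segments and unmap everything else:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		if (sg_dma_is_bus_address(sg))
 *			sg_dma_unmark_bus_address(sg);
 *		else
 *			dma_direct_unmap_page(dev, sg->dma_address,
 *					      sg_dma_len(sg), dir, attrs);
 *	}
 */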
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{}

bool dma_direct_can_mmap(struct device *dev)
{}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{}

int dma_direct_supported(struct device *dev, u64 mask)
{}

/*
 * Check whether all RAM resource ranges are covered by the dma_range_map.
 * Returns 0 when further checking is needed (the walk continues),
 * returns 1 if some RAM range cannot be covered by the dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{}

bool dma_direct_all_ram_mapped(struct device *dev)
{}
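
/*
 * Hedged sketch (assumption): with no dma_range_map every RAM range is
 * trivially reachable; otherwise walk all System RAM resources and let the
 * callback above flag any range the map cannot cover:
 *
 *	if (!dev->dma_range_map)
 *		return true;
 *	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
 *				      check_ram_in_range_map);
 */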

size_t dma_direct_max_mapping_size(struct device *dev)
{}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:  beginning of memory region covered by this offset.
 * @dma_start:  beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{}
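
/*
 * Hedged sketch (assumption, not verbatim): the offset is recorded as a
 * single struct bus_dma_region entry followed by a zeroed sentinel, and
 * attaching it to a device that already has a map is rejected:
 *
 *	struct bus_dma_region *map;
 *
 *	if (dev->dma_range_map)
 *		return -EINVAL;		// a map already exists
 *	if (cpu_start == dma_start)
 *		return 0;		// no offset, nothing to record
 *
 *	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
 *	if (!map)
 *		return -ENOMEM;
 *	map[0].cpu_start = cpu_start;
 *	map[0].dma_start = dma_start;
 *	map[0].size = size;
 *	dev->dma_range_map = map;
 *	return 0;
 */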