// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"
unsigned int zone_dma_bits __ro_after_init = …;
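
/*
 * Translate a CPU physical address to a DMA address, using the
 * unencrypted alias of the address when the device requires
 * unencrypted DMA.
 */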
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{ … }
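
/* Look up the struct page backing a direct-mapped DMA address. */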
static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{ … }
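
/*
 * Return the smallest DMA mask a device needs in order to reach all of
 * installed memory through the direct mapping.
 */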
u64 dma_direct_get_required_mask(struct device *dev)
{ … }
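
/*
 * Select GFP_DMA or GFP_DMA32 as required by the device's coherent DMA
 * mask, and report the resulting physical address limit via *phys_limit.
 */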
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{ … }
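
/*
 * Check whether the physical range [phys, phys + size) is reachable
 * given the device's coherent DMA mask and bus DMA limit.
 */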
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{ … }
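
/*
 * Mark a buffer unencrypted so a device that requires unencrypted DMA
 * can access it; a no-op unless force_dma_unencrypted() is true for the
 * device.
 */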
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{ … }
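
/*
 * Re-encrypt a buffer before it is freed. If this fails the memory has
 * to be leaked, as it can no longer safely be given back to the page
 * allocator.
 */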
static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{ … }
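
/*
 * Free pages obtained from __dma_direct_alloc_pages(), returning
 * restricted-pool (swiotlb) and CMA memory to their respective pools.
 */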
static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{ … }
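
/* Allocate from the device's restricted DMA pool backed by swiotlb. */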
static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{ … }
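
/*
 * Core page allocation: try CMA first, then the page allocator, and
 * retry in the GFP_DMA32/GFP_DMA zones when the pages obtained are not
 * addressable by the device (see dma_coherent_ok()).
 */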
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{ … }
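
/*
 * Decide whether an allocation must be served from the pre-populated
 * atomic pools because the caller cannot block.
 */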
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{ … }
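
/* Allocate coherent memory from the atomic pools for non-blocking callers. */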
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{ … }
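
/*
 * DMA_ATTR_NO_KERNEL_MAPPING path: the caller gets a DMA handle plus an
 * opaque cookie, and no kernel virtual mapping is established.
 */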
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{ … }
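
/*
 * Main entry point for coherent allocations, typically reached through
 * dma_alloc_coherent(), e.g.:
 *
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *
 * Dispatches to the no-mapping, atomic-pool, or page-allocator paths
 * above, remapping or changing encryption attributes as required.
 */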
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{ … }
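
/*
 * Counterpart to dma_direct_alloc(): undoes any remapping or encryption
 * attribute change and hands the pages back to the allocator.
 */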
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{ … }
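
/*
 * Backend for dma_alloc_pages(): like dma_direct_alloc(), but returns a
 * struct page that the caller may map as needed.
 */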
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{ … }
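
/* Backend for dma_free_pages(). */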
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{ … }
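
/*
 * The scatterlist sync and unmap helpers below are only needed when the
 * architecture performs cache maintenance for DMA, or when swiotlb may
 * have bounce-buffered individual entries.
 */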
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{ … }
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{ … }

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{ … }
#endif
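
/*
 * Map each scatterlist segment directly. On failure all previously
 * mapped entries are unwound and a negative error code is returned; on
 * success the number of mapped entries is returned.
 */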
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{ … }
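
/*
 * Map an MMIO resource. This must fail if the address is not reachable
 * through the device's DMA mask, as resource ranges cannot be bounce
 * buffered by swiotlb.
 */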
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{ … }
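
/* Export a coherent buffer as a single-entry scatter-gather table. */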
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{ … }
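
/*
 * mmap support for coherent buffers: remap an allocation into a user
 * VMA, provided the buffer's memory can be exposed that way.
 */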
bool dma_direct_can_mmap(struct device *dev)
{ … }

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{ … }
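
/*
 * Check whether the direct mapping supports a given DMA mask. 32-bit
 * masks are expected to always work: architectures with more physical
 * memory are supposed to provide ZONE_DMA32 or use an IOMMU instead.
 */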
int dma_direct_supported(struct device *dev, u64 mask)
{ … }
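
/*
 * walk_system_ram_range() callback: check that a RAM range is covered
 * by the device's dma_range_map.
 */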
static int check_ram_in_range_map(unsigned long start_pfn,
		unsigned long nr_pages, void *data)
{ … }
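
/* Return true if all system RAM is covered by the device's dma_range_map. */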
bool dma_direct_all_ram_mapped(struct device *dev)
{ … }
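
/*
 * Largest contiguous mapping the device can handle, limited by the
 * swiotlb slot size when the device is forced to bounce buffer.
 */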
size_t dma_direct_max_mapping_size(struct device *dev)
{ … }
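
/*
 * True if the device is not DMA coherent or the address was bounced by
 * swiotlb, i.e. when dma_sync_* calls must not be skipped.
 */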
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{ … }
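
/**
 * dma_direct_set_offset - Assign a constant offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated range map.
 * @cpu_start:	beginning of the memory region covered by this offset.
 * @dma_start:	beginning of the DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * Example (hypothetical device): a bus that makes RAM at CPU physical
 * address 0x80000000 visible at bus address 0 would call
 *
 *	dma_direct_set_offset(dev, 0x80000000, 0, SZ_1G);
 */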
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size)
{ … }