/* linux/drivers/dma-buf/heaps/system_heap.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <[email protected]>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

struct system_heap_buffer {};

struct dma_heap_attachment {};

#define LOW_ORDER_GFP
#define HIGH_ORDER_GFP
static gfp_t order_flags[] =;
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] =;
#define NUM_ORDERS

static struct sg_table *dup_sg_table(struct sg_table *table)
{}

static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{}

static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{}

static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{}

static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{}

static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{}

static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{}

static const struct dma_buf_ops system_heap_buf_ops =;

static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{}

/*
 * dma_heap_ops.allocate: build a buffer of @len bytes out of the largest
 * available chunks, assemble them into an sg_table, and export it as a
 * dma-buf. Returns the dma-buf or an ERR_PTR; all partially-allocated
 * pages are freed on every error path.
 */
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    u32 fd_flags,
					    u64 heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		/* Later chunks can never be larger than the first. */
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* Create the dmabuf. */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops =;

static int system_heap_create(void)
{}
module_init();