// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2023 Collabora ltd. */

#include <linux/iosys-map.h>
#include <linux/rwsem.h>

#include <drm/panthor_drm.h>

#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"

/*
 * The GPU heap context is an opaque structure used by the GPU to track the
 * heap allocations. The driver should only touch it to initialize it (zero all
 * fields). Because the CPU and GPU can both access this structure it is
 * required to be GPU cache line aligned.
 */
#define HEAP_CONTEXT_SIZE	32

/**
 * struct panthor_heap_chunk_header - Heap chunk header
 */
struct panthor_heap_chunk_header {
	/**
	 * @next: Next heap chunk in the list.
	 *
	 * This is a GPU VA.
	 */
	u64 next;

	/** @unknown: MBZ. */
	u32 unknown[14];
};
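
/*
 * Layout guard (illustrative addition): the FW-visible chunk header is
 * assumed to be exactly 64 bytes, one typical GPU cache line.
 */
static_assert(sizeof(struct panthor_heap_chunk_header) == 64);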

/**
 * struct panthor_heap_chunk - Structure used to keep track of allocated heap chunks.
 */
struct panthor_heap_chunk {
	/** @node: Used to insert the heap chunk in panthor_heap::chunks. */
	struct list_head node;

	/** @bo: Buffer object backing the heap chunk. */
	struct panthor_kernel_bo *bo;
};

/**
 * struct panthor_heap - Structure used to manage tiler heap contexts.
 */
struct panthor_heap {
	/** @chunks: List containing all heap chunks allocated so far. */
	struct list_head chunks;

	/** @lock: Lock protecting insertion in the chunks list. */
	struct mutex lock;

	/** @chunk_size: Size of each chunk. */
	u32 chunk_size;

	/** @max_chunks: Maximum number of chunks. */
	u32 max_chunks;

	/**
	 * @target_in_flight: Number of in-flight render passes after which
	 * we'd let the FW wait for fragment jobs to finish instead of
	 * allocating new chunks.
	 */
	u32 target_in_flight;

	/** @chunk_count: Number of heap chunks currently allocated. */
	u32 chunk_count;
};

#define MAX_HEAPS_PER_POOL	128

/**
 * struct panthor_heap_pool - Pool of heap contexts
 *
 * The pool is attached to a panthor_file and can't be shared across processes.
 */
struct panthor_heap_pool {
	/** @refcount: Number of references to this pool. */
	struct kref refcount;

	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/** @vm: VM this pool is bound to. */
	struct panthor_vm *vm;

	/** @lock: Lock protecting access to @xa. */
	struct rw_semaphore lock;

	/** @xa: Array storing panthor_heap objects. */
	struct xarray xa;

	/** @gpu_contexts: Buffer object containing the GPU heap contexts. */
	struct panthor_kernel_bo *gpu_contexts;
};

static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
{
	u32 l2_features = ptdev->gpu_info.l2_features;
	u32 gpu_cache_line_size = GPU_L2_FEATURES_LINE_SIZE(l2_features);

	/* Heap contexts must be GPU cache line aligned (see the comment
	 * above HEAP_CONTEXT_SIZE).
	 */
	return ALIGN(HEAP_CONTEXT_SIZE, gpu_cache_line_size);
}

static int panthor_get_heap_ctx_offset(struct panthor_heap_pool *pool, int id)
{
	return panthor_heap_ctx_stride(pool->ptdev) * id;
}
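
/*
 * Worked example (illustrative): with a 64-byte GPU L2 cache line,
 * panthor_heap_ctx_stride() returns ALIGN(32, 64) = 64, so the context
 * of heap id 3 lives at offset 3 * 64 = 192 in the gpu_contexts BO.
 */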

static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
{
	return pool->gpu_contexts->kmap +
	       panthor_get_heap_ctx_offset(pool, id);
}

static void panthor_free_heap_chunk(struct panthor_vm *vm,
				    struct panthor_heap *heap,
				    struct panthor_heap_chunk *chunk)
{
	mutex_lock(&heap->lock);
	list_del(&chunk->node);
	heap->chunk_count--;
	mutex_unlock(&heap->lock);

	panthor_kernel_bo_destroy(vm, chunk->bo);
	kfree(chunk);
}

static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
				    struct panthor_vm *vm,
				    struct panthor_heap *heap,
				    bool initial_chunk)
{
	struct panthor_heap_chunk *chunk;
	struct panthor_heap_chunk_header *hdr;
	int ret;

	chunk = kmalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return -ENOMEM;

	chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
					     DRM_PANTHOR_BO_NO_MMAP,
					     DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
					     PANTHOR_VM_KERNEL_AUTO_VA);
	if (IS_ERR(chunk->bo)) {
		ret = PTR_ERR(chunk->bo);
		goto err_free_chunk;
	}

	ret = panthor_kernel_bo_vmap(chunk->bo);
	if (ret)
		goto err_destroy_bo;

	hdr = chunk->bo->kmap;
	memset(hdr, 0, sizeof(*hdr));

	if (initial_chunk && !list_empty(&heap->chunks)) {
		struct panthor_heap_chunk *prev_chunk;
		u64 prev_gpuva;

		/* Initial chunks are linked together through their headers.
		 * Chunks allocated on the growing path are linked by the FW
		 * instead.
		 */
		prev_chunk = list_first_entry(&heap->chunks,
					      struct panthor_heap_chunk,
					      node);

		prev_gpuva = panthor_kernel_bo_gpuva(prev_chunk->bo);
		hdr->next = (prev_gpuva & GENMASK_ULL(63, 12)) |
			    (heap->chunk_size >> 12);
	}

	panthor_kernel_bo_vunmap(chunk->bo);

	mutex_lock(&heap->lock);
	list_add(&chunk->node, &heap->chunks);
	heap->chunk_count++;
	mutex_unlock(&heap->lock);

	return 0;

err_destroy_bo:
	panthor_kernel_bo_destroy(vm, chunk->bo);

err_free_chunk:
	kfree(chunk);

	return ret;
}

static void panthor_free_heap_chunks(struct panthor_vm *vm,
				     struct panthor_heap *heap)
{
	struct panthor_heap_chunk *chunk, *tmp;

	list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
		panthor_free_heap_chunk(vm, heap, chunk);
}

static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
				     struct panthor_vm *vm,
				     struct panthor_heap *heap,
				     u32 chunk_count)
{
	int ret;
	u32 i;

	for (i = 0; i < chunk_count; i++) {
		ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int
panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
{
	struct panthor_heap *heap;

	heap = xa_erase(&pool->xa, handle);
	if (!heap)
		return -EINVAL;

	panthor_free_heap_chunks(pool->vm, heap);
	mutex_destroy(&heap->lock);
	kfree(heap);
	return 0;
}

/**
 * panthor_heap_destroy() - Destroy a heap context
 * @pool: Pool this context belongs to.
 * @handle: Handle returned by panthor_heap_create().
 */
int panthor_heap_destroy(struct panthor_heap_pool *pool, u32 handle)
{
	int ret;

	down_write(&pool->lock);
	ret = panthor_heap_destroy_locked(pool, handle);
	up_write(&pool->lock);

	return ret;
}

/**
 * panthor_heap_create() - Create a heap context
 * @pool: Pool to instantiate the heap context from.
 * @initial_chunk_count: Number of chunks allocated at initialization time.
 * Must be at least 1.
 * @chunk_size: The size of each chunk. Must be page-aligned and lie in the
 * [128k:8M] range.
 * @max_chunks: Maximum number of chunks that can be allocated.
 * @target_in_flight: Maximum number of in-flight render passes.
 * @heap_ctx_gpu_va: Pointer holding the GPU address of the allocated heap
 * context.
 * @first_chunk_gpu_va: Pointer holding the GPU address of the first chunk
 * assigned to the heap context.
 *
 * Return: a valid handle (>= 0) on success, a negative error code otherwise.
 */
int panthor_heap_create(struct panthor_heap_pool *pool,
			u32 initial_chunk_count,
			u32 chunk_size,
			u32 max_chunks,
			u32 target_in_flight,
			u64 *heap_ctx_gpu_va,
			u64 *first_chunk_gpu_va)
{
	struct panthor_heap *heap;
	struct panthor_heap_chunk *first_chunk;
	struct panthor_vm *vm;
	int ret = 0;
	u32 id;

	if (initial_chunk_count == 0)
		return -EINVAL;

	if (initial_chunk_count > max_chunks)
		return -EINVAL;

	if (!IS_ALIGNED(chunk_size, PAGE_SIZE) ||
	    chunk_size < SZ_128K || chunk_size > SZ_8M)
		return -EINVAL;

	down_read(&pool->lock);
	vm = panthor_vm_get(pool->vm);
	up_read(&pool->lock);

	/* The pool has been destroyed, we can't create a new heap. */
	if (!vm)
		return -EINVAL;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap) {
		ret = -ENOMEM;
		goto err_put_vm;
	}

	mutex_init(&heap->lock);
	INIT_LIST_HEAD(&heap->chunks);
	heap->chunk_size = chunk_size;
	heap->max_chunks = max_chunks;
	heap->target_in_flight = target_in_flight;

	ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
					initial_chunk_count);
	if (ret)
		goto err_free_heap;

	first_chunk = list_first_entry(&heap->chunks,
				       struct panthor_heap_chunk,
				       node);
	*first_chunk_gpu_va = panthor_kernel_bo_gpuva(first_chunk->bo);

	down_write(&pool->lock);
	/* The pool has been destroyed, we can't create a new heap. */
	if (!pool->vm) {
		ret = -EINVAL;
	} else {
		ret = xa_alloc(&pool->xa, &id, heap,
			       XA_LIMIT(0, MAX_HEAPS_PER_POOL - 1), GFP_KERNEL);
		if (!ret) {
			void *gpu_ctx = panthor_get_heap_ctx(pool, id);

			/* The GPU heap context is opaque to us: zero it. */
			memset(gpu_ctx, 0,
			       panthor_heap_ctx_stride(pool->ptdev));
			*heap_ctx_gpu_va =
				panthor_kernel_bo_gpuva(pool->gpu_contexts) +
				panthor_get_heap_ctx_offset(pool, id);
		}
	}
	up_write(&pool->lock);

	if (ret)
		goto err_free_heap;

	panthor_vm_put(vm);
	return id;

err_free_heap:
	panthor_free_heap_chunks(vm, heap);
	mutex_destroy(&heap->lock);
	kfree(heap);

err_put_vm:
	panthor_vm_put(vm);
	return ret;
}
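
/*
 * Note: a heap handle doubles as an index into the pool's context BO:
 * handle N owns the GPU heap context at offset N * panthor_heap_ctx_stride().
 * panthor_heap_grow() and panthor_heap_return_chunk() rely on this to map
 * the context GPU VA the FW hands back to the right panthor_heap.
 */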

/**
 * panthor_heap_return_chunk() - Return an unused heap chunk
 * @pool: The pool this heap belongs to.
 * @heap_gpu_va: The GPU address of the heap context.
 * @chunk_gpu_va: The chunk VA to return.
 *
 * This function is used when a chunk allocated with panthor_heap_grow()
 * couldn't be linked to the heap context through the FW interface because
 * the group requesting the allocation was scheduled out in the meantime.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
			      u64 heap_gpu_va,
			      u64 chunk_gpu_va)
{
	u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
	u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
	struct panthor_heap_chunk *chunk, *tmp, *removed = NULL;
	struct panthor_heap *heap;
	int ret;

	if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
		return -EINVAL;

	down_read(&pool->lock);
	heap = xa_load(&pool->xa, heap_id);
	if (!heap) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Strip the size bits encoded in the low bits of the chunk VA. */
	chunk_gpu_va &= GENMASK_ULL(63, 12);

	mutex_lock(&heap->lock);
	list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) {
		if (panthor_kernel_bo_gpuva(chunk->bo) == chunk_gpu_va) {
			removed = chunk;
			list_del(&chunk->node);
			heap->chunk_count--;
			break;
		}
	}
	mutex_unlock(&heap->lock);

	if (removed) {
		panthor_kernel_bo_destroy(pool->vm, removed->bo);
		kfree(removed);
		ret = 0;
	} else {
		ret = -EINVAL;
	}

out_unlock:
	up_read(&pool->lock);
	return ret;
}

/**
 * panthor_heap_grow() - Make a heap context grow.
 * @pool: The pool this heap belongs to.
 * @heap_gpu_va: The GPU address of the heap context.
 * @renderpasses_in_flight: Number of render passes currently in-flight.
 * @pending_frag_count: Number of fragment jobs waiting for execution/completion.
 * @new_chunk_gpu_va: Pointer used to return the chunk VA.
 *
 * Return:
 * - 0 if a new chunk was allocated
 * - -ENOMEM if the tiler context reached the maximum number of chunks
 *   or if too many render passes are in-flight
 *   or if the allocation failed
 * - -EINVAL if any of the arguments passed to panthor_heap_grow() is invalid
 */
int panthor_heap_grow(struct panthor_heap_pool *pool,
		      u64 heap_gpu_va,
		      u32 renderpasses_in_flight,
		      u32 pending_frag_count,
		      u64 *new_chunk_gpu_va)
{
	u64 offset = heap_gpu_va - panthor_kernel_bo_gpuva(pool->gpu_contexts);
	u32 heap_id = (u32)offset / panthor_heap_ctx_stride(pool->ptdev);
	struct panthor_heap_chunk *chunk;
	struct panthor_heap *heap;
	int ret;

	if (offset > U32_MAX || heap_id >= MAX_HEAPS_PER_POOL)
		return -EINVAL;

	down_read(&pool->lock);
	heap = xa_load(&pool->xa, heap_id);
	if (!heap) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* If we reached the target in-flight render passes, or the maximum
	 * number of chunks, let the FW figure out another way to find memory
	 * (wait for render passes to finish, or call the exception handler
	 * provided by the userspace driver, if any).
	 */
	if (renderpasses_in_flight > heap->target_in_flight ||
	    heap->chunk_count >= heap->max_chunks) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
	if (ret)
		goto out_unlock;

	chunk = list_first_entry(&heap->chunks,
				 struct panthor_heap_chunk,
				 node);
	/* The chunk VA returned to the FW encodes the chunk size in its
	 * low bits, like the chunk header's next field.
	 */
	*new_chunk_gpu_va = (panthor_kernel_bo_gpuva(chunk->bo) & GENMASK_ULL(63, 12)) |
			    (heap->chunk_size >> 12);
	ret = 0;

out_unlock:
	up_read(&pool->lock);
	return ret;
}

static void panthor_heap_pool_release(struct kref *refcount)
{
	struct panthor_heap_pool *pool =
		container_of(refcount, struct panthor_heap_pool, refcount);

	xa_destroy(&pool->xa);
	kfree(pool);
}

/**
 * panthor_heap_pool_put() - Release a heap pool reference
 * @pool: Pool to release the reference on. Can be NULL.
 */
void panthor_heap_pool_put(struct panthor_heap_pool *pool)
{
	if (pool)
		kref_put(&pool->refcount, panthor_heap_pool_release);
}

/**
 * panthor_heap_pool_get() - Get a heap pool reference
 * @pool: Pool to get the reference on. Can be NULL.
 *
 * Return: @pool.
 */
struct panthor_heap_pool *
panthor_heap_pool_get(struct panthor_heap_pool *pool)
{
	if (pool)
		kref_get(&pool->refcount);

	return pool;
}

/**
 * panthor_heap_pool_create() - Create a heap pool
 * @ptdev: Device.
 * @vm: The VM this heap pool will be attached to.
 *
 * Heap pools might contain up to 128 heap contexts, and are per-VM.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct panthor_heap_pool *
panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
{
	size_t bosize = ALIGN(MAX_HEAPS_PER_POOL *
			      panthor_heap_ctx_stride(ptdev),
			      4096);
	struct panthor_heap_pool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	/* We want a weak ref here: the heap pool belongs to the VM, so we're
	 * sure that, as long as the heap pool exists, the VM exists too.
	 */
	pool->vm = vm;
	pool->ptdev = ptdev;
	init_rwsem(&pool->lock);
	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
	kref_init(&pool->refcount);

	pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
						      DRM_PANTHOR_BO_NO_MMAP,
						      DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
						      PANTHOR_VM_KERNEL_AUTO_VA);
	if (IS_ERR(pool->gpu_contexts)) {
		ret = PTR_ERR(pool->gpu_contexts);
		goto err_destroy_pool;
	}

	ret = panthor_kernel_bo_vmap(pool->gpu_contexts);
	if (ret)
		goto err_destroy_pool;

	return pool;

err_destroy_pool:
	panthor_heap_pool_destroy(pool);
	return ERR_PTR(ret);
}

/**
 * panthor_heap_pool_destroy() - Destroy a heap pool.
 * @pool: Pool to destroy.
 *
 * This function destroys all heap contexts and their resources, thus
 * preventing any use of the heap contexts or the chunks attached to them
 * after that point.
 *
 * If the GPU still has access to some heap contexts, a fault should be
 * triggered, which should flag the command stream groups using these
 * contexts as faulty.
 *
 * The heap pool object is only released when all references to this pool
 * are released.
 */
void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
{
	struct panthor_heap *heap;
	unsigned long i;

	if (!pool)
		return;

	down_write(&pool->lock);
	xa_for_each(&pool->xa, i, heap)
		drm_WARN_ON(&pool->ptdev->base,
			    panthor_heap_destroy_locked(pool, i));

	if (!IS_ERR_OR_NULL(pool->gpu_contexts))
		panthor_kernel_bo_destroy(pool->vm, pool->gpu_contexts);

	/* Reflects the fact the pool has been destroyed. */
	pool->vm = NULL;
	up_write(&pool->lock);

	panthor_heap_pool_put(pool);
}
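
/*
 * Illustrative lifecycle (hypothetical values, not part of the driver):
 *
 *	pool = panthor_heap_pool_create(ptdev, vm);
 *	handle = panthor_heap_create(pool, 1, SZ_2M, 64, 256,
 *				     &ctx_va, &first_chunk_va);
 *	// On tiler OOM, the scheduler grows the heap:
 *	panthor_heap_grow(pool, ctx_va, rp_in_flight, frag_count, &chunk_va);
 *	// If the group was scheduled out before the chunk could be linked:
 *	panthor_heap_return_chunk(pool, ctx_va, chunk_va);
 *	...
 *	panthor_heap_destroy(pool, handle);
 *	panthor_heap_pool_destroy(pool);
 */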