/* linux/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c */

/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU read and writes are performed by creating a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
 * be conservative we do this every time we acquire or release an instobj, but
 * ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped space
 * goes beyond a certain threshold. At the moment this limit is 1MB.
 */
#include "priv.h"

#include <core/memory.h>
#include <core/tegra.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>

/*
 * Base instance object. NOTE(review): body elided in this skeleton;
 * presumably embeds struct nvkm_memory plus CPU-mapping state shared by
 * the DMA and IOMMU variants below — confirm against the full source.
 */
struct gk20a_instobj {};
/* Downcast from struct nvkm_memory * — macro expansion elided. */
#define gk20a_instobj(p)

/*
 * Used for objects allocated using the DMA API (case 2 in the file-top
 * comment: no IOMMU probed, physically contiguous allocation).
 */
struct gk20a_instobj_dma {};
/* Downcast from struct nvkm_memory * — macro expansion elided. */
#define gk20a_instobj_dma(p)

/*
 * Used for objects flattened using the IOMMU API (case 1 in the file-top
 * comment: pages made contiguous to the GPU by the IOMMU).
 */
struct gk20a_instobj_iommu {};
/* Downcast from struct nvkm_memory * — macro expansion elided. */
#define gk20a_instobj_iommu(p)

/*
 * Per-device instmem state. Per the file-top comment this presumably
 * holds the vaddr LRU list, its lock, and the 1MB recycling threshold —
 * verify against the full source.
 */
struct gk20a_instmem {};
/* Downcast from struct nvkm_instmem * — macro expansion elided. */
#define gk20a_instmem(p)

/*
 * Placement of the object (presumably a vtable hook for the
 * nvkm_memory_func tables below — assignment not visible here).
 * Stub: body elided. On GK20A all instmem lives in system memory per the
 * file-top comment, so this presumably reports a host target — confirm.
 */
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{}

/* Page shift of the allocation (body elided in this skeleton). */
static u8
gk20a_instobj_page(struct nvkm_memory *memory)
{}

/* GPU-visible base address of the object (body elided). */
static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{}

/* Size of the object in bytes (body elided). */
static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{}

/*
 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
 * Stub: body elided — presumably tears down the CPU mapping and updates
 * the LRU bookkeeping described at the top of the file; verify upstream.
 */
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{}

/*
 * Garbage-collect recycled CPU mappings, presumably until @size bytes of
 * mapped space are available again (see the 1MB threshold in the file-top
 * comment — confirm). Must be called while holding gk20a_instmem::lock.
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{}

/*
 * Acquire a CPU pointer to a DMA-allocated object. Stub: body elided —
 * per the file-top comment the GPU L2 cache is presumably flushed on
 * every acquire; confirm against the full source.
 */
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{}

/*
 * Acquire a CPU pointer to an IOMMU-backed object. Stub: body elided —
 * presumably creates (or recycles from the LRU) a write-combined CPU
 * mapping, invoking the vaddr GC when over threshold; confirm upstream.
 */
static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{}

/*
 * Release the CPU pointer of a DMA-allocated object. Stub: body elided —
 * per the file-top comment the GPU L2 cache is presumably invalidated
 * here; confirm against the full source.
 */
static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{}

/*
 * Release the CPU pointer of an IOMMU-backed object. Stub: body elided —
 * per the file-top comment the mapping is presumably kept and placed on
 * the LRU list rather than destroyed; confirm upstream.
 */
static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{}

/* 32-bit CPU read at byte @offset within the object (body elided). */
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{}

/* 32-bit CPU write of @data at byte @offset within the object (elided). */
static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{}

/*
 * Map the object into a GPU virtual address space at @vma. Stub: body
 * elided — presumably forwards the object's pages to the VMM with the
 * caller-supplied args (@argv/@argc); returns 0 or a negative errno per
 * kernel convention — verify against the full source.
 */
static int
gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		  struct nvkm_vma *vma, void *argv, u32 argc)
{}

/*
 * Destructor for DMA-allocated objects. Stub: body elided — presumably
 * frees the DMA allocation and returns the object pointer for the core
 * to kfree (ownership semantics not visible here; verify upstream).
 */
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{}

/*
 * Destructor for IOMMU-backed objects. Stub: body elided — presumably
 * recycles any live vaddr, unmaps the IOMMU pages and frees them; the
 * exact teardown order is not visible here — verify upstream.
 */
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{}

/*
 * Memory-ops table for DMA-allocated objects. NOTE(review): initializer
 * elided in this skeleton — "=;" is not valid C as written; presumably
 * wires up the _dma hooks above. Verify against the full source.
 */
static const struct nvkm_memory_func
gk20a_instobj_func_dma =;

/* Memory-ops table for IOMMU-backed objects (initializer elided). */
static const struct nvkm_memory_func
gk20a_instobj_func_iommu =;

/* CPU accessor table — presumably rd32/wr32 above (initializer elided). */
static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs =;

/*
 * Allocate a @npages-page object through the DMA API (the physically
 * contiguous fallback path from the file-top comment). Stub: body
 * elided — the role of @align and the *_node ownership contract are not
 * visible here; returns 0 or negative errno per kernel convention.
 */
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{}

/*
 * Allocate a @npages-page object and make it GPU-contiguous through the
 * IOMMU API (the preferred path from the file-top comment). Stub: body
 * elided — verify allocation/mapping details against the full source.
 */
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{}

/*
 * Allocate a new instmem object of @size bytes. Stub: body elided —
 * presumably dispatches to the DMA or IOMMU ctor above depending on
 * whether an IOMMU unit was probed (see the file-top comment), and
 * returns the result in *pmemory; confirm against the full source.
 */
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{}

/*
 * Subdev destructor. Stub: body elided — presumably drains the vaddr
 * LRU list and releases IOMMU/DMA resources before returning the
 * object for the core to free; verify against the full source.
 */
static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{}

/*
 * Instmem subdev ops table. NOTE(review): initializer elided in this
 * skeleton ("=;" is not valid C as written) — presumably installs
 * gk20a_instmem_dtor and gk20a_instobj_new; verify upstream.
 */
static const struct nvkm_instmem_func
gk20a_instmem =;

/*
 * Public constructor: allocate and register the GK20A instmem subdev on
 * @device, returning it in *pimem. Stub: body elided — presumably probes
 * for a Tegra IOMMU to choose between the two allocation paths described
 * at the top of the file; returns 0 or negative errno. Verify upstream.
 */
int
gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		  struct nvkm_instmem **pimem)
{}