linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define NVKM_VMM_LEVELS_MAX 5
#include "vmm.h"

#include <subdev/fb.h>

static void
nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
{}


static struct nvkm_vmm_pt *
nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
		const struct nvkm_vmm_page *page)
{}
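
/*
 * Editor's note (sketch, not from the original file): nvkm_vmm_pt_new()
 * and nvkm_vmm_pt_del() manage the software shadow of one page table;
 * new() sizes the structure from the level descriptor, del() frees it
 * and clears the caller's pointer.  Typical pairing:
 *
 *	struct nvkm_vmm_pt *pgt = nvkm_vmm_pt_new(desc, false, page);
 *	...
 *	nvkm_vmm_pt_del(&pgt);
 */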

struct nvkm_vmm_iter {};

#ifdef CONFIG_NOUVEAU_DEBUG_MMU
static const char *
nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
{}

static void
nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
{}

#define TRA(i,f,a...)
#else
#define TRA(i,f,a...)
#endif
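
/*
 * Editor's note: TRA() is the per-step trace hook for the page-table
 * walker.  Both definitions above must accept identical arguments so
 * call sites compile with or without CONFIG_NOUVEAU_DEBUG_MMU, e.g.
 * (illustrative format string):
 *
 *	TRA(it, "%s: %05x:%05x", "MAP", ptei, ptes);
 */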

static inline void
nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
{}

static inline void
nvkm_vmm_flush(struct nvkm_vmm_iter *it)
{}

static void
nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
{}

static void
nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
		     const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
{}

static bool
nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{}

static void
nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
		   const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
{}

static bool
nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{}

static void
nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
		     struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
{}

static bool
nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{}

static bool
nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{}

static bool
nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
{}

static bool
nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
{}

static inline u64
nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	      u64 addr, u64 size, const char *name, bool ref, bool pfn,
	      bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
	      nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
	      nvkm_vmm_pxe_func CLR_PTES)
{}
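
/*
 * Editor's sketch: nvkm_vmm_iter() walks [addr, addr + size) at the given
 * page size and hands each contiguous run of PTEs to the REF_PTES
 * callback.  The stub below (illustrative, not part of the driver) shows
 * the expected shape; returning true continues the walk, while false is
 * treated as a failure by the iterator.
 */
static bool __maybe_unused
nvkm_vmm_example_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{
	/* A real callback updates refcounts/PTEs for "ptes" entries
	 * starting at index "ptei" of the current page table.
	 */
	return true;
}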

static void
nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
			 u64 addr, u64 size)
{}

static int
nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
			 u64 addr, u64 size)
{}

static int
nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
{}

static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
		    u64 addr, u64 size, bool sparse, bool pfn)
{}

static void
nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
		  u64 addr, u64 size, struct nvkm_vmm_map *map,
		  nvkm_vmm_pte_func func)
{}

static void
nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
			 u64 addr, u64 size)
{}

static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
		  u64 addr, u64 size)
{}

static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
		  u64 addr, u64 size)
{}

static void
__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
			  u64 addr, u64 size, bool sparse, bool pfn)
{}

static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
			u64 addr, u64 size, bool sparse, bool pfn)
{}

static int
__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
			u64 addr, u64 size, struct nvkm_vmm_map *map,
			nvkm_vmm_pte_func func)
{}

static int
nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
		      u64 addr, u64 size, struct nvkm_vmm_map *map,
		      nvkm_vmm_pte_func func)
{}
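
/*
 * Editor's note: each nvkm_vmm_ptes_*() entry point with a __ -prefixed
 * sibling follows the usual locked/unlocked split: the plain wrapper
 * serialises on the VMM mutex, the __ worker assumes the lock is held.
 * Simplified sketch of the pattern (the mutex.vmm field layout is an
 * assumption based on current kernels):
 */
#if 0
	mutex_lock(&vmm->mutex.vmm);
	ret = __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
	mutex_unlock(&vmm->mutex.vmm);
	return ret;
#endif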

struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size)
{}

struct nvkm_vma *
nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
{}
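
/*
 * Editor's sketch: nvkm_vma_tail() splits the last "tail" bytes off a
 * VMA into a new node, shrinking the original in place.  With arbitrary
 * example values:
 *
 *	struct nvkm_vma *vma = nvkm_vma_new(0x100000, 0x4000);
 *	struct nvkm_vma *end = vma ? nvkm_vma_tail(vma, 0x1000) : NULL;
 *
 * vma then spans 0x100000..0x102fff and end spans 0x103000..0x103fff.
 */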

static inline void
nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

static inline void
nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

static void
nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

static inline void
nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

static inline void
nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

static void
nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

struct nvkm_vma *
nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
{}

#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :            \
	list_entry((root)->head.dir, struct nvkm_vma, head))
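
/*
 * Editor's sketch: with node() above (which relies on a "vmm" local being
 * in scope), merge/split code reaches a VMA's address-order neighbours
 * directly:
 *
 *	struct nvkm_vma *prev = node(vma, prev);
 *	struct nvkm_vma *next = node(vma, next);
 */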

static struct nvkm_vma *
nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
		    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
{}

struct nvkm_vma *
nvkm_vmm_node_split(struct nvkm_vmm *vmm,
		    struct nvkm_vma *vma, u64 addr, u64 size)
{}

static void
nvkm_vma_dump(struct nvkm_vma *vma)
{}

static void
nvkm_vmm_dump(struct nvkm_vmm *vmm)
{}

static void
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
{}

static int
nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
{}

static int
nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	      u32 pd_header, bool managed, u64 addr, u64 size,
	      struct lock_class_key *key, const char *name,
	      struct nvkm_vmm *vmm)
{}

int
nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	      u32 hdr, bool managed, u64 addr, u64 size,
	      struct lock_class_key *key, const char *name,
	      struct nvkm_vmm **pvmm)
{}
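
/*
 * Editor's sketch: chipset backends construct their VMM through
 * nvkm_vmm_new_(), supplying an nvkm_vmm_func table ("example_vmm_func"
 * below is a hypothetical stand-in for a real backend's table):
 */
#if 0
	return nvkm_vmm_new_(&example_vmm_func, mmu, 0, managed, addr, size,
			     key, name, pvmm);
#endif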

static struct nvkm_vma *
nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
			 u64 addr, u64 size, u8 page, bool map)
{}

int
nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
{}

/*TODO:
 * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
 *   with inside HMM, which would be a lot nicer for us to deal with.
 * - Support for systems without a 4KiB page size.
 */
int
nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
{}
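
/*
 * Editor's note: each "pfn" entry encodes a page address together with
 * validity/permission/aperture bits (the NVKM_VMM_PFN_* flags in vmm.h).
 * Illustrative call mapping a single 4KiB (shift = 12) page:
 *
 *	u64 pfn = ...;	// address plus NVKM_VMM_PFN_* flag bits
 *	ret = nvkm_vmm_pfn_map(vmm, 12, addr, 1ULL << 12, &pfn);
 */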

void
nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

void
nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
{}

void
nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

static int
nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		   void *argv, u32 argc, struct nvkm_vmm_map *map)
{}

static int
nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		    void *argv, u32 argc, struct nvkm_vmm_map *map)
{}

static int
nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
		    void *argv, u32 argc, struct nvkm_vmm_map *map)
{}

int
nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
	     struct nvkm_vmm_map *map)
{}

static void
nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

void
nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{}

void
nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
{}

int
nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
		    u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
{}

int
nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
{}
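
/*
 * Editor's sketch: the typical mapping lifecycle built from the calls
 * above.  Error handling is trimmed; argv/argc carry backend-specific
 * map arguments (assumed NULL/0 here for defaults):
 */
#if 0
	struct nvkm_vmm_map map = { .memory = memory, .offset = 0 };
	struct nvkm_vma *vma;

	ret = nvkm_vmm_get(vmm, 12, nvkm_memory_size(memory), &vma);
	if (ret)
		return ret;
	ret = nvkm_vmm_map(vmm, vma, NULL, 0, &map);
	if (ret)
		nvkm_vmm_put(vmm, &vma);
	...
	nvkm_vmm_unmap(vmm, vma);
	nvkm_vmm_put(vmm, &vma);
#endif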

void
nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
		   bool sparse, u8 refd)
{}

void
nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{}

int
nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{}

int
nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
{}
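
/*
 * Editor's note: the nvkm_vmm_raw_*() entry points serve address spaces
 * where the client manages VA itself and the kernel only references,
 * maps and releases page tables on request; "refd" indexes the page
 * size being referenced.  Illustrative sequence:
 *
 *	ret = nvkm_vmm_raw_get(vmm, addr, size, refd);	// reference PTs
 *	...						// backend map here
 *	nvkm_vmm_raw_put(vmm, addr, size, refd);	// drop the refs
 */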

void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{}

int
nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{}
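
/*
 * Editor's note: nvkm_vmm_join()/nvkm_vmm_part() attach and detach an
 * instance block, making the VMM's page directory visible to whatever
 * uses that instance; users join on setup and part on teardown:
 *
 *	ret = nvkm_vmm_join(vmm, inst);
 *	...
 *	nvkm_vmm_part(vmm, inst);
 */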

static bool
nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
{}

int
nvkm_vmm_boot(struct nvkm_vmm *vmm)
{}

static void
nvkm_vmm_del(struct kref *kref)
{}

void
nvkm_vmm_unref(struct nvkm_vmm **pvmm)
{}

struct nvkm_vmm *
nvkm_vmm_ref(struct nvkm_vmm *vmm)
{}
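
/*
 * Editor's sketch: VMM lifetime is kref-based; every holder pairs
 * nvkm_vmm_ref() with nvkm_vmm_unref(), the latter clearing the caller's
 * pointer.  For a hypothetical holder "chan":
 *
 *	chan->vmm = nvkm_vmm_ref(vmm);
 *	...
 *	nvkm_vmm_unref(&chan->vmm);	// chan->vmm becomes NULL
 */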

int
nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
	     u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{}