/* linux/drivers/gpu/drm/nouveau/nouveau_uvmm.c */

// SPDX-License-Identifier: MIT

/*
 * Locking:
 *
 * The uvmm mutex protects any operations on the GPU VA space provided by the
 * DRM GPU VA manager.
 *
 * The GEMs dma_resv lock protects the GEMs GPUVA list, hence link/unlink of a
 * mapping to it's backing GEM must be performed under this lock.
 *
 * Actual map/unmap operations within the fence signalling critical path are
 * protected by installing DMA fences to the corresponding GEMs DMA
 * reservations, such that concurrent BO moves, which itself walk the GEMs GPUVA
 * list in order to map/unmap it's entries, can't occur concurrently.
 *
 * Accessing the DRM_GPUVA_INVALIDATED flag doesn't need any separate
 * protection, since there are no accesses other than from BO move callbacks
 * and from the fence signalling critical path, which are already protected by
 * the corresponding GEMs DMA reservation fence.
 */

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_uvmm.h"

#include <nvif/vmm.h>
#include <nvif/mem.h>

#include <nvif/class.h>
#include <nvif/if000c.h>
#include <nvif/if900d.h>

/*
 * NOTE(review): every macro below is value-stripped in this snapshot (no
 * replacement text), so each currently expands to nothing.  Restore the
 * definitions from the full source before building.
 */

/* VA space geometry — presumably bit width, start and end of the GPU VA
 * range managed per client; confirm against the full source. */
#define NOUVEAU_VA_SPACE_BITS
#define NOUVEAU_VA_SPACE_START
#define NOUVEAU_VA_SPACE_END

/* Iteration/access helpers over a bind job's list of ops (see
 * struct bind_job_op); bodies elided here. */
#define list_last_op(_ops)
#define list_prev_op(_op)
#define list_for_each_op(_op, _ops)
#define list_for_each_op_from_reverse(_op, _ops)
#define list_for_each_op_safe(_op, _n, _ops)

/*
 * NOTE(review): all four definitions below have their members elided
 * ("{}") in this snapshot — the real layouts must come from the full
 * source.  Comments below are inferred from the names; confirm.
 */

/* Kind of VM_BIND operation requested by userspace (map/unmap/...). */
enum vm_bind_op {};

/* Presumably uvmas pre-allocated before entering the fence signalling
 * critical path (see locking comment at top of file). */
struct nouveau_uvma_prealloc {};

/* One operation of a VM_BIND job, linked into the job's op list. */
struct bind_job_op {};

/* Argument bundle passed through the map-path helpers below. */
struct uvmm_map_args {};

/*
 * NOTE(review): function bodies in this section are elided ("{}") in this
 * snapshot.  Per the nvif includes above, these look like thin wrappers
 * around the nvif VMM interface for [addr, addr + range) — confirm.
 */

/* Take a sparse reservation on the VMM for the given range. */
static int
nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm,
			    u64 addr, u64 range)
{}

/* Drop a sparse reservation previously taken with _sparse_ref(). */
static int
nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
			      u64 addr, u64 range)
{}

/* Acquire (reserve) page-table backing for the range. */
static int
nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
		     u64 addr, u64 range)
{}

/* Release page-table backing for the range. */
static int
nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
		     u64 addr, u64 range)
{}

/* Unmap the range; @sparse presumably restores the sparse mapping. */
static int
nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
		       u64 addr, u64 range, bool sparse)
{}

/* Map @mem at @addr with page @kind, starting @bo_offset into the BO. */
static int
nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
		     u64 addr, u64 range,
		     u64 bo_offset, u8 kind,
		     struct nouveau_mem *mem)
{}

/*
 * Per-uvma (userspace VMA) helpers.  Bodies elided in this snapshot;
 * comments inferred from names — confirm against full source.
 */

/* Drop the sparse reservation covering a whole region. */
static int
nouveau_uvma_region_sparse_unref(struct nouveau_uvma_region *reg)
{}

/* Release PT backing for a single uvma's range. */
static int
nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
{}

/* Map a single uvma onto @mem (fence signalling critical path —
 * see locking comment at top of file). */
static int
nouveau_uvma_map(struct nouveau_uvma *uvma,
		 struct nouveau_mem *mem)
{}

/* Unmap a single uvma. */
static int
nouveau_uvma_unmap(struct nouveau_uvma *uvma)
{}

/* Allocate an empty uvma; on success *puvma owns it. */
static int
nouveau_uvma_alloc(struct nouveau_uvma **puvma)
{}

/* Free a uvma previously obtained from nouveau_uvma_alloc(). */
static void
nouveau_uvma_free(struct nouveau_uvma *uvma)
{}

/* Take / drop a reference on the uvma's backing GEM object. */
static void
nouveau_uvma_gem_get(struct nouveau_uvma *uvma)
{}

static void
nouveau_uvma_gem_put(struct nouveau_uvma *uvma)
{}

/*
 * uvma region lifecycle: alloc/ref/insert/find/destroy.  Regions appear
 * to be kref-counted (see the kref-typed free callback below).  Bodies
 * elided in this snapshot.
 */

/* Allocate an empty region; on success *preg owns the initial ref. */
static int
nouveau_uvma_region_alloc(struct nouveau_uvma_region **preg)
{}

/* kref release callback — frees the region once the last ref drops. */
static void
nouveau_uvma_region_free(struct kref *kref)
{}

/* Take / drop a reference on @reg. */
static void
nouveau_uvma_region_get(struct nouveau_uvma_region *reg)
{}

static void
nouveau_uvma_region_put(struct nouveau_uvma_region *reg)
{}

/* Insert @reg into the uvmm's region tracking (lower-level step;
 * presumably expects addr/range already set — confirm). */
static int
__nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
			     struct nouveau_uvma_region *reg)
{}

/* Set up @reg for [addr, addr + range) and insert it. */
static int
nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
			   struct nouveau_uvma_region *reg,
			   u64 addr, u64 range)
{}

/* Remove @reg from the uvmm's region tracking. */
static void
nouveau_uvma_region_remove(struct nouveau_uvma_region *reg)
{}

/* Allocate, initialize and insert a new region in one step. */
static int
nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
			   u64 addr, u64 range)
{}

/* First region overlapping [addr, addr + range), or NULL. */
static struct nouveau_uvma_region *
nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm,
			       u64 addr, u64 range)
{}

/* Region exactly matching [addr, addr + range), or NULL — confirm
 * exact-match semantics against full source. */
static struct nouveau_uvma_region *
nouveau_uvma_region_find(struct nouveau_uvmm *uvmm,
			 u64 addr, u64 range)
{}

/* True if no mappings currently live inside @reg. */
static bool
nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
{}

/* Tear down @reg (lower-level step used by _destroy below). */
static int
__nouveau_uvma_region_destroy(struct nouveau_uvma_region *reg)
{}

/* Look up and destroy the region at [addr, addr + range). */
static int
nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm,
			    u64 addr, u64 range)
{}

/* Mark / complete a region teardown — exact semantics elided; confirm. */
static void
nouveau_uvma_region_dirty(struct nouveau_uvma_region *reg)
{}

static void
nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
{}

/*
 * Split/merge ("sm") prepare stage and its unwind paths, driven by
 * drm_gpuva_ops from the DRM GPU VA manager (see top-of-file locking
 * comment).  Prepare runs outside the fence signalling critical path so
 * it may allocate; unwind rolls back partially prepared op lists.
 * Bodies elided in this snapshot.
 */

/* Undo a single prepared map op. */
static void
op_map_prepare_unwind(struct nouveau_uvma *uvma)
{}

/* Undo a single prepared unmap op on @va. */
static void
op_unmap_prepare_unwind(struct drm_gpuva *va)
{}

/* Walk @ops backwards from @last, undoing each prepared op. */
static void
nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
			       struct nouveau_uvma_prealloc *new,
			       struct drm_gpuva_ops *ops,
			       struct drm_gpuva_op *last,
			       struct uvmm_map_args *args)
{}

/* Unwind a fully prepared map request covering [addr, addr + range). */
static void
nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm,
				   struct nouveau_uvma_prealloc *new,
				   struct drm_gpuva_ops *ops,
				   u64 addr, u64 range)
{}

/* Unwind a fully prepared unmap request. */
static void
nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm,
				     struct nouveau_uvma_prealloc *new,
				     struct drm_gpuva_ops *ops)
{}

/* Prepare one map op: presumably allocates the uvma into *puvma from
 * @op/@args — confirm ownership against full source. */
static int
op_map_prepare(struct nouveau_uvmm *uvmm,
	       struct nouveau_uvma **puvma,
	       struct drm_gpuva_op_map *op,
	       struct uvmm_map_args *args)
{}

/* Prepare one unmap op. */
static void
op_unmap_prepare(struct drm_gpuva_op_unmap *u)
{}

/*
 * Note: @args should not be NULL when calling for a map operation.
 */
static int
nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
			struct nouveau_uvma_prealloc *new,
			struct drm_gpuva_ops *ops,
			struct uvmm_map_args *args)
{}

/* Prepare a map request; @kind selects the page kind, @region the
 * containing sparse region (presumably — confirm). */
static int
nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm,
			    struct nouveau_uvma_prealloc *new,
			    struct nouveau_uvma_region *region,
			    struct drm_gpuva_ops *ops,
			    u64 addr, u64 range, u8 kind)
{}

/* Prepare an unmap request (no map args needed). */
static int
nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm,
			      struct nouveau_uvma_prealloc *new,
			      struct drm_gpuva_ops *ops)
{}

/*
 * Split/merge execution stage.  Per the top-of-file locking comment this
 * runs in the fence signalling critical path, protected by DMA fences on
 * the GEMs' reservations — so nothing here may allocate or block.
 * Bodies elided in this snapshot.
 */

/* GEM object backing @op, if any. */
static struct drm_gem_object *
op_gem_obj(struct drm_gpuva_op *op)
{}

/* Execute a single map op for @uvma. */
static void
op_map(struct nouveau_uvma *uvma)
{}

/* Execute a single unmap op. */
static void
op_unmap(struct drm_gpuva_op_unmap *u)
{}

/* Unmap only the sub-range [addr, addr + range) of @u's mapping. */
static void
op_unmap_range(struct drm_gpuva_op_unmap *u,
	       u64 addr, u64 range)
{}

/* Execute a remap op using the uvmas preallocated in @new. */
static void
op_remap(struct drm_gpuva_op_remap *r,
	 struct nouveau_uvma_prealloc *new)
{}

/* Drive all ops in @ops through the op_* handlers above. */
static int
nouveau_uvmm_sm(struct nouveau_uvmm *uvmm,
		struct nouveau_uvma_prealloc *new,
		struct drm_gpuva_ops *ops)
{}

/* Execute a prepared map request. */
static int
nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm,
		    struct nouveau_uvma_prealloc *new,
		    struct drm_gpuva_ops *ops)
{}

/* Execute a prepared unmap request. */
static int
nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm,
		      struct nouveau_uvma_prealloc *new,
		      struct drm_gpuva_ops *ops)
{}

/* Post-execution cleanup; @unmap presumably selects whether unmapped
 * ranges also drop PT backing — confirm. */
static void
nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
			struct nouveau_uvma_prealloc *new,
			struct drm_gpuva_ops *ops, bool unmap)
{}

/* Cleanup after a map request. */
static void
nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm,
			    struct nouveau_uvma_prealloc *new,
			    struct drm_gpuva_ops *ops)
{}

/* Cleanup after an unmap request. */
static void
nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
			      struct nouveau_uvma_prealloc *new,
			      struct drm_gpuva_ops *ops)
{}

/* Validate that [addr, addr + range) is a legal request against the
 * managed VA space (alignment/bounds — confirm exact checks). */
static int
nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{}

/*
 * VM_BIND job object (kref-counted, see the kref-typed free callback)
 * and submit-time validation helpers.  Bodies elided in this snapshot.
 */

/* Allocate a bind job; on success *pjob owns the initial ref. */
static int
nouveau_uvmm_bind_job_alloc(struct nouveau_uvmm_bind_job **pjob)
{}

/* kref release callback — frees the job on last put. */
static void
nouveau_uvmm_bind_job_free(struct kref *kref)
{}

/* Take / drop a reference on @job. */
static void
nouveau_uvmm_bind_job_get(struct nouveau_uvmm_bind_job *job)
{}

static void
nouveau_uvmm_bind_job_put(struct nouveau_uvmm_bind_job *job)
{}

/* Validate a single op of @job before execution. */
static int
bind_validate_op(struct nouveau_job *job,
		 struct bind_job_op *op)
{}

/* Sparse-specific validation for a map at [addr, addr + range). */
static void
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
{}

/* Validation shared by sparse and non-sparse maps. */
static int
bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range,
			 bool sparse)
{}

/* Validate the job's ops against existing regions. */
static int
bind_validate_region(struct nouveau_job *job)
{}

/* Link the op's new gpuvas into the backing GEM's GPUVA list — per the
 * top-of-file comment this requires the GEM's dma_resv to be held. */
static void
bind_link_gpuvas(struct bind_job_op *bop)
{}

/* Lock and validate all objects the job touches via drm_exec,
 * reserving @num_fences fence slots. */
static int
bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec,
		   unsigned int num_fences)
{}

/*
 * Scheduler-facing bind-job callbacks and job construction.  Bodies
 * elided in this snapshot; the ops-table initializer below is also
 * elided ("=;") and is not valid C as-is — restore from full source.
 */

/* Submit-time stage: presumably prepare ops, lock via @vme, install
 * fences — confirm. */
static int
nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
			     struct drm_gpuvm_exec *vme)
{}

/* Called once the job's fence is armed; finishes the submit under the
 * held locks. */
static void
nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job,
				   struct drm_gpuvm_exec *vme)
{}

/* Run stage: executes the bind ops in the fence signalling critical
 * path; returns the fence to signal on completion. */
static struct dma_fence *
nouveau_uvmm_bind_job_run(struct nouveau_job *job)
{}

/* Free-stage cleanup after the job completed. */
static void
nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job)
{}

/* NOTE(review): initializer elided in this snapshot. */
static const struct nouveau_job_ops nouveau_bind_job_ops =;

/* Translate one userspace drm_nouveau_vm_bind_op into an internal
 * bind_job_op; on success *pop owns the allocation. */
static int
bind_job_op_from_uop(struct bind_job_op **pop,
		     struct drm_nouveau_vm_bind_op *uop)
{}

/* Free a list of bind_job_ops. */
static void
bind_job_ops_free(struct list_head *ops)
{}

/* Build a bind job from the parsed ioctl args. */
static int
nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
			   struct nouveau_uvmm_bind_job_args *__args)
{}

/* Create and submit a VM_BIND job for @args. */
static int
nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
{}

/*
 * uAPI entry points and uvmm setup/teardown.  Bodies elided in this
 * snapshot; the gpuvm_ops initializer is also elided ("=;") and is not
 * valid C as-is — restore from full source.
 */

/* Copy the variable-size op/sync arrays of @req in from userspace. */
static int
nouveau_uvmm_vm_bind_ucopy(struct nouveau_uvmm_bind_job_args *args,
			   struct drm_nouveau_vm_bind *req)
{}

/* Free what _ucopy() allocated. */
static void
nouveau_uvmm_vm_bind_ufree(struct nouveau_uvmm_bind_job_args *args)
{}

/* DRM_IOCTL_NOUVEAU_VM_BIND handler. */
int
nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
			   void *data,
			   struct drm_file *file_priv)
{}

/* (Re)map every uvma backed by @nvbo onto @mem — walks the GEM's GPUVA
 * list, so per the top-of-file comment the dma_resv must be held. */
void
nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
{}

/* Unmap every uvma backed by @nvbo (same locking as above). */
void
nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
{}

/* drm_gpuvm free callback — releases the containing nouveau_uvmm. */
static void
nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
{}

/* drm_gpuvm validate callback for evicted BOs. */
static int
nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{}

/* NOTE(review): initializer elided in this snapshot. */
static const struct drm_gpuvm_ops gpuvm_ops =;

/* DRM_IOCTL_NOUVEAU_VM_INIT handler — sets up the per-client uvmm. */
int
nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
			   void *data,
			   struct drm_file *file_priv)
{}

/* Tear down a uvmm created by the VM_INIT ioctl. */
void
nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
{}