/* linux/drivers/gpu/drm/i915/i915_vma_resource.c */

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "i915_sw_fence.h"
#include "i915_vma_resource.h"
#include "i915_drv.h"
#include "intel_memory_region.h"

#include "gt/intel_gtt.h"

static struct kmem_cache *slab_vma_resources;

/**
 * DOC:
 * We use a per-vm interval tree to keep track of vma_resources
 * scheduled for unbind but not yet unbound. The tree is protected by
 * the vm mutex, and nodes are removed just after the unbind fence signals.
 * The removal takes the vm mutex from a kernel thread which we need to
 * keep in mind so that we don't grab the mutex and try to wait for all
 * pending unbinds to complete, because that will temporarily block many
 * of the workqueue threads, and people will get angry.
 *
 * We should consider using a single ordered fence per VM instead but that
 * requires ordering the unbinds and might introduce unnecessary waiting
 * for unrelated unbinds. Amount of code will probably be roughly the same
 * due to the simplicity of using the interval tree interface.
 *
 * Another drawback of this interval tree is that the complexity of insertion
 * and removal of fences increases as O(ln(pending_unbinds)) instead of
 * O(1) for a single fence without interval tree.
 */
/*
 * Key the interval tree on the GPU virtual address range occupied by the
 * vma resource: [start, start + node_size - 1]. The stripped copy had
 * these macros expanding to nothing, which would make INTERVAL_TREE_DEFINE
 * below syntactically invalid.
 */
#define VMA_RES_START(_node) ((_node)->start)
#define VMA_RES_LAST(_node) ((_node)->node_size + (_node)->start - 1)
/*
 * Instantiate the augmented rbtree accessors (the static vma_res_itree_*
 * insert/remove/iter helpers) for struct i915_vma_resource nodes linked
 * through their rb member, keyed via VMA_RES_START()/VMA_RES_LAST().
 */
INTERVAL_TREE_DEFINE(struct i915_vma_resource, rb,
		     u64, __subtree_last,
		     VMA_RES_START, VMA_RES_LAST, static, vma_res_itree);

/* Callbacks for the unbind dma-fence. */

/**
 * i915_vma_resource_alloc - Allocate a vma resource
 *
 * Return: A pointer to a cleared struct i915_vma_resource or
 * a -ENOMEM error pointer if allocation fails.
 */
struct i915_vma_resource *i915_vma_resource_alloc(void)
{}

/**
 * i915_vma_resource_free - Free a vma resource
 * @vma_res: The vma resource to free.
 */
void i915_vma_resource_free(struct i915_vma_resource *vma_res)
{}

/* dma_fence_ops->get_driver_name() callback for the unbind fence. */
static const char *get_driver_name(struct dma_fence *fence)
{
	return "vma unbind fence";
}

/* dma_fence_ops->get_timeline_name() callback for the unbind fence. */
static const char *get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void unbind_fence_free_rcu(struct rcu_head *head)
{}

static void unbind_fence_release(struct dma_fence *fence)
{}

static const struct dma_fence_ops unbind_fence_ops =;

static void __i915_vma_resource_unhold(struct i915_vma_resource *vma_res)
{}

/**
 * i915_vma_resource_unhold - Unhold the signaling of the vma resource unbind
 * fence.
 * @vma_res: The vma resource.
 * @lockdep_cookie: The lockdep cookie returned from i915_vma_resource_hold.
 *
 * The function may leave a dma_fence critical section.
 */
void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie)
{
	/*
	 * NOTE(review): body elided ("{}") in this copy. Per the kerneldoc
	 * above, this pairs with i915_vma_resource_hold(), consumes
	 * @lockdep_cookie and may leave a dma_fence critical section.
	 * Restore the original implementation before building.
	 */
}

/**
 * i915_vma_resource_hold - Hold the signaling of the vma resource unbind fence.
 * @vma_res: The vma resource.
 * @lockdep_cookie: Pointer to a bool serving as a lockdep cookie that should
 * be given as an argument to the pairing i915_vma_resource_unhold.
 *
 * If returning true, the function enters a dma_fence signalling critical
 * section if not in one already.
 *
 * Return: true if holding successful, false if not.
 */
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie)
{
	/*
	 * The stripped body returned nothing from a bool function (UB for
	 * callers). Restore: holding succeeds only while at least one other
	 * hold reference is outstanding (hold_count has not dropped to 0).
	 */
	bool held = refcount_inc_not_zero(&vma_res->hold_count);

	/* Enter a dma-fence signalling critical section only on success. */
	if (held)
		*lockdep_cookie = dma_fence_begin_signalling();

	return held;
}

static void i915_vma_resource_unbind_work(struct work_struct *work)
{}

static int
i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
			       enum i915_sw_fence_notify state)
{
	/*
	 * NOTE(review): body elided ("{}") in this copy. As a non-void
	 * function, falling off the end without a return is undefined
	 * behavior for callers that use the result. Restore the original
	 * i915_sw_fence notify callback before building.
	 */
}

/**
 * i915_vma_resource_unbind - Unbind a vma resource
 * @vma_res: The vma resource to unbind.
 * @tlb: pointer to vma->obj->mm.tlb associated with the resource
 *	 to be stored at vma_res->tlb. When not-NULL, it will be used
 *	 to do TLB cache invalidation before freeing a VMA resource.
 *	 Used only for async unbind.
 *
 * At this point this function does little more than publish a fence that
 * signals immediately unless signaling is held back.
 *
 * Return: A refcounted pointer to a dma-fence that signals when unbinding is
 * complete.
 */
struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb)
{
	/*
	 * NOTE(review): body elided ("{}") in this copy. Per the kerneldoc
	 * above, it must publish and return a refcounted dma-fence that
	 * signals when unbinding completes; an empty non-void body is
	 * undefined behavior. Restore from the original source.
	 */
}

/**
 * __i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 *
 * Initializes the private members of a vma resource.
 */
void __i915_vma_resource_init(struct i915_vma_resource *vma_res)
{}

/*
 * Widen a lookup range by one GTT page on each side when the vm uses
 * cache coloring, so that unbinds of color-adjacent nodes are also
 * caught by the interval-tree search.
 */
static void
i915_vma_resource_color_adjust_range(struct i915_address_space *vm,
				     u64 *start,
				     u64 *end)
{
	if (i915_vm_has_cache_coloring(vm)) {
		if (*start)
			*start -= I915_GTT_PAGE_SIZE;
		*end += I915_GTT_PAGE_SIZE;
	}
}

/**
 * i915_vma_resource_bind_dep_sync - Wait for / sync all unbinds touching a
 * certain vm range.
 * @vm: The vm to look at.
 * @offset: The range start.
 * @size: The range size.
 * @intr: Whether to wait interruptibly.
 *
 * The function needs to be called with the vm lock held.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
 */
int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 offset,
				    u64 size,
				    bool intr)
{
	/*
	 * NOTE(review): body elided ("{}") in this copy. Per the kerneldoc
	 * above: must be called with the vm lock held and return 0 on
	 * success or -ERESTARTSYS if interrupted with @intr == true; an
	 * empty non-void body is undefined behavior. Restore from the
	 * original source.
	 */
}

/**
 * i915_vma_resource_bind_dep_sync_all - Wait for / sync all unbinds of a vm,
 * releasing the vm lock while waiting.
 * @vm: The vm to look at.
 *
 * The function may not be called with the vm lock held.
 * Typically this is called at vm destruction to finish any pending
 * unbind operations. The vm mutex is released while waiting to avoid
 * stalling kernel workqueues trying to grab the mutex.
 */
void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm)
{}

/**
 * i915_vma_resource_bind_dep_await - Have a struct i915_sw_fence await all
 * pending unbinds in a certain range of a vm.
 * @vm: The vm to look at.
 * @sw_fence: The struct i915_sw_fence that will be awaiting the unbinds.
 * @offset: The range start.
 * @size: The range size.
 * @intr: Whether to wait interruptibly.
 * @gfp: Allocation mode for memory allocations.
 *
 * The function makes @sw_fence await all pending unbinds in a certain
 * vm range before calling the complete notifier. To be able to await
 * each individual unbind, the function needs to allocate memory using
 * the @gfp allocation mode. If that fails, the function will instead
 * wait for the unbind fence to signal, using @intr to judge whether to
 * wait interruptible or not. Note that @gfp should ideally be selected so
 * as to avoid any expensive memory allocation stalls and rather fail and
 * synchronize itself. For now the vm mutex is required when calling this
 * function, which means that @gfp can't call into direct reclaim. In reality
 * this means that during heavy memory pressure, we will sync in this
 * function.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
 */
int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 offset,
				     u64 size,
				     bool intr,
				     gfp_t gfp)
{
	/*
	 * NOTE(review): body elided ("{}") in this copy. Per the kerneldoc
	 * above it must make @sw_fence await all pending unbinds in the
	 * range, falling back to a synchronous wait when allocation with
	 * @gfp fails, and return 0 or -ERESTARTSYS; an empty non-void body
	 * is undefined behavior. Restore from the original source.
	 */
}

void i915_vma_resource_module_exit(void)
{}

/*
 * Module load: create the slab cache backing i915_vma_resource_alloc().
 *
 * Return: 0 on success, -ENOMEM if the cache cannot be created.
 * (The stripped body returned nothing from a non-void function — UB.)
 */
int __init i915_vma_resource_module_init(void)
{
	slab_vma_resources = KMEM_CACHE(i915_vma_resource, 0);
	if (!slab_vma_resources)
		return -ENOMEM;

	return 0;
}