/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct kmem_cache *slab_cache;
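
/*
 * The cache itself is created in i915_active_module_init() and destroyed
 * in i915_active_module_exit() at the bottom of this file. Allocating a
 * node then follows the usual slab pattern, roughly (a sketch; the real
 * callers also deal with preallocation and racing inserts):
 *
 *	struct active_node *node;
 *
 *	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
 *	if (!node)
 *		return NULL;
 *	...
 *	kmem_cache_free(slab_cache, node);
 */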

struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}
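
/*
 * Barrier nodes: a barrier is a pseudo-node that keeps an engine's context
 * alive until an idle request can take its place. While a node acts as a
 * barrier, its fence slot holds an ERR_PTR rather than a real fence (this
 * is what is_barrier() tests), and the dma_fence_cb's list_head, which is
 * unused until the callback is armed, is punned to carry both the llist
 * linkage and the owning engine pointer for the conversion helpers below.
 */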

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
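
/*
 * Hook i915_active into the generic debugobjects machinery so that debug
 * builds catch lifetime errors such as activating an uninitialised
 * tracker or freeing one that is still active. active_debug_hint()
 * supplies a recognisable function pointer (the user's active or retire
 * callback) for debugobjects to print, which identifies the owner far
 * better than the address of the object itself.
 */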

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) {}
static inline void debug_active_activate(struct i915_active *ref) {}
static inline void debug_active_deactivate(struct i915_active *ref) {}
static inline void debug_active_fini(struct i915_active *ref) {}
static inline void debug_active_assert(struct i915_active *ref) {}

#endif
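
/*
 * Retirement: once the last reference is dropped and all tracked fences
 * have signalled, __active_retire() tears down the rbtree of nodes,
 * returning them to slab_cache, and invokes the user's retire() callback.
 * active_retire() is the common entry point; for trackers created with
 * I915_ACTIVE_RETIRE_SLEEPS it defers the teardown to active_work() on a
 * workqueue, as the retire() callback is then allowed to sleep.
 */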

static void
__active_retire(struct i915_active *ref)
{}

static void
active_work(struct work_struct *wrk)
{}

static void
active_retire(struct i915_active *ref)
{}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{}

static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{}

int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{}

static void __i915_active_activate(struct i915_active *ref)
{}

int i915_active_acquire(struct i915_active *ref)
{}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{}

void i915_active_release(struct i915_active *ref)
{}
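
/*
 * Typical usage of the tracker (a sketch; "obj", obj_active() and
 * obj_retire() stand in for caller-owned code and are not part of this
 * file):
 *
 *	i915_active_init(&obj->active, obj_active, obj_retire, 0);
 *	...
 *	err = i915_active_acquire(&obj->active);
 *	if (err)
 *		return err;
 *
 *	err = i915_active_add_request(&obj->active, rq);
 *	i915_active_release(&obj->active);
 *
 * The tracker then remains busy until every added request has been
 * retired, at which point obj_retire() is invoked.
 */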

static void enable_signaling(struct i915_active_fence *active)
{}

static int flush_barrier(struct active_node *it)
{}

static int flush_lazy_signals(struct i915_active *ref)
{}

int __i915_active_wait(struct i915_active *ref, int state)
{}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{}

void i915_active_fini(struct i915_active *ref)
{}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{}
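
/*
 * Barrier flow, as used by the engine-pm code: the owner first calls
 * i915_active_acquire_preallocate_barrier() to reserve one barrier node
 * per engine (recycling idle nodes via reuse_idle_barrier() above where
 * possible), then i915_active_acquire_barrier() to splice those nodes
 * into the tree, and finally i915_request_add_active_barriers() to hang
 * the barriers off the engine's next request so that they are released
 * when that request retires.
 */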

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{}

void i915_active_acquire_barrier(struct i915_active *ref)
{}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Gets and returns a reference to the previous fence
 * (if not already completed), which the caller must put after making sure
 * that it is executed before the new fence. To keep the order of fences
 * within the timeline of the i915_active_fence well defined, the timeline
 * should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{}
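
/*
 * Example (a sketch; "tl" is a hypothetical timeline owning @active, and
 * error handling is elided): replace the tracked fence under the timeline
 * lock, then order the new request after its predecessor:
 *
 *	struct dma_fence *prev;
 *
 *	lockdep_assert_held(&tl->mutex);
 *	prev = __i915_active_fence_set(active, &rq->fence);
 *	if (prev) {
 *		err = i915_request_await_dma_fence(rq, prev);
 *		dma_fence_put(prev);
 *	}
 *
 * This is the pattern wrapped by i915_active_fence_set() below.
 */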

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}
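
/*
 * i915_active_create() returns a heap-allocated tracker whose lifetime is
 * managed by the kref embedded in struct auto_active below: callers take
 * and drop references with i915_active_get() and i915_active_put(), while
 * the auto_active()/auto_retire() callbacks pin a reference across each
 * activation so the tracker cannot be freed while still tracking requests.
 */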

struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{}

static void auto_release(struct kref *ref)
{}

void i915_active_put(struct i915_active *ref)
{}

static int auto_active(struct i915_active *ref)
{}

static void auto_retire(struct i915_active *ref)
{}

struct i915_active *i915_active_create(void)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

void i915_active_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}

int __init i915_active_module_init(void)
{
	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	return slab_cache ? 0 : -ENOMEM;
}