
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

/* Scheduling properties with per-class min/max limits, see xe_exec_queue_get_prop_minmax(). */
enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{}

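/*
 * Allocate and set up the software state for a new exec queue, including any
 * user @extensions; hardware state (LRCs) is initialized later by
 * __xe_exec_queue_init().
 */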
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{}

static int __xe_exec_queue_init(struct xe_exec_queue *q)
{}

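/*
 * Create an exec queue for @vm, @width engines wide, on the hardware engines
 * selected by @logical_mask, applying any user @extensions.
 */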
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{}

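/*
 * Convenience wrapper around xe_exec_queue_create() that targets any engine
 * of @class on @gt, building the logical mask on behalf of the caller.
 */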
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{}

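/* Final kref release callback; call via xe_exec_queue_put(), not directly. */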
void xe_exec_queue_destroy(struct kref *ref)
{}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{}

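/* Derive the queue's name (e.g. "rcs0", "bcs1") from its engine class and @instance. */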
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{}

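/*
 * Look up the exec queue that @xef created with handle @id and return it with
 * an extra reference, or NULL if no such handle exists.
 */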
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{}

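/* Highest exec queue priority the calling process is allowed to request. */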
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{}

static bool xe_exec_queue_enforce_schedule_limit(void)
{}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

/* Indexed by the DRM_XE_EXEC_QUEUE_SET_PROPERTY_* values from uapi <drm/xe_drm.h>. */
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

/* Maximum number of chained user extensions that will be walked. */
#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{}

/* Map uapi engine classes (DRM_XE_ENGINE_CLASS_*) onto the driver's internal ones. */
static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

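/*
 * Translate a user-supplied engine class/instance pair into the matching
 * xe_hw_engine, or NULL if no such engine exists on this device.
 */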
static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{}

static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{}

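/*
 * Build the logical mask for a user-supplied placement of @width engines x
 * @num_placements and validate it; returns 0 if the placement is invalid.
 */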
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{}

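/* Handler for DRM_IOCTL_XE_EXEC_QUEUE_CREATE: create a queue and return its handle. */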
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{}

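/* Handler for DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY. */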
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{}

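/* Number of jobs submitted to @q that have not yet completed. */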
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
 * from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save the run
 * ticks calculated from the delta since the last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{}

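/*
 * Kill the exec queue: no further jobs may run on it. Called from the destroy
 * IOCTL and when the owning file is closed.
 */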
void xe_exec_queue_kill(struct xe_exec_queue *q)
{}

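/* Handler for DRM_IOCTL_XE_EXEC_QUEUE_DESTROY: kill the queue and drop the handle's reference. */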
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{}

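/*
 * Assert that the lock serializing access to @q's last fence is held: the VM
 * lock for VM bind queues, otherwise the VM's reservation lock.
 */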
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get the last fence; takes a reference on it.
 *
 * Return: the last fence if not signaled, the dma-fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference on @fence; when the
 * engine is closed, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{}