linux/drivers/gpu/drm/xe/xe_guc_submit.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_submit.h"

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/dma-fence-array.h>
#include <linux/math64.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_klvs_abi.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gpu_scheduler.h"
#include "xe_gt.h"
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_mocs.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_trace.h"
#include "xe_vm.h"

static struct xe_guc *
exec_queue_to_guc(struct xe_exec_queue *q)
{}

/*
 * Helpers for engine state, using an atomic as some of the bits can transition
 * at the same time (e.g. a suspend can be happening at the same time as a
 * schedule done response is being processed).
 */
#define EXEC_QUEUE_STATE_REGISTERED
#define EXEC_QUEUE_STATE_ENABLED
#define EXEC_QUEUE_STATE_PENDING_ENABLE
#define EXEC_QUEUE_STATE_PENDING_DISABLE
#define EXEC_QUEUE_STATE_DESTROYED
#define EXEC_QUEUE_STATE_SUSPENDED
#define EXEC_QUEUE_STATE_RESET
#define EXEC_QUEUE_STATE_KILLED
#define EXEC_QUEUE_STATE_WEDGED
#define EXEC_QUEUE_STATE_BANNED
#define EXEC_QUEUE_STATE_CHECK_TIMEOUT
#define EXEC_QUEUE_STATE_EXTRA_REF
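
/*
 * A minimal sketch of the accessor pattern the helpers below follow, assuming
 * the state bits live in an atomic_t at q->guc->state (shown here for the
 * REGISTERED bit only; the remaining bits are handled identically):
 */
static bool __maybe_unused example_exec_queue_registered(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
}

static void __maybe_unused example_set_exec_queue_registered(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
}

static void __maybe_unused example_clear_exec_queue_registered(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
}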

static bool exec_queue_registered(struct xe_exec_queue *q)
{}

static void set_exec_queue_registered(struct xe_exec_queue *q)
{}

static void clear_exec_queue_registered(struct xe_exec_queue *q)
{}

static bool exec_queue_enabled(struct xe_exec_queue *q)
{}

static void set_exec_queue_enabled(struct xe_exec_queue *q)
{}

static void clear_exec_queue_enabled(struct xe_exec_queue *q)
{}

static bool exec_queue_pending_enable(struct xe_exec_queue *q)
{}

static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
{}

static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
{}

static bool exec_queue_pending_disable(struct xe_exec_queue *q)
{}

static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
{}

static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
{}

static bool exec_queue_destroyed(struct xe_exec_queue *q)
{}

static void set_exec_queue_destroyed(struct xe_exec_queue *q)
{}

static bool exec_queue_banned(struct xe_exec_queue *q)
{}

static void set_exec_queue_banned(struct xe_exec_queue *q)
{}

static bool exec_queue_suspended(struct xe_exec_queue *q)
{}

static void set_exec_queue_suspended(struct xe_exec_queue *q)
{}

static void clear_exec_queue_suspended(struct xe_exec_queue *q)
{}

static bool exec_queue_reset(struct xe_exec_queue *q)
{}

static void set_exec_queue_reset(struct xe_exec_queue *q)
{}

static bool exec_queue_killed(struct xe_exec_queue *q)
{}

static void set_exec_queue_killed(struct xe_exec_queue *q)
{}

static bool exec_queue_wedged(struct xe_exec_queue *q)
{}

static void set_exec_queue_wedged(struct xe_exec_queue *q)
{}

static bool exec_queue_check_timeout(struct xe_exec_queue *q)
{}

static void set_exec_queue_check_timeout(struct xe_exec_queue *q)
{}

static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
{}

static bool exec_queue_extra_ref(struct xe_exec_queue *q)
{}

static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
{}

static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
{}

#ifdef CONFIG_PROVE_LOCKING
static int alloc_submit_wq(struct xe_guc *guc)
{}

static void free_submit_wq(struct xe_guc *guc)
{}

static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
{}
#else
static int alloc_submit_wq(struct xe_guc *guc)
{
	return 0;
}

static void free_submit_wq(struct xe_guc *guc)
{
}

static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
{
	return NULL;
}
#endif
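
/*
 * A hedged sketch of the CONFIG_PROVE_LOCKING branch elided above (the
 * submission_state.submit_wq member name is an assumption): a dedicated
 * ordered workqueue gives lockdep a distinct lock class to validate, while
 * the !CONFIG_PROVE_LOCKING build returns NULL to fall back to the system
 * workqueue.
 */
static int __maybe_unused example_alloc_submit_wq(struct xe_guc *guc)
{
	guc->submission_state.submit_wq =
		alloc_ordered_workqueue("xe-guc-submit", 0);

	return guc->submission_state.submit_wq ? 0 : -ENOMEM;
}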

static void guc_submit_fini(struct drm_device *drm, void *arg)
{}

static void guc_submit_wedged_fini(struct drm_device *drm, void *arg)
{}

static const struct xe_exec_queue_ops guc_exec_queue_ops;

static void primelockdep(struct xe_guc *guc)
{}

/**
 * xe_guc_submit_init() - Initialize GuC submission.
 * @guc: the &xe_guc to initialize
 * @num_ids: number of GuC context IDs to use
 *
 * The bare-metal or PF driver can pass ~0 as @num_ids to indicate that all
 * GuC context IDs supported by the GuC firmware should be used for submission.
 *
 * Only VF drivers have to provide an explicit number of GuC context IDs
 * that they can use for submission.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
{}
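
/*
 * A hedged usage sketch of the rule documented above (the caller and the
 * vf_num_ids source are illustrative, not the driver's actual call sites):
 * bare-metal/PF passes ~0 to use every GuC context ID the firmware supports,
 * while a VF passes the number of IDs it was provisioned with.
 */
static int __maybe_unused example_submit_init(struct xe_guc *guc, bool is_vf,
					      u32 vf_num_ids)
{
	return xe_guc_submit_init(guc, is_vf ? vf_num_ids : ~0u);
}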

static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{}

static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{}

static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{}

struct exec_queue_policy {};

static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
{}

static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
					      u16 guc_id)
{}

#define MAKE_EXEC_QUEUE_POLICY_ADD

MAKE_EXEC_QUEUE_POLICY_ADD
MAKE_EXEC_QUEUE_POLICY_ADD
MAKE_EXEC_QUEUE_POLICY_ADD
#undef MAKE_EXEC_QUEUE_POLICY_ADD
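
/*
 * A hedged sketch of what each MAKE_EXEC_QUEUE_POLICY_ADD(func, id) expansion
 * above typically generates (the h2g.klv[] and count members of struct
 * exec_queue_policy are assumptions, as its definition is elided): a helper
 * that appends one KLV (key-length-value) entry to the context-policies H2G
 * buffer.
 */
#define EXAMPLE_EXEC_QUEUE_POLICY_ADD(func, id)				\
static void example_policy_add_##func(struct exec_queue_policy *policy, \
				      u32 data)				\
{									\
	policy->h2g.klv[policy->count].kl =				\
		FIELD_PREP(GUC_KLV_0_KEY,				\
			   GUC_CONTEXT_POLICIES_KLV_ID_##id) |		\
		FIELD_PREP(GUC_KLV_0_LEN, 1);				\
	policy->h2g.klv[policy->count].value = data;			\
	policy->count++;						\
}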

static const int xe_exec_queue_prio_to_guc[] =;
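
/*
 * A hedged sketch of the translation table elided above (the exact enum
 * spellings are assumptions): xe exec queue priority levels map onto GuC
 * client priority levels.
 */
static const int example_prio_to_guc[] = {
	[XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
	[XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
	[XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
	[XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
};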

static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{}

static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
{}

#define parallel_read(xe_, map_, field_)
#define parallel_write(xe_, map_, field_, val_)
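
/*
 * A hedged sketch of the accessors elided above (assuming the parallel-submit
 * scratch page is described by struct guc_submit_parallel_scratch): going
 * through the xe_map helpers keeps the accesses correct for both system and
 * IO memory.
 */
#define example_parallel_read(xe_, map_, field_)			\
	xe_map_rd_field(xe_, &(map_), 0, struct guc_submit_parallel_scratch, \
			field_)
#define example_parallel_write(xe_, map_, field_, val_)		\
	xe_map_wr_field(xe_, &(map_), 0, struct guc_submit_parallel_scratch, \
			field_, val_)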

static void __register_mlrc_exec_queue(struct xe_guc *guc,
				       struct xe_exec_queue *q,
				       struct guc_ctxt_registration_info *info)
{}

static void __register_exec_queue(struct xe_guc *guc,
				  struct guc_ctxt_registration_info *info)
{}

static void register_exec_queue(struct xe_exec_queue *q)
{}

static u32 wq_space_until_wrap(struct xe_exec_queue *q)
{}

static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{}

static int wq_noop_append(struct xe_exec_queue *q)
{}

static void wq_item_append(struct xe_exec_queue *q)
{}
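
/*
 * A hedged sketch of the ring math behind the wq_*() helpers above (the head,
 * tail and size parameters are passed in for illustration; the real helpers
 * read them from the exec queue's parallel scratch state): the GuC workqueue
 * is a circular buffer in which the KMD produces at tail and the GuC consumes
 * at head, so free space is the usual circ_buf arithmetic and a NOOP item is
 * appended whenever an item would otherwise straddle the wrap point.
 */
static u32 __maybe_unused example_wq_space_until_wrap(u32 tail, u32 wq_size)
{
	return wq_size - tail;
}

static u32 __maybe_unused example_wq_space(u32 head, u32 tail, u32 wq_size)
{
	return CIRC_SPACE(tail, head, wq_size);
}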

#define RESUME_PENDING
static void submit_exec_queue(struct xe_exec_queue *q)
{}

static struct dma_fence *
guc_exec_queue_run_job(struct drm_sched_job *drm_job)
{}

static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{}

static int guc_read_stopped(struct xe_guc *guc)
{}

#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable)
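
/*
 * A hedged sketch of the helper macro elided above (the token pasting against
 * GUC_CONTEXT_##enable_disable is an assumption): it declares the H2G action
 * payload for a schedule-context-mode-set request.
 */
#define EXAMPLE_SCHED_CONTEXT_ACTION(q, enable_disable)			\
	u32 action[] = {						\
		XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,			\
		(q)->guc->id,						\
		GUC_CONTEXT_##enable_disable,				\
	}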

static void disable_scheduling_deregister(struct xe_guc *guc,
					  struct xe_exec_queue *q)
{}

static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
{}

/**
 * xe_guc_submit_wedge() - Wedge GuC submission
 * @guc: the GuC object
 *
 * Save the state of exec queues registered with the GuC by taking a ref to
 * each queue. Register a DRMM handler to drop the refs upon driver unload.
 */
void xe_guc_submit_wedge(struct xe_guc *guc)
{}

static bool guc_submit_hint_wedged(struct xe_guc *guc)
{}

static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
{}

#define ADJUST_FIVE_PERCENT(__t)
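
/*
 * A hedged sketch of the margin helper elided above (assuming the timeout is
 * widened by five percent before comparison, to absorb timestamp sampling
 * error):
 */
#define EXAMPLE_ADJUST_FIVE_PERCENT(__t)	mul_u64_u32_div(__t, 105, 100)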

static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
{}

static void enable_scheduling(struct xe_exec_queue *q)
{}

static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
{}

static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
{}

static enum drm_gpu_sched_stat
guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
{}

static void __guc_exec_queue_fini_async(struct work_struct *w)
{}

static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
{}

static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
{}

static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{}

static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
{}

static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
{}

static void suspend_fence_signal(struct xe_exec_queue *q)
{}

static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
{}

static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
{}

#define CLEANUP
#define SET_SCHED_PROPS
#define SUSPEND
#define RESUME

static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{}
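
/*
 * A hedged sketch of the dispatch elided above (assuming msg->opcode carries
 * one of the four opcodes defined before it): the handler simply fans out to
 * the __guc_exec_queue_process_msg_*() helpers.
 */
static void __maybe_unused example_process_msg(struct xe_sched_msg *msg)
{
	switch (msg->opcode) {
	case CLEANUP:
		__guc_exec_queue_process_msg_cleanup(msg);
		break;
	case SET_SCHED_PROPS:
		__guc_exec_queue_process_msg_set_sched_props(msg);
		break;
	case SUSPEND:
		__guc_exec_queue_process_msg_suspend(msg);
		break;
	case RESUME:
		__guc_exec_queue_process_msg_resume(msg);
		break;
	}
}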

static const struct drm_sched_backend_ops drm_sched_ops =;

static const struct xe_sched_backend_ops xe_sched_ops =;
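
/*
 * A hedged sketch of the two initializers elided above, wiring the callbacks
 * defined in this file into the DRM scheduler and xe's scheduler wrapper
 * (member names follow the respective ops structs and are assumptions here):
 */
static const struct drm_sched_backend_ops example_drm_sched_ops = {
	.run_job = guc_exec_queue_run_job,
	.free_job = guc_exec_queue_free_job,
	.timedout_job = guc_exec_queue_timedout_job,
};

static const struct xe_sched_backend_ops example_xe_sched_ops = {
	.process_msg = guc_exec_queue_process_msg,
};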

static int guc_exec_queue_init(struct xe_exec_queue *q)
{}

static void guc_exec_queue_kill(struct xe_exec_queue *q)
{}

static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
				   u32 opcode)
{}

#define STATIC_MSG_CLEANUP
#define STATIC_MSG_SUSPEND
#define STATIC_MSG_RESUME
static void guc_exec_queue_fini(struct xe_exec_queue *q)
{}

static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
				       enum xe_exec_queue_priority priority)
{}

static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
{}

static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
					      u32 preempt_timeout_us)
{}

static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{}

static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{}

static void guc_exec_queue_resume(struct xe_exec_queue *q)
{}

static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
{}

/*
 * All of these functions are an abstraction layer which other parts of XE can
 * use to trap into the GuC backend. Aside from init, these functions really
 * shouldn't do much more than trap into the DRM scheduler, which synchronizes
 * these operations.
 */
static const struct xe_exec_queue_ops guc_exec_queue_ops =;
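
/*
 * A hedged sketch of the vtable elided above (member names are assumptions
 * inferred from the guc_exec_queue_*() callbacks defined in this file):
 */
static const struct xe_exec_queue_ops example_exec_queue_ops = {
	.init = guc_exec_queue_init,
	.kill = guc_exec_queue_kill,
	.fini = guc_exec_queue_fini,
	.set_priority = guc_exec_queue_set_priority,
	.set_timeslice = guc_exec_queue_set_timeslice,
	.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
	.suspend = guc_exec_queue_suspend,
	.suspend_wait = guc_exec_queue_suspend_wait,
	.resume = guc_exec_queue_resume,
	.reset_status = guc_exec_queue_reset_status,
};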

static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
{}

int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{}

void xe_guc_submit_reset_wait(struct xe_guc *guc)
{}

void xe_guc_submit_stop(struct xe_guc *guc)
{}

static void guc_exec_queue_start(struct xe_exec_queue *q)
{}

int xe_guc_submit_start(struct xe_guc *guc)
{}

static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{}

static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
{}

static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
			      u32 runnable_state)
{}

int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{}

static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
{}

int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{}

int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{}

int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
					       u32 len)
{}

int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{}

static void
guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
				   struct xe_guc_submit_exec_queue_snapshot *snapshot)
{}

static void
guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{}

/**
 * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
 * @q: faulty exec queue
 *
 * The snapshot can be printed at a later stage, e.g. during dev_coredump
 * analysis.
 *
 * Returns: a GuC Submit Engine snapshot object that must be freed by the
 * caller, using xe_guc_exec_queue_snapshot_free().
 */
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
{}

/**
 * xe_guc_exec_queue_snapshot_capture_delayed - Take the delayed part of a GuC engine snapshot.
 * @snapshot: Previously captured snapshot of a job.
 *
 * This captures data that requires taking locks, so it cannot be done in the
 * signaling path.
 */
void
xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{}

/**
 * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC Submit Engine snapshot object.
 */
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{}

/**
 * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
 * snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{}
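
/*
 * A hedged usage sketch of the snapshot lifecycle documented above (the
 * caller shown is illustrative): capture from the signaling path, run the
 * delayed capture once locks may be taken, then print and free.
 */
static void __maybe_unused example_snapshot_roundtrip(struct xe_exec_queue *q,
						      struct drm_printer *p)
{
	struct xe_guc_submit_exec_queue_snapshot *snapshot;

	snapshot = xe_guc_exec_queue_snapshot_capture(q);
	if (!snapshot)
		return;

	xe_guc_exec_queue_snapshot_capture_delayed(snapshot);
	xe_guc_exec_queue_snapshot_print(snapshot, p);
	xe_guc_exec_queue_snapshot_free(snapshot);
}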

static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{}

/**
 * xe_guc_submit_print - GuC Submit Print.
 * @guc: GuC.
 * @p: drm_printer where it will be printed out.
 *
 * This function captures and prints snapshots of **all** GuC engines.
 */
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
{}