linux/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/gen8_engine_cs.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"

#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_irq.h"
#include "i915_trace.h"

/**
 * DOC: GuC-based command submission
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write (0xC4C8).
 * Firmware writes a success/fail code back to the action register after
 * processing the request. The kernel driver polls waiting for this update and
 * then proceeds.
 *
 * Command Transport buffers (CTBs):
 * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
 * - G2H) are a message interface between the i915 and GuC.
 *
 * Context registration:
 * Before a context can be submitted it must be registered with the GuC via a
 * H2G. A unique guc_id is associated with each context. The context is either
 * registered at request creation time (normal operation) or at submission time
 * (abnormal operation, e.g. after a reset).
 *
 * Context submission:
 * The i915 updates the LRC tail value in memory. The i915 must enable the
 * scheduling of the context within the GuC for the GuC to actually consider it.
 * Therefore, the first time a disabled context is submitted we use a schedule
 * enable H2G, while follow up submissions are done via the context submit H2G,
 * which informs the GuC that a previously enabled context has new work
 * available.
 *
 * Context unpin:
 * To unpin a context a H2G is used to disable scheduling. When the
 * corresponding G2H returns indicating the scheduling disable operation has
 * completed it is safe to unpin the context. While a disable is in flight it
 * isn't safe to resubmit the context so a fence is used to stall all future
 * requests of that context until the G2H is returned. Because this interaction
 * with the GuC takes a non-zero amount of time, we delay the disabling of
 * scheduling after the pin count goes to zero by a configurable period of time
 * (see SCHED_DISABLE_DELAY_MS). The idea is that this gives the user a window
 * of time to resubmit something on the context before paying for this costly
 * operation. The delay is only applied if the context isn't closed and the
 * guc_id usage is below a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD).
 *
 * Context deregistration:
 * Before a context can be destroyed or if we steal its guc_id we must
 * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
 * safe to submit anything to this guc_id until the deregister completes so a
 * fence is used to stall all requests associated with this guc_id until the
 * corresponding G2H returns indicating the guc_id has been deregistered.
 *
 * submission_state.guc_ids:
 * A unique number associated with private GuC context data, passed in during
 * context registration / submission / deregistration. 64k guc_ids are
 * available. A simple ida is used for allocation.
 *
 * Stealing guc_ids:
 * If no guc_ids are available they can be stolen from another context at
 * request creation time if that context is unpinned. If a guc_id can't be found
 * we punt this problem to the user as we believe this is nearly impossible to
 * hit during normal use cases.
 *
 * Locking:
 * In the GuC submission code we have 3 basic spin locks which protect
 * everything. Details about each below.
 *
 * sched_engine->lock
 * This is the submission lock for all contexts that share an i915 schedule
 * engine (sched_engine), thus only one of the contexts which share a
 * sched_engine can be submitting at a time. Currently only one sched_engine is
 * used for all of GuC submission but that could change in the future.
 *
 * guc->submission_state.lock
 * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
 * list.
 *
 * ce->guc_state.lock
 * Protects everything under ce->guc_state. Ensures that a context is in the
 * correct state before issuing a H2G. e.g. We don't issue a schedule disable
 * on a disabled context (bad idea), we don't issue a schedule enable when a
 * schedule disable is in flight, etc... Also protects list of inflight requests
 * on the context and the priority management state. Lock is individual to each
 * context.
 *
 * Lock ordering rules:
 * sched_engine->lock -> ce->guc_state.lock
 * guc->submission_state.lock -> ce->guc_state.lock
 *
 * Reset races:
 * When a full GT reset is triggered it is assumed that some G2H responses to
 * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
 * fatal as we do certain operations upon receiving a G2H (e.g. destroy
 * contexts, release guc_ids, etc...). When this occurs we can scrub the
 * context state and clean up appropriately; however, this is quite racy.
 * To avoid races, the reset code must disable submission before scrubbing for
 * the missing G2H, while the submission code must check for submission being
 * disabled and skip sending H2Gs and updating context states when it is. Both
 * sides must also make sure to hold the relevant locks.
 */
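
/*
 * A minimal, hypothetical illustration of the lock ordering rules above (not
 * an actual code path in this driver): ce->guc_state.lock is always the
 * innermost lock, nested under either sched_engine->lock or
 * guc->submission_state.lock.
 */
__maybe_unused
static void example_lock_nesting(struct i915_sched_engine *sched_engine,
				 struct intel_context *ce)
{
	unsigned long flags;

	/* Outer lock first (sched_engine->lock or guc->submission_state.lock) */
	spin_lock_irqsave(&sched_engine->lock, flags);
	/* ...then the per-context lock, which is always taken last */
	spin_lock(&ce->guc_state.lock);

	/* Issue H2Gs / update ce->guc_state here */

	spin_unlock(&ce->guc_state.lock);
	spin_unlock_irqrestore(&sched_engine->lock, flags);
}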

/* GuC Virtual Engine */
struct guc_virtual_engine {};

static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
		   unsigned long flags);

static struct intel_context *
guc_create_parallel(struct intel_engine_cs **engines,
		    unsigned int num_siblings,
		    unsigned int width);

#define GUC_REQUEST_SIZE

/*
 * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
 * per the GuC submission interface. A different allocation algorithm is used
 * (bitmap vs. ida) for multi-lrc and single-lrc, hence the need to partition
 * the guc_id space. We believe the number of multi-lrc contexts in use should
 * be low and 1/16 should be sufficient. A minimum of 32 guc_ids is reserved
 * for multi-lrc.
 */
#define NUMBER_MULTI_LRC_GUC_ID(guc)
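
/*
 * A hedged sketch of the split allocation described above (hypothetical
 * helper, not the driver's allocator): multi-lrc ids come from a bitmap so a
 * contiguous block can be reserved, while single-lrc ids come from an ida
 * covering the remainder of the id space.
 */
__maybe_unused
static int example_alloc_guc_id(struct ida *ida, unsigned long *mlrc_bitmap,
				u32 mlrc_ids, u32 total_ids, u32 contig_needed)
{
	if (contig_needed > 1)
		/* order_base_2() rounds the block size up to a power of two */
		return bitmap_find_free_region(mlrc_bitmap, mlrc_ids,
					       order_base_2(contig_needed));

	/* Single-lrc ids live above the multi-lrc partition */
	return ida_alloc_range(ida, mlrc_ids, total_ids - 1, GFP_KERNEL);
}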

/*
 * Below is a set of functions which control the GuC scheduling state; these
 * require a lock to be held.
 */
#define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER
#define SCHED_STATE_DESTROYED
#define SCHED_STATE_PENDING_DISABLE
#define SCHED_STATE_BANNED
#define SCHED_STATE_ENABLED
#define SCHED_STATE_PENDING_ENABLE
#define SCHED_STATE_REGISTERED
#define SCHED_STATE_POLICY_REQUIRED
#define SCHED_STATE_CLOSED
#define SCHED_STATE_BLOCKED_SHIFT
#define SCHED_STATE_BLOCKED
#define SCHED_STATE_BLOCKED_MASK

static inline void init_sched_state(struct intel_context *ce)
{}

/*
 * Kernel contexts can have SCHED_STATE_REGISTERED after suspend.
 * A context close can race with the submission path, so SCHED_STATE_CLOSED
 * can be set immediately before we try to register.
 */
#define SCHED_STATE_VALID_INIT

__maybe_unused
static bool sched_state_is_init(struct intel_context *ce)
{}

static inline bool
context_wait_for_deregister_to_register(struct intel_context *ce)
{}

static inline void
set_context_wait_for_deregister_to_register(struct intel_context *ce)
{}

static inline void
clr_context_wait_for_deregister_to_register(struct intel_context *ce)
{}

static inline bool
context_destroyed(struct intel_context *ce)
{}

static inline void
set_context_destroyed(struct intel_context *ce)
{}

static inline void
clr_context_destroyed(struct intel_context *ce)
{}

static inline bool context_pending_disable(struct intel_context *ce)
{}

static inline void set_context_pending_disable(struct intel_context *ce)
{}

static inline void clr_context_pending_disable(struct intel_context *ce)
{}

static inline bool context_banned(struct intel_context *ce)
{}

static inline void set_context_banned(struct intel_context *ce)
{}

static inline void clr_context_banned(struct intel_context *ce)
{}

static inline bool context_enabled(struct intel_context *ce)
{}

static inline void set_context_enabled(struct intel_context *ce)
{}

static inline void clr_context_enabled(struct intel_context *ce)
{}

static inline bool context_pending_enable(struct intel_context *ce)
{}

static inline void set_context_pending_enable(struct intel_context *ce)
{}

static inline void clr_context_pending_enable(struct intel_context *ce)
{}

static inline bool context_registered(struct intel_context *ce)
{}

static inline void set_context_registered(struct intel_context *ce)
{}

static inline void clr_context_registered(struct intel_context *ce)
{}

static inline bool context_policy_required(struct intel_context *ce)
{}

static inline void set_context_policy_required(struct intel_context *ce)
{}

static inline void clr_context_policy_required(struct intel_context *ce)
{}

static inline bool context_close_done(struct intel_context *ce)
{}

static inline void set_context_close_done(struct intel_context *ce)
{}

static inline u32 context_blocked(struct intel_context *ce)
{}

static inline void incr_context_blocked(struct intel_context *ce)
{}

static inline void decr_context_blocked(struct intel_context *ce)
{}

static struct intel_context *
request_to_scheduling_context(struct i915_request *rq)
{}

static inline bool context_guc_id_invalid(struct intel_context *ce)
{}

static inline void set_context_guc_id_invalid(struct intel_context *ce)
{}

static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
{}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{}

/*
 * When using multi-lrc submission a scratch memory area is reserved in the
 * parent's context state for the process descriptor, work queue, and handshake
 * between the parent + children contexts to insert safe preemption points
 * between each of the BBs. Currently the scratch area is sized to a page.
 *
 * The layout of this scratch area is below:
 * 0						guc_process_desc
 * + sizeof(struct guc_process_desc)		child go
 * + CACHELINE_BYTES				child join[0]
 * ...
 * + CACHELINE_BYTES				child join[n - 1]
 * ...						unused
 * PARENT_SCRATCH_SIZE / 2			work queue start
 * ...						work queue
 * PARENT_SCRATCH_SIZE - 1			work queue end
 */
#define WQ_SIZE
#define WQ_OFFSET
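
/*
 * Hedged sketch (hypothetical helper, not the driver's own accounting): for a
 * power-of-two sized work queue, the free space between the KMD-owned tail
 * (producer index) and the GuC-owned head (consumer index) can be computed
 * with CIRC_SPACE() from <linux/circ_buf.h>, which is included above.
 */
static inline u32 example_wq_free_space(u32 wqi_tail, u32 guc_head, u32 wq_size)
{
	/* CIRC_SPACE() takes the producer index first, then the consumer */
	return CIRC_SPACE(wqi_tail, guc_head, wq_size);
}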

struct sync_semaphore {};

struct parent_scratch {};

static u32 __get_parent_scratch_offset(struct intel_context *ce)
{}

static u32 __get_wq_offset(struct intel_context *ce)
{}

static struct parent_scratch *
__get_parent_scratch(struct intel_context *ce)
{}

static struct guc_process_desc_v69 *
__get_process_desc_v69(struct intel_context *ce)
{}

static struct guc_sched_wq_desc *
__get_wq_desc_v70(struct intel_context *ce)
{}

static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
{}

static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
{}

static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
{}

static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
{}

static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
{}

static inline bool guc_submission_initialized(struct intel_guc *guc)
{}

static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
{}

static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{}

static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
				      struct intel_context *ce)
{}

static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
{}

static void decr_outstanding_submission_g2h(struct intel_guc *guc)
{}

static int guc_submission_send_busy_loop(struct intel_guc *guc,
					 const u32 *action,
					 u32 len,
					 u32 g2h_len_dw,
					 bool loop)
{}

int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
				   atomic_t *wait_var,
				   bool interruptible,
				   long timeout)
{}

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
{}

static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
static int try_context_registration(struct intel_context *ce, bool loop);

static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{}

static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{}

static inline void guc_set_lrc_tail(struct i915_request *rq)
{}

static inline int rq_prio(const struct i915_request *rq)
{}

static bool is_multi_lrc_rq(struct i915_request *rq)
{}

static bool can_merge_rq(struct i915_request *rq,
			 struct i915_request *last)
{}

static u32 wq_space_until_wrap(struct intel_context *ce)
{}

static void write_wqi(struct intel_context *ce, u32 wqi_size)
{}

static int guc_wq_noop_append(struct intel_context *ce)
{}

static int __guc_wq_item_append(struct i915_request *rq)
{}

static int guc_wq_item_append(struct intel_guc *guc,
			      struct i915_request *rq)
{}

static bool multi_lrc_submit(struct i915_request *rq)
{}

static int guc_dequeue_one_context(struct intel_guc *guc)
{}

static void guc_submission_tasklet(struct tasklet_struct *t)
{}

static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
{}

static void __guc_context_destroy(struct intel_context *ce);
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
static void guc_signal_context_fence(struct intel_context *ce);
static void guc_cancel_context_requests(struct intel_context *ce);
static void guc_blocked_fence_complete(struct intel_context *ce);

static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
{}

/*
 * GuC stores busyness stats for each engine at context in/out boundaries. A
 * context 'in' logs execution start time, 'out' adds in -> out delta to total.
 * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
 * GuC.
 *
 * __i915_pmu_event_read samples engine busyness. When sampling, if context id
 * is valid (!= ~0) and start is non-zero, the engine is considered to be
 * active. For an active engine total busyness = total + (now - start), where
 * 'now' is the time at which the busyness is sampled. For inactive engine,
 * total busyness = total.
 *
 * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
 *
 * The start and total values provided by GuC are 32 bits and wrap around in a
 * few minutes. Since perf pmu provides busyness as 64 bit monotonically
 * increasing ns values, there is a need for this implementation to account for
 * overflows and extend the GuC provided values to 64 bits before returning
 * busyness to the user. In order to do that, a worker runs periodically with a
 * period of 1/8th the time it takes for the timestamp to wrap (i.e. roughly
 * once every 27 seconds for a gt clock frequency of 19.2 MHz).
 */
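
/*
 * A minimal sketch of the busyness calculation described above (hypothetical
 * helper): all values are in gt clocks and already extended to 64 bits.
 */
static inline u64 example_engine_busyness_clks(u64 total, u64 start, u64 now,
					       bool active)
{
	/* An active engine accrues the in-progress interval on top of total */
	return active ? total + (now - start) : total;
}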

#define WRAP_TIME_CLKS
#define POLL_TIME_CLKS

static void
__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
{}
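
/*
 * A hedged sketch of the general wrap-extension idea (not the driver's exact
 * algorithm, which also consults the last sampled gt timestamp): carry the
 * upper 32 bits forward and bump them whenever the new 32-bit sample appears
 * to have wrapped past the previous one.
 */
__maybe_unused
static void example_extend_wrapping_u32(u64 *prev, u32 new_lo)
{
	u64 hi = upper_32_bits(*prev);

	if (new_lo < lower_32_bits(*prev))
		hi++;	/* the 32-bit counter wrapped since the last sample */

	*prev = (hi << 32) | new_lo;
}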

#define record_read(map_, field_)

/*
 * GuC updates shared memory and KMD reads it. Since this is not synchronized,
 * we run into a race where the value read is inconsistent. Sometimes the
 * inconsistency is in reading the upper (most significant) bytes of the
 * last_in value when this race occurs. Two kinds of corruption are seen - the
 * upper 8 bits are zero or the upper 24 bits are zero. Since the truncated
 * values are still non-zero, it is non-trivial to determine their validity.
 * Instead we read the values multiple times until they are consistent. In test
 * runs, three attempts were enough to get consistent values. The upper bound is
 * set to 6 attempts and may need to be tuned for any new occurrences.
 */
static void __get_engine_usage_record(struct intel_engine_cs *engine,
				      u32 *last_in, u32 *id, u32 *total)
{}
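
/*
 * A hedged sketch of the retry-until-consistent read described above
 * (hypothetical helper): keep re-reading the shared value until two
 * back-to-back samples agree, bounded by a small number of attempts.
 */
#define EXAMPLE_READ_ATTEMPTS 6
__maybe_unused
static u32 example_read_consistent(const u32 *shared)
{
	u32 a, b = READ_ONCE(*shared);
	int i;

	for (i = 0; i < EXAMPLE_READ_ATTEMPTS; i++) {
		a = b;
		b = READ_ONCE(*shared);
		if (a == b)
			break;
	}

	return b;
}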

static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{}

static u32 gpm_timestamp_shift(struct intel_gt *gt)
{}

static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{}

/*
 * Unlike the execlist mode of submission, total and active times are in terms
 * of gt clocks. The *now parameter is retained to return the cpu time at which
 * the busyness was sampled.
 */
static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
{}

static void guc_enable_busyness_worker(struct intel_guc *guc)
{}

static void guc_cancel_busyness_worker(struct intel_guc *guc)
{}

static void __reset_guc_busyness_stats(struct intel_guc *guc)
{}

static void __update_guc_busyness_stats(struct intel_guc *guc)
{}

static void __guc_context_update_stats(struct intel_context *ce)
{}

static void guc_context_update_stats(struct intel_context *ce)
{}

static void guc_timestamp_ping(struct work_struct *wrk)
{}

static int guc_action_enable_usage_stats(struct intel_guc *guc)
{}

static int guc_init_engine_stats(struct intel_guc *guc)
{}

static void guc_fini_engine_stats(struct intel_guc *guc)
{}

void intel_guc_busyness_park(struct intel_gt *gt)
{}

void intel_guc_busyness_unpark(struct intel_gt *gt)
{}

static inline bool
submission_disabled(struct intel_guc *guc)
{}
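
/*
 * Hedged sketch of the reset-race rule from the DOC comment at the top of this
 * file (hypothetical helper): the submission side checks for disabled
 * submission under the relevant lock and skips H2Gs / context state updates
 * while a reset is scrubbing state.
 */
static inline bool example_must_skip_h2g(struct intel_guc *guc,
					 struct intel_context *ce)
{
	lockdep_assert_held(&ce->guc_state.lock);

	return submission_disabled(guc);
}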

static void disable_submission(struct intel_guc *guc)
{}

static void enable_submission(struct intel_guc *guc)
{}

static void guc_flush_submissions(struct intel_guc *guc)
{}

void intel_guc_submission_flush_work(struct intel_guc *guc)
{}

static void guc_flush_destroyed_contexts(struct intel_guc *guc);

void intel_guc_submission_reset_prepare(struct intel_guc *guc)
{}

static struct intel_engine_cs *
guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
{}

static inline struct intel_engine_cs *
__context_to_physical_engine(struct intel_context *ce)
{}

static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
{}

static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
{}

static void guc_reset_nop(struct intel_engine_cs *engine)
{}

static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
{}

static void
__unwind_incomplete_requests(struct intel_context *ce)
{}

static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
{}

void wake_up_all_tlb_invalidate(struct intel_guc *guc)
{}

void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{}

static void guc_cancel_context_requests(struct intel_context *ce)
{}

static void
guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
{}

void intel_guc_submission_cancel_requests(struct intel_guc *guc)
{}

void intel_guc_submission_reset_finish(struct intel_guc *guc)
{}

static void destroyed_worker_func(struct work_struct *w);
static void reset_fail_worker_func(struct work_struct *w);

bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc)
{}

static int init_tlb_lookup(struct intel_guc *guc)
{}

static void fini_tlb_lookup(struct intel_guc *guc)
{}

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{}

void intel_guc_submission_fini(struct intel_guc *guc)
{}

static inline void queue_request(struct i915_sched_engine *sched_engine,
				 struct i915_request *rq,
				 int prio)
{}

static int guc_bypass_tasklet_submit(struct intel_guc *guc,
				     struct i915_request *rq)
{}

static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
{}

static void guc_submit_request(struct i915_request *rq)
{}

static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
{}

static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{}

static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{}

static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
{}

static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
{}

#define PIN_GUC_ID_TRIES
static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
{}

static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
{}

static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
					       struct intel_context *ce,
					       u32 guc_id,
					       u32 offset,
					       bool loop)
{}

static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
					       struct intel_context *ce,
					       struct guc_ctxt_registration_info *info,
					       bool loop)
{}

static int __guc_action_register_context_v69(struct intel_guc *guc,
					     u32 guc_id,
					     u32 offset,
					     bool loop)
{}

static int __guc_action_register_context_v70(struct intel_guc *guc,
					     struct guc_ctxt_registration_info *info,
					     bool loop)
{}

static void prepare_context_registration_info_v69(struct intel_context *ce);
static void prepare_context_registration_info_v70(struct intel_context *ce,
						  struct guc_ctxt_registration_info *info);

static int
register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
{}

static int
register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
{}

static int register_context(struct intel_context *ce, bool loop)
{}

static int __guc_action_deregister_context(struct intel_guc *guc,
					   u32 guc_id)
{}

static int deregister_context(struct intel_context *ce, u32 guc_id)
{}

static inline void clear_children_join_go_memory(struct intel_context *ce)
{}

static inline u32 get_children_go_value(struct intel_context *ce)
{}

static inline u32 get_children_join_value(struct intel_context *ce,
					  u8 child_index)
{}

struct context_policy {};

static u32 __guc_context_policy_action_size(struct context_policy *policy)
{}

static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
{}

#define MAKE_CONTEXT_POLICY_ADD

MAKE_CONTEXT_POLICY_ADD
MAKE_CONTEXT_POLICY_ADD
MAKE_CONTEXT_POLICY_ADD
MAKE_CONTEXT_POLICY_ADD
MAKE_CONTEXT_POLICY_ADD

#undef MAKE_CONTEXT_POLICY_ADD

static int __guc_context_set_context_policies(struct intel_guc *guc,
					      struct context_policy *policy,
					      bool loop)
{}

static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{}

static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
					struct guc_lrc_desc_v69 *desc)
{}

static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
{}

static void prepare_context_registration_info_v69(struct intel_context *ce)
{}

static void prepare_context_registration_info_v70(struct intel_context *ce,
						  struct guc_ctxt_registration_info *info)
{}

static int try_context_registration(struct intel_context *ce, bool loop)
{}

static int __guc_context_pre_pin(struct intel_context *ce,
				 struct intel_engine_cs *engine,
				 struct i915_gem_ww_ctx *ww,
				 void **vaddr)
{}

static int __guc_context_pin(struct intel_context *ce,
			     struct intel_engine_cs *engine,
			     void *vaddr)
{}

static int guc_context_pre_pin(struct intel_context *ce,
			       struct i915_gem_ww_ctx *ww,
			       void **vaddr)
{}

static int guc_context_pin(struct intel_context *ce, void *vaddr)
{}

static void guc_context_unpin(struct intel_context *ce)
{}

static void guc_context_post_unpin(struct intel_context *ce)
{}

static void __guc_context_sched_enable(struct intel_guc *guc,
				       struct intel_context *ce)
{}

static void __guc_context_sched_disable(struct intel_guc *guc,
					struct intel_context *ce,
					u16 guc_id)
{}

static void guc_blocked_fence_complete(struct intel_context *ce)
{}

static void guc_blocked_fence_reinit(struct intel_context *ce)
{}

static u16 prep_context_pending_disable(struct intel_context *ce)
{}

static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
{}

#define SCHED_STATE_MULTI_BLOCKED_MASK
#define SCHED_STATE_NO_UNBLOCK

static bool context_cant_unblock(struct intel_context *ce)
{}

static void guc_context_unblock(struct intel_context *ce)
{}

static void guc_context_cancel_request(struct intel_context *ce,
				       struct i915_request *rq)
{}

static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
						 u16 guc_id,
						 u32 preemption_timeout)
{}

static void
guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
		   unsigned int preempt_timeout_ms)
{}

static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
			     unsigned long flags)
	__releases(ce->guc_state.lock)
{}

static bool bypass_sched_disable(struct intel_guc *guc,
				 struct intel_context *ce)
{}

static void __delay_sched_disable(struct work_struct *wrk)
{}

static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
{}

static void guc_context_sched_disable(struct intel_context *ce)
{}

static void guc_context_close(struct intel_context *ce)
{}

static inline int guc_lrc_desc_unpin(struct intel_context *ce)
{}

static void __guc_context_destroy(struct intel_context *ce)
{}

static void guc_flush_destroyed_contexts(struct intel_guc *guc)
{}

static void deregister_destroyed_contexts(struct intel_guc *guc)
{}

static void destroyed_worker_func(struct work_struct *w)
{}

static void guc_context_destroy(struct kref *kref)
{}

static int guc_context_alloc(struct intel_context *ce)
{}

static void __guc_context_set_prio(struct intel_guc *guc,
				   struct intel_context *ce)
{}

static void guc_context_set_prio(struct intel_guc *guc,
				 struct intel_context *ce,
				 u8 prio)
{}

static inline u8 map_i915_prio_to_guc_prio(int prio)
{}

static inline void add_context_inflight_prio(struct intel_context *ce,
					     u8 guc_prio)
{}

static inline void sub_context_inflight_prio(struct intel_context *ce,
					     u8 guc_prio)
{}

static inline void update_context_prio(struct intel_context *ce)
{}

static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
{}

static void add_to_context(struct i915_request *rq)
{}

static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
{}

static void remove_from_context(struct i915_request *rq)
{}

static const struct intel_context_ops guc_context_ops =;

static void submit_work_cb(struct irq_work *wrk)
{}

static void __guc_signal_context_fence(struct intel_context *ce)
{}

static void guc_signal_context_fence(struct intel_context *ce)
{}

static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
{}

static void guc_context_init(struct intel_context *ce)
{}

static int guc_request_alloc(struct i915_request *rq)
{}

static int guc_virtual_context_pre_pin(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww,
				       void **vaddr)
{}

static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
{}

static void guc_virtual_context_unpin(struct intel_context *ce)
{}

static void guc_virtual_context_enter(struct intel_context *ce)
{}

static void guc_virtual_context_exit(struct intel_context *ce)
{}

static int guc_virtual_context_alloc(struct intel_context *ce)
{}

static const struct intel_context_ops virtual_guc_context_ops =;

static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
{}

static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
{}

static void guc_parent_context_unpin(struct intel_context *ce)
{}

static void guc_child_context_unpin(struct intel_context *ce)
{}

static void guc_child_context_post_unpin(struct intel_context *ce)
{}

static void guc_child_context_destroy(struct kref *kref)
{}

static const struct intel_context_ops virtual_parent_context_ops =;

static const struct intel_context_ops virtual_child_context_ops =;

/*
 * The below override of the breadcrumbs is enabled when the user configures a
 * context for parallel submission (multi-lrc, parent-child).
 *
 * The overridden breadcrumbs implement an algorithm which allows the GuC to
 * safely preempt all the hw contexts configured for parallel submission
 * between each BB. The contract between the i915 and GuC is that if the parent
 * context can be preempted, all the children can be preempted, and the GuC
 * will always try to preempt the parent before the children. A handshake
 * between the parent / children breadcrumbs ensures the i915 holds up its end
 * of the deal, creating a window to preempt between each set of BBs.
 */
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
						     u64 offset, u32 len,
						     const unsigned int flags);
static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
						    u64 offset, u32 len,
						    const unsigned int flags);
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
						 u32 *cs);
static u32 *
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
						u32 *cs);
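
/*
 * A purely conceptual sketch of the handshake described above, written as
 * plain memory accesses (the real implementation emits MI commands into the
 * ring; all names below are hypothetical): each child signals its join
 * semaphore and then waits on the parent's go semaphore, while the parent
 * waits for every child to join before writing the go value. The gap between
 * the joins and the go is the window in which the GuC can safely preempt the
 * whole parent/child set.
 */
__maybe_unused
static void example_child_handshake(u32 *join, const u32 *go, u32 token)
{
	WRITE_ONCE(*join, token);		/* tell the parent we arrived */
	while (READ_ONCE(*go) != token)		/* wait for the parent's go */
		cpu_relax();
}

__maybe_unused
static void example_parent_handshake(u32 *joins, u32 *go, u32 n_children,
				     u32 token)
{
	u32 i;

	for (i = 0; i < n_children; i++)	/* wait for every child to join */
		while (READ_ONCE(joins[i]) != token)
			cpu_relax();

	WRITE_ONCE(*go, token);			/* release all the children */
}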

static struct intel_context *
guc_create_parallel(struct intel_engine_cs **engines,
		    unsigned int num_siblings,
		    unsigned int width)
{}

static bool
guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
{}

static void
guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
{}

static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
{}

static void guc_bump_inflight_request_prio(struct i915_request *rq,
					   int prio)
{}

static void guc_retire_inflight_request_prio(struct i915_request *rq)
{}

static void sanitize_hwsp(struct intel_engine_cs *engine)
{}

static void guc_sanitize(struct intel_engine_cs *engine)
{}

static void setup_hwsp(struct intel_engine_cs *engine)
{}

static void start_engine(struct intel_engine_cs *engine)
{}

static int guc_resume(struct intel_engine_cs *engine)
{}

static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
{}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{}

static inline int guc_kernel_context_pin(struct intel_guc *guc,
					 struct intel_context *ce)
{}

static inline int guc_init_submission(struct intel_guc *guc)
{}

static void guc_release(struct intel_engine_cs *engine)
{}

static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
{}

static void guc_default_vfuncs(struct intel_engine_cs *engine)
{}

static void rcs_submission_override(struct intel_engine_cs *engine)
{}

static inline void guc_default_irqs(struct intel_engine_cs *engine)
{}

static void guc_sched_engine_destroy(struct kref *kref)
{}

int intel_guc_submission_setup(struct intel_engine_cs *engine)
{}

struct scheduling_policy {};

static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy)
{}

static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy)
{}

static void __guc_scheduling_policy_add_klv(struct scheduling_policy *policy,
					    u32 action, u32 *data, u32 len)
{}

static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
						struct scheduling_policy *policy)
{}

static int guc_init_global_schedule_policy(struct intel_guc *guc)
{}

static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
{}

int intel_guc_submission_enable(struct intel_guc *guc)
{}

/* Note: By the time we're here, GuC may have already been reset */
void intel_guc_submission_disable(struct intel_guc *guc)
{}

static bool __guc_submission_supported(struct intel_guc *guc)
{}

static bool __guc_submission_selected(struct intel_guc *guc)
{}

int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc)
{}

/*
 * This default value of 33 milliseconds (+1 millisecond of round-up) ensures
 * that workloads running at 30fps or higher are able to enjoy the latency
 * reduction when delaying the schedule-disable operation. This matches the
 * 30fps game-render + encode (real world) workload this knob was tested
 * against.
 */
#define SCHED_DISABLE_DELAY_MS
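
/*
 * Hedged sketch of the delayed schedule-disable idea (hypothetical helpers):
 * rather than sending the schedule-disable H2G the moment the pin count drops
 * to zero, queue delayed work so that a re-pin inside the delay window can
 * simply cancel it and avoid the costly GuC round trip.
 */
__maybe_unused
static void example_queue_sched_disable(struct delayed_work *dwork,
					unsigned int delay_ms)
{
	schedule_delayed_work(dwork, msecs_to_jiffies(delay_ms));
}

__maybe_unused
static void example_cancel_sched_disable_on_repin(struct delayed_work *dwork)
{
	/* If the work hadn't run yet, no H2G is ever sent for this unpin */
	cancel_delayed_work(dwork);
}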

/*
 * A threshold of 75% is a reasonable starting point considering that real world apps
 * generally don't get anywhere near this.
 */
#define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc)

void intel_guc_submission_init_early(struct intel_guc *guc)
{}

static inline struct intel_context *
g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
{}

static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno)
{}

int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
				    const u32 *payload, u32 len)
{}

static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
{}

static bool intel_gt_is_enabled(const struct intel_gt *gt)
{}

static int guc_send_invalidate_tlb(struct intel_guc *guc,
				   enum intel_guc_tlb_invalidation_type type)
{}

/* Send a H2G command to invalidate the TLBs at engine level and beyond. */
int intel_guc_invalidate_tlb_engines(struct intel_guc *guc)
{}

/* Send a H2G command to invalidate the GuC's internal TLB. */
int intel_guc_invalidate_tlb_guc(struct intel_guc *guc)
{}

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg,
					  u32 len)
{}

int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg,
				     u32 len)
{}

static void capture_error_state(struct intel_guc *guc,
				struct intel_context *ce)
{}

static void guc_context_replay(struct intel_context *ce)
{}

static void guc_handle_context_reset(struct intel_guc *guc,
				     struct intel_context *ce)
{}

int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len)
{}

int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len)
{}

struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
{}

static void reset_fail_worker_func(struct work_struct *w)
{}

int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len)
{}

void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{}

void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
				    struct i915_request *hung_rq,
				    struct drm_printer *m)
{}

void intel_guc_submission_print_info(struct intel_guc *guc,
				     struct drm_printer *p)
{}

static inline void guc_log_context_priority(struct drm_printer *p,
					    struct intel_context *ce)
{}

static inline void guc_log_context(struct drm_printer *p,
				   struct intel_context *ce)
{}

void intel_guc_submission_print_context_info(struct intel_guc *guc,
					     struct drm_printer *p)
{}

static inline u32 get_children_go_addr(struct intel_context *ce)
{}

static inline u32 get_children_join_addr(struct intel_context *ce,
					 u8 child_index)
{}

#define PARENT_GO_BB
#define PARENT_GO_FINI_BREADCRUMB
#define CHILD_GO_BB
#define CHILD_GO_FINI_BREADCRUMB
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
						     u64 offset, u32 len,
						     const unsigned int flags)
{}

static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
						    u64 offset, u32 len,
						    const unsigned int flags)
{}

static u32 *
__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
						   u32 *cs)
{}

/*
 * If this is true, a submission of multi-lrc requests had an error and the
 * requests need to be skipped. The front end (execbuf IOCTL) should've called
 * i915_request_skip which squashes the BB but we still need to emit the fini
 * breadcrumb seqno write. At this point we don't know how many of the requests
 * in the multi-lrc submission were generated so we can't do the handshake
 * between the parent and children (e.g. if 4 requests should be generated but
 * the 2nd hit an error, only 1 would be seen by the GuC backend). Simply skip
 * the handshake, but still emit the breadcrumb seqno, if an error has occurred
 * on any of the requests in the submission / relationship.
 */
static inline bool skip_handshake(struct i915_request *rq)
{}
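
/*
 * A hedged sketch of the check described above (the flag name is an
 * assumption about the request tracking, not confirmed by this file): an
 * error anywhere in the parent/child relationship marks the requests so that
 * the fini breadcrumb skips the handshake but still writes the seqno.
 */
__maybe_unused
static bool example_skip_handshake(struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
}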

#define NON_SKIP_LEN
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
						 u32 *cs)
{}

static u32 *
__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
						  u32 *cs)
{}

static u32 *
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
						u32 *cs)
{}

#undef NON_SKIP_LEN

static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
		   unsigned long flags)
{}

bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#include "selftest_guc_multi_lrc.c"
#include "selftest_guc_hangcheck.c"
#endif