// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"

#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_engine_user.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "uc/intel_guc_submission.h"

/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. The docs describe, in dwords, what is in the context object. The
 * full size is 70720 bytes; however, the power context and execlist context
 * will never be saved (the power context is stored elsewhere, and execlists
 * don't work on HSW) - so the final size, including the extra state required
 * for the Resource Streamer, is 66944 bytes, which rounds up to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
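
/*
 * A minimal sketch of the arithmetic above (illustrative only, assuming
 * 4 KiB pages): rounding the 66944-byte image up to whole pages gives the
 * 17 pages used for HSW_CXT_TOTAL_SIZE.
 */
static u32 __maybe_unused example_hsw_cxt_pages(void)
{
	return DIV_ROUND_UP(66944, PAGE_SIZE); /* == 17 with 4 KiB pages */
}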

#define DEFAULT_LR_CONTEXT_RENDER_SIZE
#define GEN8_LR_CONTEXT_RENDER_SIZE
#define GEN9_LR_CONTEXT_RENDER_SIZE
#define GEN11_LR_CONTEXT_RENDER_SIZE

#define GEN8_LR_CONTEXT_OTHER_SIZE

#define MAX_MMIO_BASES
struct engine_info {};

static const struct engine_info intel_engines[] =;

/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{}
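
/*
 * Hedged usage sketch (illustrative only, not part of the driver): querying
 * the context image size for the render class. @gt is assumed to be an
 * already-initialised GT; note that the GuC "shared data page" mentioned
 * above is not included in the returned size.
 */
static u32 __maybe_unused example_render_context_size(struct intel_gt *gt)
{
	return intel_engine_context_size(gt, RENDER_CLASS);
}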

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{}

static void __sprint_engine_name(struct intel_engine_cs *engine)
{}

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{}

static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
{}

static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
{}

static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
			      u8 logical_instance)
{}

u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
{}

u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
{}

u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
{}

u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
{}

u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
{}

static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{}

static void intel_setup_engine_capabilities(struct intel_gt *gt)
{}

/**
 * intel_engines_release() - free the resources allocated for Command Streamers
 * @gt: pointer to struct intel_gt
 */
void intel_engines_release(struct intel_gt *gt)
{}

void intel_engine_free_request_pool(struct intel_engine_cs *engine)
{}

void intel_engines_free(struct intel_gt *gt)
{}
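
/*
 * Hedged teardown sketch (illustrative only): engines are released, i.e.
 * decoupled from the hardware, before the engine structures themselves are
 * freed.
 */
static void __maybe_unused example_engines_teardown(struct intel_gt *gt)
{
	intel_engines_release(gt);	/* stop using the engines/hardware */
	intel_engines_free(gt);		/* free the engine structures */
}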

static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
			 unsigned int physical_vdbox,
			 unsigned int logical_vdbox, u16 vdbox_mask)
{}

static void engine_mask_apply_media_fuses(struct intel_gt *gt)
{}

static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{}

/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{}
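
/*
 * Hedged ordering sketch for the catch-22 described above (illustrative
 * only; the helper names outside this file are assumptions, not necessarily
 * the exact calls used by the driver):
 *
 *	intel_uncore_init_mmio(gt->uncore);		// forcewake from full platform mask
 *	info->engine_mask = init_engine_mask(gt);	// read the engine fuses
 *	intel_uncore_prune_engine_fw_domains(gt->uncore, gt); // drop fused-off domains
 */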

static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
				 u8 class, const u8 *map, u8 num_instances)
{}

static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{}

void intel_engine_init_execlists(struct intel_engine_cs *engine)
{}

static void cleanup_status_page(struct intel_engine_cs *engine)
{}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_gem_ww_ctx *ww,
				struct i915_vma *vma)
{}

static int init_status_page(struct intel_engine_cs *engine)
{}

static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
{}

static int engine_setup_common(struct intel_engine_cs *engine)
{}

struct measure_breadcrumb {};

static int measure_breadcrumb_dw(struct intel_context *ce)
{}

struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
				   struct i915_address_space *vm,
				   unsigned int ring_size,
				   unsigned int hwsp,
				   struct lock_class_key *key,
				   const char *name)
{}

void intel_engine_destroy_pinned_context(struct intel_context *ce)
{}

static struct intel_context *
create_ggtt_bind_context(struct intel_engine_cs *engine)
{}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{}

/*
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission-mode-specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{}

int intel_engines_init(struct intel_gt *gt)
{}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{}

/**
 * intel_engine_resume - re-initializes the HW state of the engine
 * @engine: Engine to resume.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_resume(struct intel_engine_cs *engine)
{}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{}

static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{}

static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
				  int fast_timeout_us,
				  int slow_timeout_ms)
{}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{}

static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
{}

static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
{}

/*
 * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
 * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
 * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
 * are concerned only with the gt reset here, we use a logical OR of pending
 * forcewakeups from all reset domains and then wait for them to complete by
 * querying PWRGT_DOMAIN_STATUS.
 */
void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
{}
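
/*
 * Hedged sketch of the MSG_IDLE layout described above (illustrative only,
 * not the driver's actual helper): bits [13:9] carry the pending-forcewake
 * status and bits [29:25] indicate which of those status bits are valid.
 */
static u32 __maybe_unused example_msg_idle_pending_fw(u32 msg_idle)
{
	u32 pending = (msg_idle >> 9) & 0x1f;	/* bits [13:9] */
	u32 valid = (msg_idle >> 25) & 0x1f;	/* bits [29:25] */

	return pending & valid;
}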

/* NB: please notice the memset - the caller-provided @instdone is zeroed before being filled in */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{}
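
/*
 * Hedged usage sketch (illustrative only): the caller provides the instdone
 * structure and does not need to initialise it, since it is zeroed by the
 * call before being filled in.
 */
static void __maybe_unused example_read_instdone(struct intel_engine_cs *engine)
{
	struct intel_instdone instdone;

	intel_engine_get_instdone(engine, &instdone);
}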

static bool ring_is_idle(struct intel_engine_cs *engine)
{}

void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Returns true if there are no requests pending, nothing is left to be
 * submitted to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{}
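
/*
 * Hedged sketch (illustrative only), similar in spirit to
 * intel_engines_are_idle() below: check every engine on a GT.
 */
static bool __maybe_unused example_gt_engines_idle(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		if (!intel_engine_is_idle(engine))
			return false;

	return true;
}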

bool intel_engines_are_idle(struct intel_gt *gt)
{}

bool intel_engine_irq_enable(struct intel_engine_cs *engine)
{}

void intel_engine_irq_disable(struct intel_engine_cs *engine)
{}

void intel_engines_reset_default_submission(struct intel_gt *gt)
{}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{}

static struct intel_timeline *get_timeline(struct i915_request *rq)
{}

static int print_ring(char *buf, int sz, struct i915_request *rq)
{}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{}

static const char *repr_timer(const struct timer_list *t)
{}

static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{}

static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{}

static unsigned long read_ul(void *p, size_t x)
{}

static void print_properties(struct intel_engine_cs *engine,
			     struct drm_printer *m)
{}

static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
{}

void intel_engine_dump_active_requests(struct list_head *requests,
				       struct i915_request *hung_rq,
				       struct drm_printer *m)
{}

static void engine_dump_active_requests(struct intel_engine_cs *engine,
					struct drm_printer *m)
{}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 * @now: monotonic timestamp of sampling
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{}
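
/*
 * Hedged usage sketch (illustrative only): two samples of busy time and
 * wall-clock time give the busy fraction, here in per-mille, over the
 * interval between them.
 */
static u64 __maybe_unused example_busy_permille(ktime_t busy0, ktime_t now0,
						ktime_t busy1, ktime_t now1)
{
	u64 busy = ktime_to_ns(ktime_sub(busy1, busy0));
	u64 total = ktime_to_ns(ktime_sub(now1, now0));

	return total ? div64_u64(busy * 1000, total) : 0;
}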

struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
			    unsigned int count, unsigned long flags)
{}

static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{}

void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
				  struct intel_context **ce, struct i915_request **rq)
{}

void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif