linux/drivers/gpu/drm/xe/xe_lrc.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_lrc.h"

#include <linux/ascii85.h>

#include "instructions/xe_mi_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_gfx_state_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_lrc_layout.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue_types.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_hw_fence.h"
#include "xe_map.h"
#include "xe_memirq.h"
#include "xe_sriov.h"
#include "xe_vm.h"

#define LRC_VALID
#define LRC_PRIVILEGE
#define LRC_ADDRESSING_MODE
#define LRC_LEGACY_64B_CONTEXT

#define LRC_ENGINE_CLASS
#define LRC_ENGINE_INSTANCE

#define LRC_INDIRECT_RING_STATE_SIZE

struct xe_lrc_snapshot {};

static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
{}

size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
{}

/*
 * The per-platform tables are u8-encoded in @data. Decode @data and set the
 * commands and register offsets in @regs. The following encoding is used
 * for each byte. There are 2 steps: decoding commands and decoding addresses.
 *
 * Commands:
 * [7]: create NOPs - the number of NOPs is set in the lower bits
 * [6]: when creating an MI_LOAD_REGISTER_IMM command, allow setting
 *      MI_LRI_FORCE_POSTED
 * [5:0]: number of NOPs, or the number of registers to load in the case of
 *        MI_LOAD_REGISTER_IMM
 *
 * Addresses: these are decoded after an MI_LOAD_REGISTER_IMM command, "count"
 * registers at a time. They are set using the REG/REG16 macros: the former is
 * used for offsets smaller than 0x200, the latter for offsets of 0x200 or
 * bigger. Those macros already set all the bits documented below correctly:
 *
 * [7]: when set, one or more additional bytes follow with the lower bits of
 *      the register offset
 * [6:0]: register offset, without considering the engine base
 *
 * This function only sets up the commands and register offsets. The register
 * values themselves are not filled in. An illustrative example table follows
 * set_offsets() below.
 */
static void set_offsets(u32 *regs,
			const u8 *data,
			const struct xe_hw_engine *hwe)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | \
			   BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
#define REG16(x) \
	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
	(((x) >> 2) & 0x7f)
{}
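
/*
 * Illustrative sketch only, not one of the real per-platform tables that
 * follow: a made-up table showing how the encoding documented above maps to
 * the NOP/LRI/REG/REG16 macros. The offsets 0x028 and 0x3a8 and the
 * end-of-table marker are hypothetical, chosen for the example.
 */
static const u8 example_offsets_sketch[] __maybe_unused = {
	NOP(1),		/* one NOP: bit 7 set, count in the low bits */
	LRI(2, POSTED),	/* MI_LOAD_REGISTER_IMM of 2 registers, force-posted */
	REG(0x028),	/* offset < 0x200: encoded in a single byte */
	REG16(0x3a8),	/* offset >= 0x200: two bytes, bit 7 set in the first */

	0		/* assumed end-of-table marker */
};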

static const u8 gen12_xcs_offsets[] =;

static const u8 dg2_xcs_offsets[] =;

static const u8 gen12_rcs_offsets[] =;

static const u8 xehp_rcs_offsets[] =;

static const u8 dg2_rcs_offsets[] =;

static const u8 mtl_rcs_offsets[] =;

#define XE2_CTX_COMMON

static const u8 xe2_rcs_offsets[] =;

static const u8 xe2_bcs_offsets[] =;

static const u8 xe2_xcs_offsets[] =;

static const u8 xe2_indirect_ring_state_offsets[] =;

#undef REG16
#undef REG
#undef LRI
#undef NOP

static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
{}

static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{}

static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
{}

static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
{}

static void reset_stop_ring(u32 *regs, struct xe_hw_engine *hwe)
{}

static inline bool xe_lrc_has_indirect_ring_state(struct xe_lrc *lrc)
{}

static inline u32 __xe_lrc_ring_offset(struct xe_lrc *lrc)
{}

u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
{}

/* Make the magic macros work */
#define __xe_lrc_pphwsp_offset
#define __xe_lrc_regs_offset

#define LRC_SEQNO_PPHWSP_OFFSET
#define LRC_START_SEQNO_PPHWSP_OFFSET
#define LRC_CTX_JOB_TIMESTAMP_OFFSET
#define LRC_PARALLEL_PPHWSP_OFFSET
#define LRC_PPHWSP_SIZE

u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{}

static size_t lrc_reg_size(struct xe_device *xe)
{}

size_t xe_lrc_skip_size(struct xe_device *xe)
{}

static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)
{}

static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
{}

static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{}

static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
{}

static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc)
{}

static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{}

#define DECL_MAP_ADDR_HELPERS \

DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS
DECL_MAP_ADDR_HELPERS

#undef DECL_MAP_ADDR_HELPERS

/**
 * xe_lrc_ctx_timestamp_ggtt_addr() - Get ctx timestamp GGTT address
 * @lrc: Pointer to the lrc.
 *
 * Returns: ctx timestamp GGTT address
 */
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc)
{}

/**
 * xe_lrc_ctx_timestamp() - Read ctx timestamp value
 * @lrc: Pointer to the lrc.
 *
 * Returns: ctx timestamp value
 */
u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{}

/**
 * xe_lrc_ctx_job_timestamp_ggtt_addr() - Get ctx job timestamp GGTT address
 * @lrc: Pointer to the lrc.
 *
 * Returns: ctx job timestamp GGTT address
 */
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc)
{}

/**
 * xe_lrc_ctx_job_timestamp() - Read ctx job timestamp value
 * @lrc: Pointer to the lrc.
 *
 * Returns: ctx job timestamp value
 */
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc)
{}

u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc)
{}

u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc)
{}

static u32 xe_lrc_read_indirect_ctx_reg(struct xe_lrc *lrc, int reg_nr)
{}

static void xe_lrc_write_indirect_ctx_reg(struct xe_lrc *lrc,
					  int reg_nr, u32 val)
{}

u32 xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr)
{}

void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val)
{}

static void *empty_lrc_data(struct xe_hw_engine *hwe)
{}

static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{}

static void xe_lrc_finish(struct xe_lrc *lrc)
{}

#define PVC_CTX_ASID
#define PVC_CTX_ACC_CTR_THOLD

static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
		       struct xe_vm *vm, u32 ring_size)
{}

/**
 * xe_lrc_create - Create an LRC
 * @hwe: Hardware Engine
 * @vm: The VM (address space)
 * @ring_size: LRC ring size
 *
 * Allocate and initialize the Logical Ring Context (LRC).
 *
 * Return: Pointer to the created LRC on success, or an error pointer on
 * failure.
 */
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
			     u32 ring_size)
{}

/**
 * xe_lrc_destroy - Destroy the LRC
 * @ref: reference to LRC
 *
 * Called when ref == 0; releases the resources held by the Logical Ring
 * Context (LRC) and frees the LRC memory.
 */
void xe_lrc_destroy(struct kref *ref)
{}
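
/*
 * Minimal creation sketch, not taken from a real caller: the ring size value
 * and the example_* naming are assumptions made for illustration only.
 */
static struct xe_lrc * __maybe_unused example_lrc_create(struct xe_hw_engine *hwe,
							 struct xe_vm *vm)
{
	struct xe_lrc *lrc;

	/* Assumed ring size; real callers pick a size suited to the queue. */
	lrc = xe_lrc_create(hwe, vm, SZ_16K);
	if (IS_ERR(lrc))
		return lrc;	/* propagate the error pointer */

	/*
	 * The LRC is reference counted; xe_lrc_destroy() above is the kref
	 * release callback run once the last reference is dropped.
	 */
	return lrc;
}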

void xe_lrc_set_ring_tail(struct xe_lrc *lrc, u32 tail)
{}

u32 xe_lrc_ring_tail(struct xe_lrc *lrc)
{}

void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head)
{}

u32 xe_lrc_ring_head(struct xe_lrc *lrc)
{}

u32 xe_lrc_ring_space(struct xe_lrc *lrc)
{}

static void __xe_lrc_write_ring(struct xe_lrc *lrc, struct iosys_map ring,
				const void *data, size_t size)
{}

void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size)
{}

u64 xe_lrc_descriptor(struct xe_lrc *lrc)
{}

u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc)
{}

/**
 * xe_lrc_alloc_seqno_fence() - Allocate an lrc seqno fence.
 *
 * Allocate but don't initialize an lrc seqno fence.
 *
 * Return: Pointer to the allocated fence, or an error pointer on failure.
 */
struct dma_fence *xe_lrc_alloc_seqno_fence(void)
{}

/**
 * xe_lrc_free_seqno_fence() - Free an lrc seqno fence.
 * @fence: Pointer to the fence to free.
 *
 * Frees an lrc seqno fence that hasn't yet been
 * initialized.
 */
void xe_lrc_free_seqno_fence(struct dma_fence *fence)
{}

/**
 * xe_lrc_init_seqno_fence() - Initialize an lrc seqno fence.
 * @lrc: Pointer to the lrc.
 * @fence: Pointer to the fence to initialize.
 *
 * Initializes a pre-allocated lrc seqno fence.
 * After initialization, the fence is subject to normal
 * dma-fence refcounting.
 */
void xe_lrc_init_seqno_fence(struct xe_lrc *lrc, struct dma_fence *fence)
{}
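
/*
 * Hedged usage sketch of the three seqno fence helpers above (not a real
 * caller; @can_submit stands in for an arbitrary failure condition). It shows
 * the intended two-phase pattern: allocate where failure is still easy to
 * unwind, free directly while the fence is uninitialized, and initialize it
 * only once it is certain to be used.
 */
static int __maybe_unused example_seqno_fence_usage(struct xe_lrc *lrc,
						    bool can_submit)
{
	struct dma_fence *fence;

	fence = xe_lrc_alloc_seqno_fence();
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!can_submit) {
		/* Not yet initialized, so release it directly. */
		xe_lrc_free_seqno_fence(fence);
		return -EAGAIN;
	}

	/* From this point on, normal dma-fence refcounting applies. */
	xe_lrc_init_seqno_fence(lrc, fence);

	return 0;
}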

s32 xe_lrc_seqno(struct xe_lrc *lrc)
{}

s32 xe_lrc_start_seqno(struct xe_lrc *lrc)
{}

u32 xe_lrc_start_seqno_ggtt_addr(struct xe_lrc *lrc)
{}

u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc)
{}

struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc)
{}

static int instr_dw(u32 cmd_header)
{}

static int dump_mi_command(struct drm_printer *p,
			   struct xe_gt *gt,
			   u32 *dw,
			   int remaining_dw)
{}

static int dump_gfxpipe_command(struct drm_printer *p,
				struct xe_gt *gt,
				u32 *dw,
				int remaining_dw)
{}

static int dump_gfx_state_command(struct drm_printer *p,
				  struct xe_gt *gt,
				  u32 *dw,
				  int remaining_dw)
{}

void xe_lrc_dump_default(struct drm_printer *p,
			 struct xe_gt *gt,
			 enum xe_engine_class hwe_class)
{}

struct instr_state {};

static const struct instr_state xe_hpg_svg_state[] =;

void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb)
{}

struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
{}

void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
{}

void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
{}

void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
{}

/**
 * xe_lrc_update_timestamp() - Update ctx timestamp
 * @lrc: Pointer to the lrc.
 * @old_ts: Old timestamp value
 *
 * Populate @old_ts with the current saved ctx timestamp, read the new ctx
 * timestamp and update the saved value.
 *
 * Returns: New ctx timestamp value
 */
u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
{}
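
/*
 * Usage sketch only: derive the number of timestamp ticks elapsed since the
 * previous sample. The unsigned 32-bit subtraction keeps the delta correct
 * across a counter wrap; converting ticks to time is out of scope here.
 */
static u32 __maybe_unused example_ctx_timestamp_delta(struct xe_lrc *lrc)
{
	u32 old_ts, new_ts;

	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);

	return new_ts - old_ts;
}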