/* linux/drivers/gpu/drm/xe/xe_execlist.c */

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_execlist.h"

#include <drm/drm_managed.h>

#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"

/*
 * NOTE(review): the values of these macros were stripped from this copy of
 * the file, leaving them defined-but-empty.  Restored below from the
 * upstream xe driver: the software context ID occupies bits [47:37] (11
 * bits) of EXECLIST_STATUS on pre-XeHP hardware and bits [54:39] (16 bits)
 * on XeHP+.  Confirm against the upstream tree before relying on them.
 */
#define XE_EXECLIST_HANG_LIMIT 1

#define SW_CTX_ID_SHIFT 37
#define SW_CTX_ID_WIDTH 11
#define XEHP_SW_CTX_ID_SHIFT 39
#define XEHP_SW_CTX_ID_WIDTH 16

/* Mask of the SW context ID field within the 64-bit execlist descriptor. */
#define SW_CTX_ID \
	GENMASK_ULL(SW_CTX_ID_WIDTH + SW_CTX_ID_SHIFT - 1, \
		    SW_CTX_ID_SHIFT)

/* XeHP+ variant of the SW context ID mask (wider field, higher shift). */
#define XEHP_SW_CTX_ID \
	GENMASK_ULL(XEHP_SW_CTX_ID_WIDTH + XEHP_SW_CTX_ID_SHIFT - 1, \
		    XEHP_SW_CTX_ID_SHIFT)


/*
 * Submit a logical ring context to a hardware engine under the given
 * software context id.  Presumably writes the LRC descriptor to the
 * engine's execlist submit port — cannot be confirmed here: the body is
 * empty in this copy of the file (stripped); restore from upstream.
 */
static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
			u32 ctx_id)
{}

/*
 * Start executing @exl on @port.  The double-underscore prefix suggests
 * the caller is expected to hold the port lock — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void __xe_execlist_port_start(struct xe_execlist_port *port,
				     struct xe_execlist_exec_queue *exl)
{}

/*
 * Transition @port to the idle state (no queue running).  Locking
 * presumably as for the other __-prefixed helpers — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void __xe_execlist_port_idle(struct xe_execlist_port *port)
{}

/*
 * Return true when @exl has no outstanding work.  Exact criterion is not
 * visible here: the body is empty in this copy (stripped) — as written it
 * is a non-void function with no return, which is undefined behavior if
 * the result is used.  Restore from upstream.
 */
static bool xe_execlist_is_idle(struct xe_execlist_exec_queue *exl)
{}

/*
 * Pick the next runnable queue on @port and start it, presumably falling
 * back to idling the port when none is pending — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
{}

/*
 * Read the 64-bit execlist status for @hwe — presumably the two halves of
 * the EXECLIST_STATUS register via xe_mmio — TODO confirm.
 * NOTE(review): body stripped in this copy; as written this non-void
 * function returns nothing (UB if the result is used).  Restore from
 * upstream.
 */
static u64 read_execlist_status(struct xe_hw_engine *hwe)
{}

/*
 * IRQ bottom-half for @port with the port lock already held (per the
 * _locked suffix convention — TODO confirm).
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void xe_execlist_port_irq_handler_locked(struct xe_execlist_port *port)
{}

/*
 * Engine interrupt entry point: presumably takes the port lock and
 * delegates to xe_execlist_port_irq_handler_locked() for @hwe's port,
 * using @intr_vec to identify the interrupt source — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void xe_execlist_port_irq_handler(struct xe_hw_engine *hwe,
					 u16 intr_vec)
{}

/*
 * Wake @port because work at @priority became runnable; _locked suffix
 * implies the caller holds the port lock — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
					 enum xe_exec_queue_priority priority)
{}

/*
 * Mark @exl as active, presumably enqueueing it on its port's list of
 * runnable queues and waking the port — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
{}

/*
 * timer_list callback — the name suggests a watchdog that fires when an
 * expected execlist interrupt never arrives — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void xe_execlist_port_irq_fail_timer(struct timer_list *timer)
{}

/*
 * Allocate and initialize the execlist port for @hwe.  Non-static, so it
 * is part of the interface declared in xe_execlist.h.  Ownership and
 * error convention (ERR_PTR vs NULL) cannot be determined here.
 * NOTE(review): body stripped in this copy; as written this non-void
 * function returns nothing (UB if the result is used).  Restore from
 * upstream.
 */
struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
						 struct xe_hw_engine *hwe)
{}

/*
 * Tear down a port created by xe_execlist_port_create().  Non-static
 * counterpart of the constructor above.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
void xe_execlist_port_destroy(struct xe_execlist_port *port)
{}

/*
 * drm_sched_backend_ops.run_job hook: presumably emits @drm_job's ring
 * commands and returns the fence that signals completion — TODO confirm.
 * NOTE(review): body stripped in this copy; as written this non-void
 * function returns nothing (UB if the result is used).  Restore from
 * upstream.
 */
static struct dma_fence *
execlist_run_job(struct drm_sched_job *drm_job)
{}

/*
 * drm_sched_backend_ops.free_job hook: releases resources held by
 * @drm_job after the scheduler is done with it.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void execlist_job_free(struct drm_sched_job *drm_job)
{}

static const struct drm_sched_backend_ops drm_sched_ops =;

/*
 * xe_exec_queue_ops.init hook: sets up execlist backend state for @q,
 * presumably creating the drm_sched entity/scheduler — TODO confirm.
 * NOTE(review): body stripped in this copy; as written this non-void
 * function returns nothing (UB if the result is used).  Restore from
 * upstream.
 */
static int execlist_exec_queue_init(struct xe_exec_queue *q)
{}

/*
 * Deferred teardown: work_struct handler that presumably performs the
 * sleepable part of queue destruction scheduled by
 * execlist_exec_queue_fini() — TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void execlist_exec_queue_fini_async(struct work_struct *w)
{}

/*
 * xe_exec_queue_ops.kill hook for the execlist backend.
 * NOTE(review): body stripped in this copy (possibly a deliberate no-op
 * upstream — cannot tell from here); restore from upstream.
 */
static void execlist_exec_queue_kill(struct xe_exec_queue *q)
{}

/*
 * xe_exec_queue_ops.fini hook: presumably queues
 * execlist_exec_queue_fini_async() to finish destruction out of line —
 * TODO confirm.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
{}

/*
 * xe_exec_queue_ops.set_priority hook.  Returns 0 or a negative errno by
 * kernel convention — assumed, body not visible.
 * NOTE(review): body stripped in this copy; as written this non-void
 * function returns nothing (UB if the result is used).  Restore from
 * upstream.
 */
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
					    enum xe_exec_queue_priority priority)
{}

/*
 * xe_exec_queue_ops.set_timeslice hook (@timeslice_us in microseconds,
 * per the parameter name).  Execlist backend may not support this —
 * cannot tell from here.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static int execlist_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
{}

/*
 * xe_exec_queue_ops.set_preempt_timeout hook (@preempt_timeout_us in
 * microseconds, per the parameter name).
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
						   u32 preempt_timeout_us)
{}

/*
 * xe_exec_queue_ops.suspend hook.  May be unsupported by the execlist
 * backend (e.g. return -EOPNOTSUPP) — cannot tell from here.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
{}

/*
 * xe_exec_queue_ops.suspend_wait hook: waits for a suspend requested via
 * execlist_exec_queue_suspend() to complete — assumed from the name.
 * NOTE(review): body stripped in this copy (stray blank line retained
 * as-is); restore from upstream.
 */
static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)

{}

/*
 * xe_exec_queue_ops.resume hook — counterpart of suspend above.
 * NOTE(review): body stripped in this copy; restore from upstream.
 */
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
{}

/*
 * xe_exec_queue_ops.reset_status hook: reports whether @q has been reset
 * — assumed from the name.
 * NOTE(review): body stripped in this copy; as written this non-void
 * function returns nothing (UB if the result is used).  Restore from
 * upstream.
 */
static bool execlist_exec_queue_reset_status(struct xe_exec_queue *q)
{}

static const struct xe_exec_queue_ops execlist_exec_queue_ops =;

/*
 * Register the execlist backend for @gt — non-static entry point,
 * presumably installing execlist_exec_queue_ops as the GT's exec queue
 * ops — TODO confirm.  Returns 0 or a negative errno by kernel
 * convention — assumed, body not visible.
 * NOTE(review): body stripped in this copy; as written this non-void
 * function returns nothing (UB if the result is used).  Restore from
 * upstream.
 */
int xe_execlist_init(struct xe_gt *gt)
{}