#include <linux/kthread.h>
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_perf_oa_regs.h"
#include "gvt.h"
#define RING_CTX_OFF(x) …
/*
 * Write the 8-dword PDP (page-directory-pointer) array into the shadow
 * execlist ring context. NOTE(review): body elided in this excerpt —
 * purpose inferred from the signature only; confirm against full source.
 */
static void set_context_pdp_root_pointer(
struct execlist_ring_context *ring_context,
u32 pdp[8])
{ … }
/*
 * Refresh the shadow context's PDP root pointers for this workload.
 * NOTE(review): body elided here — inferred from the name; verify.
 */
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{ … }
/*
 * Save or restore OA (observation architecture / perf) registers in the
 * context image, selected by @save. NOTE(review): body elided — inferred
 * from name and i915_perf_oa_regs.h include; confirm against full source.
 */
static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{ … }
/*
 * Populate the host shadow context image from the guest's ring context.
 * Returns 0 on success, negative errno otherwise (kernel convention —
 * presumed; body elided in this excerpt, verify).
 */
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{ … }
/*
 * Predicate: was @rq submitted on behalf of GVT (a shadowed vGPU
 * context)? NOTE(review): body elided — inferred from name; verify.
 */
static inline bool is_gvt_request(struct i915_request *rq)
{ … }
/*
 * Snapshot per-engine ring hardware state into the vGPU's register file.
 * NOTE(review): body elided in this excerpt — inferred from name; verify.
 */
static void save_ring_hw_state(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine)
{ … }
/*
 * Notifier-chain callback invoked on shadow context status transitions
 * (schedule-in/out events, presumably — body elided here; confirm the
 * @action values against the full source).
 */
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{ … }
/*
 * Update the context descriptor of shadow context @ce for @workload.
 * NOTE(review): body elided — inferred from name; verify.
 */
static void
shadow_context_descriptor_update(struct intel_context *ce,
struct intel_vgpu_workload *workload)
{ … }
/*
 * Copy the guest-submitted ring contents into the shadow ring buffer.
 * Returns 0 on success, negative errno otherwise (presumed; body elided).
 */
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{ … }
/*
 * Release resources held by the shadow WA (workaround batch) context.
 * NOTE(review): body elided — inferred from name; verify.
 */
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{ … }
/*
 * Store a DMA address into a PPGTT page directory. NOTE(review): body
 * elided in this excerpt — inferred from the signature; verify.
 */
static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
{ … }
/*
 * Point the shadow context's PPGTT roots at the workload's shadow mm.
 * NOTE(review): body elided — inferred from name; verify.
 */
static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct intel_context *ce)
{ … }
/*
 * Allocate the i915_request that will carry this workload's submission.
 * Returns 0 on success, negative errno otherwise (presumed; body elided —
 * verify against full source).
 */
static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{ … }
/*
 * Public entry: scan (audit) the guest command buffers and build the
 * shadow copies for this workload. NOTE(review): body elided in this
 * excerpt — description inferred from the name; confirm semantics and
 * locking requirements against the full source.
 */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{ … }
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
/*
 * Pin/map the shadow batch buffers prior to dispatch; paired with
 * release_shadow_batch_buffer() (forward-declared above). Presumed from
 * naming — body elided here; verify.
 */
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{ … }
/*
 * Propagate WA-context addresses into the shadow context image.
 * NOTE(review): body elided — inferred from name; verify.
 */
static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{ … }
/*
 * Prepare (pin/map) the shadow WA context; undone by
 * release_shadow_wa_ctx(). Presumed pairing — body elided; verify.
 */
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{ … }
/*
 * Sync vGPU virtual registers into the context before dispatch.
 * NOTE(review): body elided in this excerpt — inferred from name; verify.
 */
static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{ … }
/*
 * Counterpart of prepare_shadow_batch_buffer(): unpin/free the shadow
 * batch buffers (presumed pairing — body elided here; verify).
 */
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{ … }
/*
 * Pin the workload's shadow mm (PPGTT) so its mappings stay valid during
 * execution; paired with intel_vgpu_shadow_mm_unpin() below. Presumed —
 * body elided; verify.
 */
static int
intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
{ … }
/*
 * Unpin the workload's shadow mm; counterpart of
 * intel_vgpu_shadow_mm_pin() (presumed pairing — body elided; verify).
 */
static void
intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
{ … }
/*
 * Assemble everything a workload needs before dispatch (shadow buffers,
 * mm, request — presumed composition of the helpers above; body elided
 * in this excerpt, verify ordering and error unwinding in full source).
 */
static int prepare_workload(struct intel_vgpu_workload *workload)
{ … }
/*
 * Submit a prepared workload to the hardware via i915 submission.
 * Returns 0 on success, negative errno otherwise (presumed; body elided).
 */
static int dispatch_workload(struct intel_vgpu_workload *workload)
{ … }
/*
 * Select the next workload to run on @engine, or NULL-equivalent when
 * none is eligible (presumed return contract — body elided; verify).
 */
static struct intel_vgpu_workload *
pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{ … }
/*
 * Write PDP entries back into the guest's ring context at
 * @ring_context_gpa (guest physical address, per the name — body elided;
 * verify). Inverse direction of read_guest_pdps() below.
 */
static void update_guest_pdps(struct intel_vgpu *vgpu,
u64 ring_context_gpa, u32 pdp[8])
{ … }
/*
 * Debug-only consistency check (__maybe_unused) between a ring context's
 * PPGTT roots and the shadow mm. NOTE(review): body elided — inferred
 * from signature; verify.
 */
static __maybe_unused bool
check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
{ … }
/*
 * Copy results from the shadow context back into the guest's context
 * image after the workload completes (presumed — body elided; verify).
 */
static void update_guest_context(struct intel_vgpu_workload *workload)
{ … }
/*
 * Public entry: drop pending workloads of @vgpu on the engines selected
 * by @engine_mask. NOTE(review): body elided in this excerpt — inferred
 * from the name; confirm against full source.
 */
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{ … }
/*
 * Finish the workload currently running on @ring_id: write results back
 * to the guest and release resources (presumed — body elided; verify).
 */
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{ … }
static int workload_thread(void *arg)
{ … }
/*
 * Public entry: block until @vgpu has no in-flight workloads (presumed
 * from the name — body elided in this excerpt; verify).
 */
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{ … }
/*
 * Public entry: tear down the workload scheduler; counterpart of
 * intel_gvt_init_workload_scheduler() below (presumed pairing — body
 * elided; verify).
 */
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{ … }
/*
 * Public entry: initialise the workload scheduler (presumably spawning
 * the per-engine workload_thread kthreads — body elided; verify).
 * Returns 0 on success, negative errno otherwise (presumed).
 */
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{ … }
/*
 * Restore the host context's PPGTT root entries saved by
 * i915_context_ppgtt_root_save() (presumed pairing — body elided; verify).
 */
static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
struct i915_ppgtt *ppgtt)
{ … }
/*
 * Public entry: release @vgpu's submission state; counterpart of
 * intel_vgpu_setup_submission() (presumed pairing — body elided; verify).
 */
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{ … }
/*
 * Public entry: reset @vgpu submission state on the engines in
 * @engine_mask (e.g. on vGPU reset — presumed; body elided, verify).
 */
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{ … }
/*
 * Save the host context's PPGTT root entries so they can be put back by
 * i915_context_ppgtt_root_restore() (presumed pairing — body elided;
 * verify).
 */
static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
struct i915_ppgtt *ppgtt)
{ … }
/*
 * Public entry: allocate and initialise @vgpu's submission state
 * (shadow contexts, etc. — presumed; body elided, verify). Returns 0 on
 * success, negative errno otherwise (presumed).
 */
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{ … }
/*
 * Public entry: select the submission interface (@interface) for the
 * engines in @engine_mask — e.g. execlists, per the includes above
 * (presumed; body elided, confirm valid @interface values in full source).
 */
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask,
unsigned int interface)
{ … }
/*
 * Public entry: free a workload object; counterpart of alloc_workload()/
 * intel_vgpu_create_workload() (presumed pairing — body elided; verify).
 */
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{ … }
/*
 * Allocate and minimally initialise a workload object for @vgpu; the
 * error convention (NULL vs ERR_PTR) is not visible here — body elided,
 * verify before relying on it.
 */
static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{ … }
#define RING_CTX_OFF(x) …
/*
 * Read the 8-dword PDP array out of the guest ring context at
 * @ring_context_gpa; inverse of update_guest_pdps() above (presumed —
 * body elided; verify).
 */
static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 ring_context_gpa, u32 pdp[8])
{ … }
/*
 * Create/locate the shadow mm (PPGTT) for this workload from the guest's
 * page tables (presumed from name and neighbours — body elided; verify).
 * Returns 0 on success, negative errno otherwise (presumed).
 */
static int prepare_mm(struct intel_vgpu_workload *workload)
{ … }
#define same_context(a, b) …
/*
 * Public entry: build a workload from a guest execlist context
 * descriptor (@desc) for @engine. The error convention (NULL vs ERR_PTR)
 * is not visible here — body elided, verify in full source. Freed via
 * intel_vgpu_destroy_workload().
 */
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc)
{ … }
/*
 * Public entry: enqueue a created workload for the scheduler thread to
 * pick up (presumed — body elided in this excerpt; verify wakeup/locking
 * behaviour against full source).
 */
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{ … }