// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "intel_migrate.h"
#include "intel_ring.h"
#include "gem/i915_gem_lmem.h"

struct insert_pte_data {
	u64 offset;
};

#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s */

#define GET_CCS_BYTES(i915, size)	(HAS_FLAT_CCS(i915) ? \
					 DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
static bool engine_supports_migration(struct intel_engine_cs *engine)
{}

static void xehp_toggle_pdes(struct i915_address_space *vm,
			     struct i915_page_table *pt,
			     void *data)
{}

static void xehp_insert_pte(struct i915_address_space *vm,
			    struct i915_page_table *pt,
			    void *data)
{}

static void insert_pte(struct i915_address_space *vm,
		       struct i915_page_table *pt,
		       void *data)
{}

static struct i915_address_space *migrate_vm(struct intel_gt *gt)
{}

static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
{}

static struct intel_context *pinned_context(struct intel_gt *gt)
{}

int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
{}

static int random_index(unsigned int max)
{}

static struct intel_context *__migrate_engines(struct intel_gt *gt)
{}

struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
{}

static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
{}

static int emit_no_arbitration(struct i915_request *rq)
{}

static int max_pte_pkt_size(struct i915_request *rq, int pkt)
{}

#define I915_EMIT_PTE_NUM_DWORDS 6

static int emit_pte(struct i915_request *rq,
		    struct sgt_dma *it,
		    unsigned int pat_index,
		    bool is_lmem,
		    u64 offset,
		    int length)
{}

static bool wa_1209644611_applies(int ver, u32 size)
{}

/**
 * DOC: Flat-CCS - Memory compression for Local memory
 *
 * On Xe-HP and later devices, we use dedicated compression control state (CCS)
 * stored in local memory for each surface, to support the 3D and media
 * compression formats.
 *
 * The memory required for the CCS of the entire local memory is 1/256 of the
 * local memory size. So before the kernel boots, the required memory is
 * reserved for the CCS data and a secure register is programmed with the CCS
 * base address (a worked sizing sketch follows this comment).
 *
 * Flat-CCS data needs to be cleared when a lmem object is allocated, and CCS
 * data can be copied in and out of the CCS region through
 * XY_CTRL_SURF_COPY_BLT (a transfer-split sketch follows emit_copy_ccs()
 * below). The CPU can't access the CCS data directly.
 *
 * i915 supports Flat-CCS only on lmem-only objects. When an object has smem
 * in its placement list, then under memory pressure i915 needs to migrate
 * the lmem content into smem. If the lmem object was Flat-CCS compressed by
 * userspace, i915 would need to decompress it, but it lacks the information
 * required for such decompression. Hence i915 supports Flat-CCS only on
 * lmem-only objects.
 *
 * When lmem is exhausted, the backing memory of Flat-CCS capable objects can
 * be temporarily evicted to smem, along with the auxiliary CCS state, where
 * it can potentially be swapped out at a later point, if required. If
 * userspace later touches the evicted pages, we always move the backing
 * memory back to lmem, which includes restoring the saved CCS state, and
 * potentially performing any required swap-in.
 *
 * For migration, lmem objects with smem in their placement list, such as
 * {lmem, smem}, are treated as non-Flat-CCS-capable objects.
 */
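
/*
 * A worked sketch of the 1/256 CCS sizing described above. This helper is
 * illustrative only, not part of the driver: one byte of CCS state covers
 * 256 bytes of local memory, so e.g. a 64M lmem surface needs
 * 64M / 256 = 256K of CCS backing. Assuming the same 256-byte granularity
 * that GET_CCS_BYTES() uses:
 */
static inline u64 example_ccs_bytes(u64 lmem_bytes)
{
	/* Hypothetical helper: 256 lmem bytes per CCS byte (assumed). */
	return DIV_ROUND_UP_ULL(lmem_bytes, 256);
}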

static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
{}

static int emit_copy_ccs(struct i915_request *rq,
			 u32 dst_offset, u8 dst_access,
			 u32 src_offset, u8 src_access, int size)
{}
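
/*
 * Sketch of the transfer-split arithmetic behind emit_copy_ccs(). The limits
 * here are assumptions for illustration: one XY_CTRL_SURF_COPY_BLT moves at
 * most 1024 blocks of 256 bytes, i.e. 256K of CCS data, which in turn
 * represents 256 * 256K = 64M of lmem. Larger copies must be split across
 * multiple instructions in the same batch.
 */
static inline u32 example_ccs_xfers(u64 ccs_bytes)
{
	/* Hypothetical helper: number of 256K CCS transfers required. */
	return DIV_ROUND_UP_ULL(ccs_bytes, SZ_256K);
}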

static int emit_copy(struct i915_request *rq,
		     u32 dst_offset, u32 src_offset, int size)
{}

static u64 scatter_list_length(struct scatterlist *sg)
{}

static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
		   u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{}

static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{}

int
intel_context_migrate_copy(struct intel_context *ce,
			   const struct i915_deps *deps,
			   struct scatterlist *src,
			   unsigned int src_pat_index,
			   bool src_is_lmem,
			   struct scatterlist *dst,
			   unsigned int dst_pat_index,
			   bool dst_is_lmem,
			   struct i915_request **out)
{}

static int emit_clear(struct i915_request *rq, u32 offset, int size,
		      u32 value, bool is_lmem)
{}

int
intel_context_migrate_clear(struct intel_context *ce,
			    const struct i915_deps *deps,
			    struct scatterlist *sg,
			    unsigned int pat_index,
			    bool is_lmem,
			    u32 value,
			    struct i915_request **out)
{}

int intel_migrate_copy(struct intel_migrate *m,
		       struct i915_gem_ww_ctx *ww,
		       const struct i915_deps *deps,
		       struct scatterlist *src,
		       unsigned int src_pat_index,
		       bool src_is_lmem,
		       struct scatterlist *dst,
		       unsigned int dst_pat_index,
		       bool dst_is_lmem,
		       struct i915_request **out)
{}

int
intel_migrate_clear(struct intel_migrate *m,
		    struct i915_gem_ww_ctx *ww,
		    const struct i915_deps *deps,
		    struct scatterlist *sg,
		    unsigned int pat_index,
		    bool is_lmem,
		    u32 value,
		    struct i915_request **out)
{}

void intel_migrate_fini(struct intel_migrate *m)
{}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_migrate.c"
#endif