#include <linux/dma-fence-array.h>
#include "xe_pt.h"
#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
struct xe_pt_dir { … };
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define xe_pt_set_addr(__xe_pt, __addr) …
#define xe_pt_addr(__xe_pt) …
#else
#define xe_pt_set_addr …
#define xe_pt_addr …
#endif
static const u64 xe_normal_pt_shifts[] = …;
static const u64 xe_compact_pt_shifts[] = …;
#define XE_PT_HIGHEST_LEVEL …
static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
{ … }
static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{ … }
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
unsigned int level)
{ … }
static void xe_pt_free(struct xe_pt *pt)
{ … }
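/**
 * xe_pt_create() - Create a page-table.
 * @vm: The vm to create for.
 * @tile: The tile to create for.
 * @level: The page-table level.
 *
 * Allocate and initialize a single struct xe_pt metadata structure along
 * with the page-table bo backing it. Directory levels (level > 0) also get
 * their child-pointer array allocated.
 *
 * Return: A valid struct xe_pt pointer on success, an ERR_PTR() on failure.
 */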
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
unsigned int level)
{ … }
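/**
 * xe_pt_populate_empty() - Populate a page-table bo with scratch or zero
 * entries.
 * @tile: The tile whose scratch page-table to use.
 * @vm: The vm we populate for.
 * @pt: The page-table whose bo to initialize.
 *
 * Fill the page-table bo of @pt with entries pointing into the tile's
 * scratch page-table tree if one exists, otherwise with zeroes.
 */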
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt)
{ … }
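/**
 * xe_pt_shift() - Address shift of a page-table level.
 * @level: The page-table level.
 *
 * Return: The ilog2 of the address range covered by a single entry of a
 * page-table at @level.
 */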
unsigned int xe_pt_shift(unsigned int level)
{ … }
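/**
 * xe_pt_destroy() - Destroy a page-table tree.
 * @pt: The root of the page-table tree to destroy.
 * @flags: vm flags. Currently unused.
 * @deferred: llist head for deferred putting of the bo, or NULL to put
 * immediately.
 *
 * Puts the page-table bo, recursively destroys all children and finally
 * frees @pt.
 */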
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
{ … }
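/*
 * Per-level tracking of the update entry being built for a given parent
 * page-table during a stage-bind walk.
 */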
struct xe_pt_update { … };
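/* Walk state for the stage_bind walk. */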
struct xe_pt_stage_bind_walk { … };
static int
xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
pgoff_t offset, bool alloc_entries)
{ … }
static int
xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
pgoff_t offset, struct xe_pt *xe_child, u64 pte)
{ … }
static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
struct xe_pt_stage_bind_walk *xe_walk)
{ … }
static bool
xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{ … }
static bool
xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
{ … }
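/*
 * Walk callback for staging a bind: emits leaf PTEs (using huge or 64K
 * entries where the range and memory layout allow it) and allocates the
 * child page-tables needed to descend further.
 */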
static int
xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
unsigned int level, u64 addr, u64 next,
struct xe_ptw **child,
enum page_walk_action *action,
struct xe_pt_walk *walk)
{ … }
static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = …;
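/**
 * xe_pt_stage_bind() - Build a disconnected page-table tree for a given
 * address range.
 * @tile: The tile we're building for.
 * @vma: The vma indicating the address range.
 * @entries: Storage for the update entries used to connect the tree to the
 * main tree at commit time.
 * @num_entries: On output contains the number of @entries used.
 *
 * Builds update entries for already existing shared page-tables, and for
 * new shared and non-shared page-tables that are not yet connected to the
 * main vm tree. The result is connected to the vm's tree at commit time,
 * for the GPU via the migrate engine and for the CPU via the commit helpers
 * below.
 *
 * Return: 0 on success, negative error code on error.
 */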
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
{ … }
static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
struct xe_pt_walk *walk,
enum page_walk_action *action,
pgoff_t *offset, pgoff_t *end_offset)
{ … }
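/* Walk state for the zap_ptes walk. */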
struct xe_pt_zap_ptes_walk { … };
static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
unsigned int level, u64 addr, u64 next,
struct xe_ptw **child,
enum page_walk_action *action,
struct xe_pt_walk *walk)
{ … }
static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = …;
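/**
 * xe_pt_zap_ptes() - Zap (zero) the GPU ptes of an address range.
 * @tile: The tile we're zapping for.
 * @vma: GPU VMA detailing the address range.
 *
 * Used by eviction and userptr invalidation to clear the leaf PTEs, or the
 * pointers to lower-level page-tables, covering @vma. The caller must hold
 * locks preventing concurrent updates of the range's page-table
 * connectivity and entries.
 *
 * Return: Whether ptes were actually updated and a TLB invalidation is
 * required.
 */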
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
{ … }
static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
struct iosys_map *map, void *data,
u32 qword_ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update)
{ … }
static void xe_pt_cancel_bind(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries)
{ … }
static void xe_pt_commit_locks_assert(struct xe_vma *vma)
{ … }
static void xe_pt_commit(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, struct llist_head *deferred)
{ … }
static void xe_pt_abort_bind(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, bool rebind)
{ … }
static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, bool rebind)
{ … }
static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
u32 num_entries)
{ … }
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
{ … }
static void xe_vm_dbg_print_entries(struct xe_device *xe,
const struct xe_vm_pgtable_update *entries,
unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{ … }
#else
{}
#endif
static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{ … }
static int job_test_add_deps(struct xe_sched_job *job,
struct dma_resv *resv,
enum dma_resv_usage usage)
{ … }
static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
{ … }
static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
struct xe_sched_job *job)
{ … }
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
struct xe_vm *vm,
struct xe_vma_ops *vops,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_range_fence_tree *rftree)
{ … }
static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{ … }
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{ … }
#else
static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
return false;
}
#endif
static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
struct xe_vm_pgtable_update_ops *pt_update)
{ … }
static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
struct xe_vm_pgtable_update_ops *pt_update)
{ … }
static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{ … }
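/*
 * TLB invalidation fence: once the fence it depends on has signaled, a GT
 * TLB invalidation is issued for the tracked range and asid.
 */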
struct invalidation_fence { … };
static void invalidation_fence_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{ … }
static void invalidation_fence_work_func(struct work_struct *w)
{ … }
static void invalidation_fence_init(struct xe_gt *gt,
struct invalidation_fence *ifence,
struct dma_fence *fence,
u64 start, u64 end, u32 asid)
{ … }
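/* Walk state for the stage_unbind walk. */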
struct xe_pt_stage_unbind_walk { … };
static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
const struct xe_pt *child,
enum page_walk_action *action,
struct xe_pt_walk *walk)
{ … }
static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
unsigned int level, u64 addr, u64 next,
struct xe_ptw **child,
enum page_walk_action *action,
struct xe_pt_walk *walk)
{ … }
static int
xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
unsigned int level, u64 addr, u64 next,
struct xe_ptw **child,
enum page_walk_action *action,
struct xe_pt_walk *walk)
{ … }
static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = …;
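/**
 * xe_pt_stage_unbind() - Build page-table update structures for an unbind
 * operation.
 * @tile: The tile we're unbinding for.
 * @vma: The vma we're unbinding.
 * @entries: Caller-provided storage for the update structures.
 *
 * Attempts to remove all page-tables that the vma is the only user of, and
 * marks them for having their pointers in the parent cleared.
 *
 * Return: The number of entries used.
 */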
static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_vm_pgtable_update *entries)
{ … }
static void
xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
struct xe_tile *tile, struct iosys_map *map,
void *ptr, u32 qword_ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update)
{ … }
static void xe_pt_abort_unbind(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries)
{ … }
static void
xe_pt_commit_prepare_unbind(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries)
{ … }
static void
xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma)
{ … }
static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
{ … }
static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma)
{ … }
static int unbind_op_prepare(struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma)
{ … }
static int op_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma_op *op)
{ … }
static void
xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
{ … }
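/**
 * xe_pt_update_ops_prepare() - Prepare PT update operations.
 * @tile: Tile of PT update operations.
 * @vops: VMA operations.
 *
 * Prepare PT update operations: update internal PT state, allocate memory
 * for the page tables being built, and create the PT update operations for
 * leaf insertion / removal.
 *
 * Return: 0 on success, negative error code on error.
 */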
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
{ … }
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{ … }
static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{ … }
static void op_commit(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma_op *op, struct dma_fence *fence,
struct dma_fence *fence2)
{ … }
static const struct xe_migrate_pt_update_ops migrate_ops = …;
static const struct xe_migrate_pt_update_ops userptr_migrate_ops = …;
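/**
 * xe_pt_update_ops_run() - Run PT update operations.
 * @tile: Tile of PT update operations.
 * @vops: VMA operations.
 *
 * Run the prepared PT update operations: commit the internal PT state
 * changes, create the job that writes the leaf insertions / removals, and
 * install the job fence where required.
 *
 * Return: fence on success, ERR_PTR() on error.
 */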
struct dma_fence *
xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{ … }
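/**
 * xe_pt_update_ops_fini() - Finish PT update operations.
 * @tile: Tile of PT update operations.
 * @vops: VMA operations.
 *
 * Finish the PT update operations by committing the destruction of page
 * table memory that is no longer needed.
 */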
void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
{ … }
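/**
 * xe_pt_update_ops_abort() - Abort PT update operations.
 * @tile: Tile of PT update operations.
 * @vops: VMA operations.
 *
 * Abort the PT update operations by unwinding the internal PT state.
 */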
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
{ … }