#include "xe_vm.h"

#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/swap.h>

#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "regs/xe_gtt_defs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_hmm.h"
#include "xe_migrate.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{ … }
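/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and can be called without the
 * vm->userptr.notifier_lock held. There is no guarantee that the vma
 * userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier lock.
 *
 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
 */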
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{ … }
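/* Pin (or re-pin) the CPU pages backing a userptr vma via xe_hmm. */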
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{ … }
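/*
 * Preempt fences: a long-running (LR) VM installs a preempt fence for each
 * compute exec queue attached to it. Eviction or userptr invalidation
 * triggers the fences, suspending the queues until the rebind worker below
 * has revalidated and rebound the VM's memory.
 */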
static bool preempt_fences_waiting(struct xe_vm *vm)
{ … }
static void free_preempt_fences(struct list_head *list)
{ … }
static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
unsigned int *count)
{ … }
static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{ … }
static bool xe_vm_is_idle(struct xe_vm *vm)
{ … }
static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
{ … }
static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
{ … }
static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
struct drm_exec *exec)
{ … }
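/**
 * xe_vm_add_compute_exec_queue() - Add a compute exec queue to the VM
 * @vm: The VM.
 * @q: The exec_queue
 *
 * Return: 0 on success, negative error code otherwise.
 */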
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{ … }
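/**
 * xe_vm_remove_compute_exec_queue() - Remove a compute exec queue from the VM
 * @vm: The VM.
 * @q: The exec_queue
 */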
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{ … }
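/**
 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
 * that need repinning
 * @vm: The VM.
 *
 * This function checks for whether the VM has userptrs that need repinning,
 * and provides a release-type barrier on the userptr.notifier_lock after
 * checking.
 *
 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
 */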
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{ … }
#define XE_VM_REBIND_RETRY_TIMEOUT_MS …
static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{ … }
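/**
 * xe_vm_validate_should_retry() - Whether to retry after a validate error
 * @exec: The drm_exec object used for locking before validation.
 * @err: The error returned from ttm_bo_validate().
 * @end: A ktime_t cookie that should be set to 0 before first use and
 * that should be reused on subsequent calls.
 *
 * With multiple active VMs, under memory pressure, it is possible that
 * ttm_bo_validate() runs into -EDEADLK and in such case returns -ENOMEM.
 * Until ttm properly handles locking in such scenarios, the best thing
 * the driver can do is retry with a timeout.
 *
 * Return: true if a retry after drm_exec_init() is recommended;
 * false otherwise.
 */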
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
{ … }
static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{ … }
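/**
 * xe_vm_validate_rebind() - Validate buffer objects and rebind vmas
 * @vm: The vm for which we are rebinding.
 * @exec: The drm_exec object with the objects locked.
 * @num_fences: The number of fences to reserve for the operation, not
 * including rebinds and validations.
 *
 * Validates all evicted gem objects and rebinds their vmas. Note that
 * rebindings may cause evictions and hence the validation-rebind
 * sequence is rerun until there are no more objects to validate.
 *
 * Return: 0 on success, negative error code on error. In particular,
 * may return -EINTR or -ERESTARTSYS if interrupted, and -EDEADLK if
 * the drm_exec transaction needs to be restarted.
 */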
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
unsigned int num_fences)
{ … }
static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
bool *done)
{ … }
static void preempt_rebind_work_func(struct work_struct *w)
{ … }
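/*
 * MMU-notifier callback: invoked when the CPU address range backing a
 * userptr vma is invalidated. Marks the vma for repin and, on LR VMs,
 * triggers the preempt fences so the rebind worker can pick the vma up.
 */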
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{ … }
static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = …;
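/**
 * xe_vm_userptr_pin() - Pin all userptrs which need to be repinned
 * @vm: The VM.
 *
 * This function pins all invalidated userptrs of the VM and prepares them
 * for rebind.
 *
 * Return: 0 on success, negative error code otherwise.
 */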
int xe_vm_userptr_pin(struct xe_vm *vm)
{ … }
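/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning
 * @vm: The VM.
 *
 * Check whether the VM might have userptrs that need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */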
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{ … }
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{ … }
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
u8 tile_mask)
{ … }
static struct dma_fence *ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops);
static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs);
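/*
 * Rebind all vmas on the VM's rebind list, either from the preempt rebind
 * worker or as part of executing a VM bind / exec operation.
 */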
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
{ … }
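/**
 * xe_vma_rebind() - Rebind a vma
 * @vm: The vm.
 * @vma: The vma to rebind.
 * @tile_mask: Mask of tiles on which to rebind the vma.
 *
 * Return: A dma_fence signalling completion of the rebind, or an error
 * pointer on failure.
 */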
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask)
{ … }
static void xe_vma_free(struct xe_vma *vma)
{ … }
#define VMA_CREATE_FLAG_READ_ONLY …
#define VMA_CREATE_FLAG_IS_NULL …
#define VMA_CREATE_FLAG_DUMPABLE …
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
u64 start, u64 end,
u16 pat_index, unsigned int flags)
{ … }
static void xe_vma_destroy_late(struct xe_vma *vma)
{ … }
static void vma_destroy_work_func(struct work_struct *w)
{ … }
static void vma_destroy_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{ … }
static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
{ … }
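/**
 * xe_vm_lock_vma() - drm_exec utility to lock a vma
 * @exec: The drm_exec object we're currently locking for.
 * @vma: The vma for which we want to lock the vm resv and any attached
 * object's resv.
 *
 * Return: 0 on success, negative error code on error. In particular
 * may return -EDEADLK on WW transaction contention and -EINTR if
 * an interruptible wait is terminated by a signal.
 */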
int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
{ … }
static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{ … }
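/**
 * xe_vm_find_overlapping_vma() - Find the first vma overlapping a range
 * @vm: The VM.
 * @start: Start address of the range.
 * @range: Size of the range.
 *
 * Requires the vm lock to be held.
 *
 * Return: The first vma overlapping [@start, @start + @range), or NULL.
 */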
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
{ … }
static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
{ … }
static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
{ … }
static struct drm_gpuva_op *xe_vm_op_alloc(void)
{ … }
static void xe_vm_free(struct drm_gpuvm *gpuvm);
static const struct drm_gpuvm_ops gpuvm_ops = …;
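/*
 * Page-table encode helpers: translate PAT indices, page sizes and
 * addresses into PDE/PTE bits. Wired into the VM via xelp_pt_ops below.
 */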
static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
{ … }
static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
u32 pt_level)
{ … }
static u64 pte_encode_ps(u32 pt_level)
{ … }
static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
const u16 pat_index)
{ … }
static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
u16 pat_index, u32 pt_level)
{ … }
static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
u16 pat_index, u32 pt_level)
{ … }
static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
u16 pat_index,
u32 pt_level, bool devmem, u64 flags)
{ … }
static const struct xe_pt_ops xelp_pt_ops = …;
static void vm_destroy_work_func(struct work_struct *w);
static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm)
{ … }
static void xe_vm_free_scratch(struct xe_vm *vm)
{ … }
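/**
 * xe_vm_create() - Create a VM
 * @xe: The xe device.
 * @flags: XE_VM_FLAG_* flags selecting, among other things, scratch-page,
 * long-running and fault-mode behaviour.
 *
 * Return: Pointer to the new VM on success, an error pointer on failure.
 */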
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
{ … }
static void xe_vm_close(struct xe_vm *vm)
{ … }
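/**
 * xe_vm_close_and_put() - Close and put a VM
 * @vm: The VM.
 *
 * Tears down the VM's vmas, page tables and scratch pages, then drops the
 * reference obtained at creation. The final free happens once all remaining
 * references are dropped.
 */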
void xe_vm_close_and_put(struct xe_vm *vm)
{ … }
static void vm_destroy_work_func(struct work_struct *w)
{ … }
static void xe_vm_free(struct drm_gpuvm *gpuvm)
{ … }
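/**
 * xe_vm_lookup() - Look up a VM by handle
 * @xef: The file the VM belongs to.
 * @id: The VM handle.
 *
 * On success a reference is taken which the caller must drop with
 * xe_vm_put().
 *
 * Return: The referenced VM, or NULL if no VM with that handle exists.
 */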
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
{ … }
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{ … }
static struct xe_exec_queue *
to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
{ … }
static struct dma_fence *
xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool first_op, bool last_op)
{ … }
static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
u8 tile_mask, bool first_op, bool last_op)
{ … }
static struct xe_user_fence *
find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
{ … }
static struct dma_fence *
xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
u8 tile_mask, bool immediate, bool first_op, bool last_op)
{ … }
static struct dma_fence *
xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool first_op, bool last_op)
{ … }
#define ALL_DRM_XE_VM_CREATE_FLAGS …
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{ … }
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{ … }
static const u32 region_to_mem_type[] = …;
static struct dma_fence *
xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool first_op, bool last_op)
{ … }
static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{ … }
#undef ULL
#define ULL …
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{ … }
#else
static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
{
}
#endif
static struct drm_gpuva_ops *
vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
u64 bo_offset_or_userptr, u64 addr, u64 range,
u32 operation, u32 flags,
u32 prefetch_region, u16 pat_index)
{ … }
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
u16 pat_index, unsigned int flags)
{ … }
static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{ … }
static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{ … }
static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
{ … }
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct drm_gpuva_ops *ops,
struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_vma_ops *vops, bool last)
{ … }
static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
struct xe_vma_op *op)
{ … }
static struct dma_fence *
__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
struct xe_vma_op *op)
{ … }
static struct dma_fence *
xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
{ … }
static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
bool post_commit, bool prev_post_commit,
bool next_post_commit)
{ … }
static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
struct drm_gpuva_ops **ops,
int num_ops_list)
{ … }
static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
bool validate)
{ … }
static int check_ufence(struct xe_vma *vma)
{ … }
static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
struct xe_vma_op *op)
{ … }
static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
struct xe_vm *vm,
struct xe_vma_ops *vops)
{ … }
static struct dma_fence *ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{ … }
static void vma_add_ufence(struct xe_vma *vma, struct xe_user_fence *ufence)
{ … }
static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
struct xe_user_fence *ufence)
{ … }
static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
struct dma_fence *fence)
{ … }
static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{ … }
#define SUPPORTED_FLAGS …
#define XE_64K_PAGE_MASK …
#define ALL_DRM_XE_SYNCS_FLAGS …
static int vm_bind_ioctl_check_args(struct xe_device *xe,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
{ … }
static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
struct xe_exec_queue *q,
struct xe_sync_entry *syncs,
int num_syncs)
{ … }
static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs)
{ … }
static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
u64 addr, u64 range, u64 obj_offset,
u16 pat_index)
{ … }
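/* Handler for DRM_IOCTL_XE_VM_BIND: create, destroy or modify GPU mappings. */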
int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{ … }
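/**
 * xe_vm_lock() - Lock the vm's dma_resv object
 * @vm: The struct xe_vm whose lock is to be locked
 * @intr: Whether to perform any wait interruptible
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted by a signal.
 */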
int xe_vm_lock(struct xe_vm *vm, bool intr)
{ … }
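/**
 * xe_vm_unlock() - Unlock the vm's dma_resv object
 * @vm: The struct xe_vm whose lock is to be released.
 */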
void xe_vm_unlock(struct xe_vm *vm)
{ … }
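/**
 * xe_vm_invalidate_vma() - invalidate GPU mappings for a vma (all GTs)
 * @vma: The vma to invalidate
 *
 * Zeroes the page-table entries owned by this vma, issues TLB invalidations
 * and blocks until they have completed.
 *
 * Return: 0 on success, negative error code otherwise.
 */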
int xe_vm_invalidate_vma(struct xe_vma *vma)
{ … }
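/*
 * VM snapshot support for devcoredump: capture the mapping layout under the
 * vm lock, defer copying the actual contents to the capture-delayed stage,
 * and print the result ascii85-encoded.
 */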
struct xe_vm_snapshot { … };
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm)
{ … }
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
{ … }
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
{ … }
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
{ … }