#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
struct drm_exec;
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
struct amdgpu_mem_stats;

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE …

/* number of entries in a page table */
#define AMDGPU_VM_PTE_COUNT(adev) …
#define AMDGPU_PTE_VALID …
#define AMDGPU_PTE_SYSTEM …
#define AMDGPU_PTE_SNOOPED …
#define AMDGPU_PTE_TMZ …
#define AMDGPU_PTE_EXECUTABLE …
#define AMDGPU_PTE_READABLE …
#define AMDGPU_PTE_WRITEABLE …
#define AMDGPU_PTE_FRAG(x) …
/* PRT (partially resident texture) mapping */
#define AMDGPU_PTE_PRT …
/* PDE is handled as PTE */
#define AMDGPU_PDE_PTE …
#define AMDGPU_PTE_LOG …
/* PTE is handled as PDE (translate further) */
#define AMDGPU_PTE_TF …
/* MALL no-alloc */
#define AMDGPU_PTE_NOALLOC …
/* PDE block fragment size */
#define AMDGPU_PDE_BFS(a) …

/* flag combinations used to disable retry faults, without / with translate further */
#define AMDGPU_VM_NORETRY_FLAGS …
#define AMDGPU_VM_NORETRY_FLAGS_TF …
/* PTE memory type (MTYPE) encoding for GFX9 (VG10) ASICs */
#define AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype) …
#define AMDGPU_PTE_MTYPE_VG10_MASK …
#define AMDGPU_PTE_MTYPE_VG10(flags, mtype) …

/* memory types: NC = non-coherent, CC = cache-coherent */
#define AMDGPU_MTYPE_NC …
#define AMDGPU_MTYPE_CC …
#define AMDGPU_PTE_DEFAULT_ATC …
/* PTE MTYPE encoding for GFX10/GFX11 (NV10) ASICs */
#define AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype) …
#define AMDGPU_PTE_MTYPE_NV10_MASK …
#define AMDGPU_PTE_MTYPE_NV10(flags, mtype) …
/* GFX12 uses a different PTE/PDE bit layout; the *_FLAG(adev) helpers
 * select the per-ASIC encoding
 */
#define AMDGPU_PTE_PRT_GFX12 …
#define AMDGPU_PTE_PRT_FLAG(adev) …
#define AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype) …
#define AMDGPU_PTE_MTYPE_GFX12_MASK …
#define AMDGPU_PTE_MTYPE_GFX12(flags, mtype) …
#define AMDGPU_PTE_DCC …
#define AMDGPU_PTE_IS_PTE …
#define AMDGPU_PDE_BFS_GFX12(a) …
#define AMDGPU_PDE_BFS_FLAG(adev, a) …
#define AMDGPU_PDE_PTE_GFX12 …
#define AMDGPU_PDE_PTE_FLAG(adev) …
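/*
 * Illustrative example (not taken from the driver): a cached system-memory
 * page would typically be mapped with a combination along the lines of
 *
 *   AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *   AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * while the MTYPE, fragment and PRT bits are filled in per ASIC.
 */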
/* how to handle VM faults: never stop, stop after the first fault, or always stop */
#define AMDGPU_VM_FAULT_STOP_NEVER …
#define AMDGPU_VM_FAULT_STOP_FIRST …
#define AMDGPU_VM_FAULT_STOP_ALWAYS …

/* VRAM reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM …
/* VMHUB indices: the GFXHUB instances come first, followed by MMHUB0 and MMHUB1 */
#define AMDGPU_MAX_VMHUBS …
#define AMDGPU_GFXHUB_START …
#define AMDGPU_MMHUB0_START …
#define AMDGPU_MMHUB1_START …
#define AMDGPU_GFXHUB(x) …
#define AMDGPU_MMHUB0(x) …
#define AMDGPU_MMHUB1(x) …
#define AMDGPU_IS_GFXHUB(x) …
#define AMDGPU_IS_MMHUB0(x) …
#define AMDGPU_IS_MMHUB1(x) …
/* VA space reserved for kernel use: CSA, seq64 and trap handler at the top,
 * plus a reserved region at the bottom of the address space
 */
#define AMDGPU_VA_RESERVED_CSA_SIZE …
#define AMDGPU_VA_RESERVED_CSA_START(adev) …
#define AMDGPU_VA_RESERVED_SEQ64_SIZE …
#define AMDGPU_VA_RESERVED_SEQ64_START(adev) …
#define AMDGPU_VA_RESERVED_TRAP_SIZE …
#define AMDGPU_VA_RESERVED_TRAP_START(adev) …
#define AMDGPU_VA_RESERVED_BOTTOM …
#define AMDGPU_VA_RESERVED_TOP …
/* update GFX / compute page tables with the CPU instead of the SDMA */
#define AMDGPU_VM_USE_CPU_FOR_GFX …
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE …

/* page table hierarchy levels */
enum amdgpu_vm_level { … };
/* base tracking structure for a BO used in a VM */
struct amdgpu_vm_bo_base { … };
/* per-ASIC functions for writing page table entries */
struct amdgpu_vm_pte_funcs { … };
/* information about the process owning a VM, kept for fault reporting */
struct amdgpu_task_info { … };
/* parameters describing one page table update */
struct amdgpu_vm_update_params { … };
/* back-end functions (CPU or SDMA) used to apply page table updates */
struct amdgpu_vm_update_funcs { … };
/* information about the most recent GPU VM fault */
struct amdgpu_vm_fault_info { … };
/* a per-client GPU virtual address space */
struct amdgpu_vm { … };
/* global VM state shared by all VMs of a device */
struct amdgpu_vm_manager { … };
struct amdgpu_bo_va_mapping;

/* helpers that dispatch to the per-ASIC vm_pte_funcs */
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) …
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) …
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) …
/* page table update back-ends: CPU writes vs. SDMA submissions */
extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        u32 pasid);
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
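/*
 * Minimal lifecycle sketch (illustrative only; error handling, locking and
 * the surrounding driver structures are elided, "fpriv" stands in for the
 * caller's per-file private data):
 *
 *   struct amdgpu_vm *vm = &fpriv->vm;
 *
 *   r = amdgpu_vm_init(adev, vm, xcp_id);
 *   if (r)
 *           return r;
 *   r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *   ...
 *   amdgpu_vm_fini(adev, vm);       (on file release)
 */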
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
                      unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct ww_acquire_ctx *ticket,
                       int (*callback)(void *p, struct amdgpu_bo *bo),
                       void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm,
                           struct ww_acquire_ctx *ticket);
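/*
 * Rough per-submission update flow (one plausible ordering; see the command
 * submission code for the authoritative sequence):
 *
 *   amdgpu_vm_lock_pd(vm, exec, num_fences);    (inside the drm_exec loop)
 *   amdgpu_vm_validate(adev, vm, ticket, callback, param);
 *   amdgpu_vm_clear_freed(adev, vm, NULL);
 *   amdgpu_vm_handle_moved(adev, vm, ticket);
 *   amdgpu_vm_update_pdes(adev, vm, false);
 */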
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                uint32_t flush_type,
                                uint32_t xcc_mask);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                            struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                           bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
                           struct dma_resv *resv, uint64_t start, uint64_t last,
                           uint64_t flags, uint64_t offset, uint64_t vram_base,
                           struct ttm_resource *res, dma_addr_t *pages_addr,
                           struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t addr, uint64_t offset,
                     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             struct amdgpu_bo_va *bo_va,
                             uint64_t addr, uint64_t offset,
                             uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
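/*
 * Illustrative mapping-management sketch (addresses are made up, BO
 * reservation and error handling elided):
 *
 *   bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *   r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *                        AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *   ...
 *   r = amdgpu_vm_bo_unmap(adev, bo_va, va_addr);
 *   amdgpu_vm_bo_del(adev, bo_va);
 */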
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
struct amdgpu_task_info *
amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);
struct amdgpu_task_info *
amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
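/*
 * The task info returned by the two getters above is reference counted and
 * must be released with amdgpu_vm_put_task_info(), e.g.:
 *
 *   task_info = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *   if (task_info) {
 *           dev_err(adev->dev, "process %s pid %d\n",
 *                   task_info->process_name, task_info->pid);
 *           amdgpu_vm_put_task_info(task_info);
 *   }
 */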
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                            u32 vmid, u32 node_id, uint64_t addr,
                            bool write_fault);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
                          struct amdgpu_mem_stats *stats);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        int level, bool immediate, struct amdgpu_bo_vm **vmbo,
                        int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
                         struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
                          uint64_t start, uint64_t end,
                          uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
                            struct amdgpu_vm_update_params *params);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{ … }
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{ … }
static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{ … }
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{ … }
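/*
 * Typical pairing for the eviction lock helpers above (illustrative):
 *
 *   amdgpu_vm_eviction_lock(vm);
 *   ... update page tables / evicted state ...
 *   amdgpu_vm_eviction_unlock(vm);
 *
 * amdgpu_vm_eviction_trylock() is the non-blocking variant for callers that
 * must not wait, such as the eviction path itself.
 */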
void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
                                  unsigned int pasid,
                                  uint64_t addr,
                                  uint32_t status,
                                  unsigned int vmhub);
void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                struct dma_fence **fence);
#endif