#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>
#include <drm/drm_cache.h>
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#define IVPU_MMU_VPU_ADDRESS_MASK …
#define IVPU_MMU_PGD_INDEX_MASK …
#define IVPU_MMU_PUD_INDEX_MASK …
#define IVPU_MMU_PMD_INDEX_MASK …
#define IVPU_MMU_PTE_INDEX_MASK …
#define IVPU_MMU_ENTRY_FLAGS_MASK …
#define IVPU_MMU_ENTRY_FLAG_CONT …
#define IVPU_MMU_ENTRY_FLAG_NG …
#define IVPU_MMU_ENTRY_FLAG_AF …
#define IVPU_MMU_ENTRY_FLAG_RO …
#define IVPU_MMU_ENTRY_FLAG_USER …
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT …
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE …
#define IVPU_MMU_ENTRY_FLAG_VALID …
#define IVPU_MMU_PAGE_SIZE …
#define IVPU_MMU_CONT_PAGES_SIZE …
#define IVPU_MMU_PTE_MAP_SIZE …
#define IVPU_MMU_PMD_MAP_SIZE …
#define IVPU_MMU_PUD_MAP_SIZE …
#define IVPU_MMU_PGD_MAP_SIZE …
#define IVPU_MMU_PGTABLE_SIZE …
#define IVPU_MMU_DUMMY_ADDRESS …
#define IVPU_MMU_ENTRY_VALID …
#define IVPU_MMU_ENTRY_INVALID …
#define IVPU_MMU_ENTRY_MAPPED …
/*
 * Allocate a single page to back one level of the MMU page-table hierarchy.
 * Returns the CPU virtual address and stores the device-visible address via
 * @dma. NOTE(review): body elided in this view — allocation flags, cache
 * attributes, and failure semantics (presumably returns NULL) unverified.
 */
static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
{ … }
/*
 * Release a page previously obtained from ivpu_pgtable_alloc_page().
 * Takes both the CPU address (@cpu_addr) and the matching DMA address
 * (@dma_addr). NOTE(review): body elided — whether NULL @cpu_addr is
 * tolerated is unverified; confirm callers' expectations.
 */
static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{ … }
/*
 * Initialize @pgtable, presumably allocating the top-level (PGD) table.
 * Returns 0 on success or a negative errno. NOTE(review): body elided —
 * exact initialization steps unverified.
 */
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{ … }
/*
 * Tear down @pgtable, presumably walking all levels and freeing every
 * allocated table page. Counterpart to ivpu_mmu_pgtable_init().
 * NOTE(review): body elided — walk order and partial-init handling
 * unverified.
 */
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{ … }
/*
 * Get the PUD table for @pgd_idx, presumably allocating and linking it into
 * the PGD if absent ("ensure" semantics). Returns a pointer to the table's
 * entries, or NULL-like failure on allocation error. NOTE(review): body
 * elided — return-on-failure convention unverified.
 */
static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{ … }
/*
 * Get the PMD table at (@pgd_idx, @pud_idx), presumably allocating and
 * linking it if absent. Mirrors ivpu_mmu_ensure_pud() one level down.
 * NOTE(review): body elided — failure semantics unverified.
 */
static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
int pud_idx)
{ … }
/*
 * Get the leaf PTE table at (@pgd_idx, @pud_idx, @pmd_idx), presumably
 * allocating and linking it if absent. Lowest level of the ensure_* chain.
 * NOTE(review): body elided — failure semantics unverified.
 */
static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
int pgd_idx, int pud_idx, int pmd_idx)
{ … }
/*
 * Map a single page: install a PTE translating @vpu_addr -> @dma_addr with
 * the entry flags in @prot. Returns 0 on success or a negative errno
 * (presumably -ENOMEM if a table level cannot be allocated).
 * NOTE(review): body elided — index derivation and flag handling unverified.
 */
static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{ … }
/*
 * Map a 64 KiB region as a run of contiguous PTEs (presumably 16 x 4 KiB
 * entries tagged with IVPU_MMU_ENTRY_FLAG_CONT for TLB efficiency).
 * Returns 0 on success or a negative errno. NOTE(review): body elided —
 * alignment preconditions on @vpu_addr/@dma_addr unverified; confirm
 * against callers.
 */
static int
ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
dma_addr_t dma_addr, u64 prot)
{ … }
/*
 * Invalidate the PTE for the page at @vpu_addr in @ctx.
 * NOTE(review): body elided — whether intermediate tables are freed here or
 * only at context teardown is unverified.
 */
static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{ … }
/*
 * Map @size bytes starting at @vpu_addr to the physically contiguous DMA
 * range at @dma_addr, presumably choosing 64 KiB contiguous mappings where
 * alignment allows and single pages otherwise. Returns 0 or a negative
 * errno. NOTE(review): body elided — cleanup behavior on mid-range failure
 * unverified.
 */
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
{ … }
/*
 * Mark the already-mapped page at @vpu_addr read-only (presumably by OR-ing
 * IVPU_MMU_ENTRY_FLAG_RO into its PTE). Helper for
 * ivpu_mmu_context_set_pages_ro(). NOTE(review): body elided.
 */
static void ivpu_mmu_context_set_page_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr)
{ … }
/*
 * Break the contiguous-mapping hint on the PTE covering @vpu_addr
 * (presumably clears IVPU_MMU_ENTRY_FLAG_CONT so the page can be modified
 * independently of its 64 KiB group). NOTE(review): body elided.
 */
static void ivpu_mmu_context_split_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr)
{ … }
/*
 * Split the whole 64 KiB contiguous mapping containing @vpu_addr into
 * independent 4 KiB PTEs, presumably by calling ivpu_mmu_context_split_page()
 * for each page in the group. NOTE(review): body elided — alignment handling
 * of @vpu_addr within the 64 KiB region unverified.
 */
static void ivpu_mmu_context_split_64k_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr)
{ … }
/*
 * Public API: make @size bytes starting at @vpu_addr read-only for the VPU.
 * Presumably splits any 64 KiB contiguous mappings at the range boundaries
 * before flipping per-page RO bits, then flushes/invalidates as needed.
 * Returns 0 on success or a negative errno. NOTE(review): body elided —
 * locking and TLB-invalidation behavior unverified.
 */
int
ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
size_t size)
{ … }
/*
 * Unmap @size bytes starting at @vpu_addr, presumably invalidating one PTE
 * per page via ivpu_mmu_context_unmap_page(). NOTE(review): body elided —
 * behavior on partially-mapped ranges unverified.
 */
static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{ … }
/*
 * Public API: map a scatter-gather table into @ctx starting at @vpu_addr.
 * @llc_coherent presumably selects the IVPU_MMU_ENTRY_FLAG_LLC_COHERENT
 * attribute on the created entries. Returns 0 on success or a negative
 * errno. NOTE(review): body elided — rollback on partial failure and TLB
 * invalidation unverified; confirm alignment requirements on @vpu_addr.
 */
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{ … }
/*
 * Public API: unmap the range previously mapped from @sgt at @vpu_addr.
 * Counterpart to ivpu_mmu_context_map_sgt(). NOTE(review): body elided —
 * TLB invalidation and error reporting (void return) unverified.
 */
void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt)
{ … }
/*
 * Public API: reserve @size bytes of VPU address space within @range using
 * the context's drm_mm allocator, recording the allocation in @node.
 * Returns 0 on success or a negative errno (presumably -ENOSPC when the
 * range is exhausted). NOTE(review): body elided — alignment policy
 * (4 KiB vs 64 KiB) unverified.
 */
int
ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node)
{ … }
/*
 * Public API: release the VPU address-space reservation held by @node.
 * Counterpart to ivpu_mmu_context_insert_node(). NOTE(review): body elided.
 */
void
ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{ … }
/*
 * Initialize an MMU context bound to stream/substream @context_id:
 * presumably sets up locking, the drm_mm address-space manager, and the
 * page-table root. Returns 0 or a negative errno. NOTE(review): body
 * elided — which address range is seeded into drm_mm unverified.
 */
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{ … }
/*
 * Tear down an MMU context: presumably frees page tables and the drm_mm
 * manager. Counterpart to ivpu_mmu_context_init(). NOTE(review): body
 * elided — whether outstanding mappings are expected to be gone first is
 * unverified.
 */
static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{ … }
/*
 * Public API: initialize the device-global MMU context (presumably SSID 0,
 * used for firmware/global mappings). Returns 0 or a negative errno.
 * NOTE(review): body elided.
 */
int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{ … }
/*
 * Public API: tear down the device-global MMU context. Counterpart to
 * ivpu_mmu_global_context_init(). NOTE(review): body elided.
 */
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{ … }
/*
 * Public API: initialize the reserved MMU context (presumably a fixed SSID
 * set aside for driver/firmware use, distinct from user contexts).
 * Returns 0 or a negative errno. NOTE(review): body elided — which SSID is
 * reserved and whether it is registered with the MMU hardware here is
 * unverified.
 */
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{ … }
/*
 * Public API: tear down the reserved MMU context. Counterpart to
 * ivpu_mmu_reserved_context_init(). NOTE(review): body elided.
 */
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{ … }
/*
 * Public API: flag the user context identified by @ssid as invalid
 * (presumably called from the MMU fault/event path so subsequent use can be
 * rejected). NOTE(review): body elided — locking and lookup of ssid->ctx
 * unverified.
 */
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{ … }
/*
 * Public API: initialize a per-file user MMU context with stream id @ctx_id,
 * presumably wrapping ivpu_mmu_context_init() plus hardware SSID
 * registration. Returns 0 or a negative errno. NOTE(review): body elided.
 */
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{ … }
/*
 * Public API: tear down a per-file user MMU context. Counterpart to
 * ivpu_mmu_user_context_init(). NOTE(review): body elided — ordering vs.
 * outstanding VPU jobs unverified.
 */
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{ … }