// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <linux/slab.h>
#include <linux/pci.h>

#include "../habanalabs.h"

#include <trace/events/habanalabs.h>

/**
 * hl_mmu_get_funcs() - get MMU functions structure
 * @hdev: habanalabs device structure.
 * @pgt_residency: page table residency.
 * @is_dram_addr: true if we need HMMU functions.
 *
 * Return: the appropriate MMU functions structure.
 */
static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
									bool is_dram_addr)
{}

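/**
 * hl_is_dram_va() - check if a virtual address is in the device's DRAM range.
 * @hdev: habanalabs device structure.
 * @virt_addr: the virtual address to check.
 *
 * Return: true if the address falls inside the DRAM VA range and should be
 *         handled by the DRAM MMU, false otherwise.
 */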
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{}

/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{}

/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash table
 * to hold all page-table hops related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{}

/**
 * hl_mmu_ctx_fini() - disable a ctx from using the MMU module.
 * @ctx: pointer to the context structure.
 *
 * This function does the following:
 * - Free any pgts which were not freed yet.
 * - Free the mutex.
 * - Free DRAM default page mapping hops.
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{}

/**
 * hl_mmu_get_real_page_size() - get the real page size to use in a map/unmap operation.
 * @hdev: pointer to device data.
 * @mmu_prop: MMU properties.
 * @page_size: page size requested by the caller.
 * @real_page_size: set here the actual page size to use for the operation.
 * @is_dram_addr: true if DRAM address, otherwise false.
 *
 * Return: 0 on success, otherwise non-zero error code.
 *
 * Note that this is a general implementation that fits most MMU architectures.
 * However, as it is used as an MMU function:
 * 1. It shall not be called directly - only through an mmu_func structure instance.
 * 2. Each MMU may override the implementation internally.
 */
int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
				u32 page_size, u32 *real_page_size, bool is_dram_addr)
{}

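/**
 * hl_mmu_get_prop() - get the MMU properties matching a page size and address type.
 * @hdev: habanalabs device structure.
 * @page_size: page size used by the mapping.
 * @is_dram_addr: true if DRAM address, otherwise false.
 *
 * Return: pointer to the matching MMU properties structure.
 */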
static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
							bool is_dram_addr)
{}

/**
 * hl_mmu_unmap_page() - unmap a virtual address.
 * @ctx: pointer to the context structure.
 * @virt_addr: virt addr to unmap.
 * @page_size: size of the page to unmap.
 * @flush_pte: whether to do a PCI flush.
 *
 * This function does the following:
 * - Check that the virt addr is mapped.
 * - Unmap the virt addr and free pgts if possible.
 *
 * Return: 0 on success, -EINVAL if the given addr is not mapped.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be
 * implemented at a higher level in order to protect the entire unmapping of
 * the memory area.
 *
 * For optimization reasons, a PCI flush may be requested once after unmapping
 * a large area.
 */
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte)
{}

/**
 * hl_mmu_map_page() - map a virtual address to a physical address.
 * @ctx: pointer to the context structure.
 * @virt_addr: virt addr to map from.
 * @phys_addr: phys addr to map to.
 * @page_size: physical page size.
 * @flush_pte: whether to do a PCI flush.
 *
 * This function does the following:
 * - Check that the virt addr is not mapped.
 * - Allocate pgts as necessary in order to map the virt addr to the phys addr.
 *
 * Return: 0 on success, -EINVAL if the addr is already mapped, or -ENOMEM on
 * allocation failure.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * at a higher level in order to protect the entire mapping of the memory area.
 *
 * For optimization reasons, a PCI flush may be requested once after mapping a
 * large area.
 */
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
			bool flush_pte)
{}
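
/*
 * Illustrative usage sketch (not taken from this file): mapping a physically
 * contiguous area page by page while requesting a PCI flush only for the
 * last page, under the caller's higher-level mapping lock:
 *
 *	for (off = 0 ; off < size ; off += page_size) {
 *		rc = hl_mmu_map_page(ctx, virt_addr + off, phys_addr + off,
 *					page_size,
 *					(off + page_size) >= size);
 *		if (rc)
 *			break;
 *	}
 *
 * hl_mmu_map_contiguous() below implements this pattern.
 */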

/**
 * hl_mmu_map_contiguous() - wrapper for hl_mmu_map_page() for mapping
 *                           contiguous physical memory.
 * @ctx: pointer to the context structure.
 * @virt_addr: virt addr to map from.
 * @phys_addr: phys addr to map to.
 * @size: size to map.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
					u64 phys_addr, u32 size)
{}

/**
 * hl_mmu_unmap_contiguous() - wrapper for hl_mmu_unmap_page() for unmapping
 *                             contiguous physical memory.
 * @ctx: pointer to the context structure.
 * @virt_addr: virt addr to unmap.
 * @size: size to unmap.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size)
{}

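/**
 * hl_mmu_pa_page_with_offset() - add the in-page offset to the translated page.
 * @ctx: pointer to the context structure.
 * @virt_addr: the virtual address that was translated.
 * @hops: HOPs info from which the physical page is taken.
 * @phys_addr: set here the full physical address, including the offset within
 *             the page.
 */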
static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
						struct hl_mmu_hop_info *hops,
						u64 *phys_addr)
{}

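/**
 * hl_mmu_va_to_pa() - translate a device virtual address to a physical address.
 * @ctx: pointer to the context structure.
 * @virt_addr: the virtual address to translate.
 * @phys_addr: set here the resulting physical address.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */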
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
{}

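/**
 * hl_mmu_get_tlb_info() - get the translation (HOPs) info for a virtual address.
 * @ctx: pointer to the context structure.
 * @virt_addr: the virtual address for which to get info.
 * @hops: set here the HOPs info for the mapping.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */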
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
			struct hl_mmu_hop_info *hops)
{}

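/**
 * hl_mmu_if_set_funcs() - set the MMU functions according to the ASIC's MMU
 *                         implementation.
 * @hdev: habanalabs device structure.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */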
int hl_mmu_if_set_funcs(struct hl_device *hdev)
{}

/**
 * hl_mmu_scramble_addr() - The generic MMU address scrambling routine.
 * @hdev: pointer to device data.
 * @addr: The address to scramble.
 *
 * Return: The scrambled address.
 */
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
{
	/*
	 * Generic implementation: the address is returned unchanged. ASICs
	 * that scramble device memory addresses may override this routine.
	 */
	return addr;
}

/**
 * hl_mmu_descramble_addr() - The generic MMU address descrambling routine.
 * @hdev: pointer to device data.
 * @addr: The address to descramble.
 *
 * Return: The descrambled address.
 */
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
{
	/* Generic implementation: the address is returned unchanged */
	return addr;
}

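/**
 * hl_mmu_invalidate_cache() - invalidate the device's MMU cache.
 * @hdev: habanalabs device structure.
 * @is_hard: true to request a hard (full) invalidation.
 * @flags: flags describing which MMU caches to invalidate.
 *
 * Dispatches to the ASIC-specific invalidation routine.
 * Return: 0 on success, otherwise non-zero error code.
 */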
int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
{}

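/**
 * hl_mmu_invalidate_cache_range() - invalidate the MMU cache for a VA range.
 * @hdev: habanalabs device structure.
 * @is_hard: true to request a hard (full) invalidation.
 * @flags: flags describing which MMU caches to invalidate.
 * @asid: the ASID the range belongs to.
 * @va: start virtual address of the range.
 * @size: size of the range.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */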
int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
					u32 flags, u32 asid, u64 va, u64 size)
{}

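/*
 * hl_mmu_prefetch_work_function() - work handler that performs the MMU cache
 * prefetch requested by hl_mmu_prefetch_cache_range().
 */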
static void hl_mmu_prefetch_work_function(struct work_struct *work)
{}

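/**
 * hl_mmu_prefetch_cache_range() - request an MMU cache prefetch of a VA range.
 * @ctx: pointer to the context structure.
 * @flags: flags describing which MMU caches to prefetch.
 * @asid: the ASID the range belongs to.
 * @va: start virtual address of the range.
 * @size: size of the range.
 *
 * The prefetch itself is executed asynchronously from a work item (see
 * hl_mmu_prefetch_work_function() above).
 * Return: 0 on success, otherwise non-zero error code.
 */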
int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{}

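/**
 * hl_mmu_get_next_hop_addr() - get the address of the next HOP from a PTE.
 * @ctx: pointer to the context structure.
 * @curr_pte: the PTE from which to take the next HOP address.
 *
 * Return: the next HOP address if the PTE is present, otherwise ULLONG_MAX.
 */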
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	/*
	 * Sketch implementation: a present PTE holds the physical address of
	 * the next HOP in its address bits. PAGE_PRESENT_MASK and
	 * HOP_PHYS_ADDR_MASK are assumed to come from the ASIC MMU headers.
	 */
	return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) :
						ULLONG_MAX;
}

/**
 * hl_mmu_get_hop_pte_phys_addr() - extract the PTE physical address from a HOP.
 * @ctx: pointer to the context structure.
 * @mmu_prop: MMU properties.
 * @hop_idx: HOP index.
 * @hop_addr: HOP address.
 * @virt_addr: virtual address for the translation.
 *
 * Return: the physical address of the matching PTE on success, otherwise U64_MAX.
 */
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
					u8 hop_idx, u64 hop_addr, u64 virt_addr)
{}

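/*
 * mmu_dma_mem_free_from_chunk() - gen_pool chunk iteration callback, used by
 * hl_mmu_hr_pool_destroy() to free the DMA memory backing each pool chunk.
 */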
static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
					struct gen_pool_chunk *chunk,
					void *data)
{}

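/*
 * hl_mmu_hr_flush() - ensure that all writes to the host-resident page tables
 * are visible to the device before it accesses them.
 */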
void hl_mmu_hr_flush(struct hl_ctx *ctx)
{}

/**
 * hl_mmu_hr_pool_destroy() - destroy the host-resident MMU genpool.
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 *
 * This function does the following:
 * - Free the entries allocated for shadow HOP0.
 * - Free the pool chunks.
 * - Free the pool.
 */
static void hl_mmu_hr_pool_destroy(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv,
					u32 hop_table_size)
{}

/**
 * hl_mmu_hr_init() - initialize the host-resident MMU module.
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 * @pgt_size: memory size allocated for the page table.
 *
 * Return: 0 on success, otherwise non-zero error code.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt.
 */
int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
			u64 pgt_size)
{}

/**
 * hl_mmu_hr_fini() - release the host-resident MMU module.
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU host-resident private info.
 * @hop_table_size: HOP table size.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size)
{}

/**
 * hl_mmu_hr_free_hop_remove_pgt() - free HOP and remove PGT from hash
 * @pgt_info: page table info structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 */
void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
					u32 hop_table_size)
{}

/**
 * hl_mmu_hr_pte_phys_to_virt() - translate a PTE phys addr to virt addr.
 * @ctx: pointer to the context structure.
 * @pgt: pgt_info for the HOP hosting the PTE.
 * @phys_pte_addr: phys address of the PTE.
 * @hop_table_size: HOP table size.
 *
 * Return: the PTE virtual address.
 *
 * The function uses the pgt_info to get the HOP base virt addr and obtains
 * the PTE's virt addr by adding the PTE offset.
 */
u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,
							u64 phys_pte_addr, u32 hop_table_size)
{
	/*
	 * Sketch implementation, following the description above: the HOP is
	 * assumed to be hop_table_size aligned, so the PTE offset within the
	 * HOP equals the low bits of its physical address.
	 */
	u64 pte_offset = phys_pte_addr & (hop_table_size - 1);

	return pgt->virt_addr + pte_offset;
}

/**
 * hl_mmu_hr_write_pte() - write HR PTE
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @phys_pte_addr: phys PTE address
 * @val: raw PTE data
 * @hop_table_size: HOP table size
 */
void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
								u64 val, u32 hop_table_size)
{}

/**
 * hl_mmu_hr_clear_pte() - clear HR PTE
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @phys_pte_addr: phys PTE address
 * @hop_table_size: HOP table size
 */
void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
						u32 hop_table_size)
{}

/**
 * hl_mmu_hr_put_pte() - put HR PTE and remove it if necessary (no more PTEs)
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @hr_priv: HR MMU private info
 * @hop_table_size: HOP table size
 *
 * Return: the number of PTEs still in the HOP.
 */
int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info,
						struct hl_mmu_hr_priv *hr_priv,
						u32 hop_table_size)
{}

/**
 * hl_mmu_hr_get_pte() - increase PGT PTE count
 * @ctx: pointer to the context structure
 * @hr_func: host resident functions
 * @phys_hop_addr: HOP phys address
 */
void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr)
{}

/**
 * hl_mmu_hr_get_next_hop_pgt_info() - get pgt_info structure for the next HOP
 * @ctx: pointer to the context structure.
 * @hr_func: host resident functions.
 * @curr_pte: current PTE value.
 *
 * Return: pgt_info structure on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
							struct hl_hr_mmu_funcs *hr_func,
							u64 curr_pte)
{}

/**
 * hl_mmu_hr_alloc_hop() - allocate HOP
 * @ctx: pointer to the context structure.
 * @hr_priv: host resident private info structure.
 * @hr_func: host resident functions.
 * @mmu_prop: MMU properties.
 *
 * Return: pgt_info structure associated with the allocated HOP on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
							struct hl_hr_mmu_funcs *hr_func,
							struct hl_mmu_properties *mmu_prop)
{}

/**
 * hl_mmu_hr_get_alloc_next_hop() - get the next HOP, allocate it if it does not exist
 * @ctx: pointer to the context structure.
 * @hr_priv: host resident private info structure.
 * @hr_func: host resident functions.
 * @mmu_prop: MMU properties.
 * @curr_pte: current PTE value.
 * @is_new_hop: set to true if a new HOP was allocated (it is the caller's
 *              responsibility to initialize it to false).
 *
 * Return: pgt_info structure associated with the next HOP on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
							struct hl_mmu_hr_priv *hr_priv,
							struct hl_hr_mmu_funcs *hr_func,
							struct hl_mmu_properties *mmu_prop,
							u64 curr_pte, bool *is_new_hop)
{
	/*
	 * Sketch implementation, following the description above; assumes
	 * hr_func provides a get_pgt_info() callback for existing HOPs.
	 */
	u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (hop_addr != ULLONG_MAX)
		return hr_func->get_pgt_info(ctx, hop_addr);

	*is_new_hop = true;
	return hl_mmu_hr_alloc_hop(ctx, hr_priv, hr_func, mmu_prop);
}

/**
 * hl_mmu_hr_get_tlb_info() - get the TLB info (info for a specific mapping)
 * @ctx: pointer to the context structure.
 * @virt_addr: the virt address for which to get info.
 * @hops: HOPs info structure.
 * @hr_func: host resident functions.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */
int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
								struct hl_hr_mmu_funcs *hr_func)
{}

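/*
 * The hl_mmu_dr_*() helpers below serve MMU implementations whose page tables
 * are device-resident (DR), i.e. stored in the device's DRAM. A shadow copy
 * of the page tables is kept in host memory; mapping flows update the shadow
 * entries and write the resulting PTEs to the device.
 */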
struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{}

void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr)
{}

void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{}

u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx)
{}

u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx)
{}

u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{}

void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{}

void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{}

void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{}

void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr)
{}

int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr)
{}

u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx)
{}

u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop)
{}

void hl_mmu_dr_flush(struct hl_ctx *ctx)
{}

int hl_mmu_dr_init(struct hl_device *hdev)
{}

void hl_mmu_dr_fini(struct hl_device *hdev)
{}