// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Intel Corporation. * Copyright 2018 Google LLC. * * Author: Tuukka Toivonen <[email protected]> * Author: Sakari Ailus <[email protected]> * Author: Samu Onkalo <[email protected]> * Author: Tomasz Figa <[email protected]> * */ #include <linux/dma-mapping.h> #include <linux/iopoll.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <asm/set_memory.h> #include "ipu3-mmu.h" #define IPU3_PT_BITS … #define IPU3_PT_PTES … #define IPU3_PT_SIZE … #define IPU3_PT_ORDER … #define IPU3_ADDR2PTE(addr) … #define IPU3_PTE2ADDR(pte) … #define IPU3_L2PT_SHIFT … #define IPU3_L2PT_MASK … #define IPU3_L1PT_SHIFT … #define IPU3_L1PT_MASK … #define IPU3_MMU_ADDRESS_BITS … #define IMGU_REG_BASE … #define REG_TLB_INVALIDATE … #define TLB_INVALIDATE … #define REG_L1_PHYS … #define REG_GP_HALT … #define REG_GP_HALTED … struct imgu_mmu { … }; static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info) { … } /** * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer * @mmu: MMU to perform the invalidate operation on * * This function invalidates the whole TLB. Must be called when the hardware * is powered on. */ static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu) { … } static void call_if_imgu_is_powered(struct imgu_mmu *mmu, void (*func)(struct imgu_mmu *mmu)) { … } /** * imgu_mmu_set_halt - set CIO gate halt bit * @mmu: MMU to set the CIO gate bit in. * @halt: Desired state of the gate bit. * * This function sets the CIO gate bit that controls whether external memory * accesses are allowed. Must be called when the hardware is powered on. */ static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt) { … } /** * imgu_mmu_alloc_page_table - allocate a pre-filled page table * @pteval: Value to initialize for page table entries with. * * Return: Pointer to allocated page table or NULL on failure. 
 */
static u32 *imgu_mmu_alloc_page_table(u32 pteval)
{
	…
}

/**
 * imgu_mmu_free_page_table - free page table
 * @pt: Page table to free.
 */
static void imgu_mmu_free_page_table(u32 *pt)
{
	…
}

/**
 * address_to_pte_idx - split IOVA into L1 and L2 page table indices
 * @iova: IOVA to split.
 * @l1pt_idx: Output for the L1 page table index.
 * @l2pt_idx: Output for the L2 page index.
 */
static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
				      u32 *l2pt_idx)
{
	…
}

/*
 * NOTE(review): body elided — presumably looks up (and allocates on demand)
 * the L2 page table for @l1pt_idx; confirm against the full source.
 */
static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
	…
}

/*
 * NOTE(review): body elided — single-page mapping primitive used by
 * imgu_mmu_map(); presumably writes the PTE for @iova -> @paddr.
 */
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
			  phys_addr_t paddr)
{
	…
}

/**
 * imgu_mmu_map - map a buffer to a physical address
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @paddr: the physical address
 * @size: length of the mappable area
 *
 * The function has been adapted from iommu_map() in
 * drivers/iommu/iommu.c .
 */
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	…
}

/**
 * imgu_mmu_map_sg - Map a scatterlist
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @sg: the scatterlist to map
 * @nents: number of entries in the scatterlist
 *
 * The function has been adapted from default_iommu_map_sg() in
 * drivers/iommu/iommu.c .
 */
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
		       struct scatterlist *sg, unsigned int nents)
{
	…
}

/*
 * NOTE(review): body elided — unmap primitive used by imgu_mmu_unmap();
 * presumably clears PTEs for the range and returns the unmapped length.
 */
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu, unsigned long iova,
			       size_t size)
{
	…
}

/**
 * imgu_mmu_unmap - Unmap a buffer
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @size: the length of the buffer
 *
 * The function has been adapted from iommu_unmap() in
 * drivers/iommu/iommu.c .
 */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
		      size_t size)
{
	…
}

/**
 * imgu_mmu_init() - initialize IPU3 MMU block
 *
 * @parent: struct device parent
 * @base: IOMEM base of hardware registers.
 *
 * Return: Pointer to IPU3 MMU private data pointer or ERR_PTR() on error.
 */
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
	…
}

/**
 * imgu_mmu_exit() - clean up IPU3 MMU block
 *
 * @info: MMU mappable range
 */
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
	…
}

/*
 * NOTE(review): body elided — power-management hook invoked on suspend;
 * presumably halts/quiesces the MMU. Confirm against the full source.
 */
void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
	…
}

/*
 * NOTE(review): body elided — power-management hook invoked on resume;
 * presumably reprograms the L1 table address and re-enables the MMU.
 * Confirm against the full source.
 */
void imgu_mmu_resume(struct imgu_mmu_info *info)
{
	…
}