// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
/* Copyright 2023 Collabora ltd. */

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>
#include <drm/drm_managed.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

#define MAX_AS_SLOTS …

struct panthor_vm;

/**
 * struct panthor_as_slot - Address space slot
 */
struct panthor_as_slot { … };

/**
 * struct panthor_mmu - MMU related data
 */
struct panthor_mmu { … };

/**
 * struct panthor_vm_pool - VM pool object
 */
struct panthor_vm_pool { … };

/**
 * struct panthor_vma - GPU mapping object
 *
 * This is used to track GEM mappings in GPU space.
 */
struct panthor_vma { … };

/**
 * struct panthor_vm_op_ctx - VM operation context
 *
 * With VM operations potentially taking place in a dma-signaling path, we
 * need to make sure everything that might require resource allocation is
 * pre-allocated upfront. This is what this operation context is for.
 *
 * We also collect resources that have been freed, so we can release them
 * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
 * request.
 */
struct panthor_vm_op_ctx { … };

/**
 * struct panthor_vm - VM object
 *
 * A VM is an object representing a GPU (or MCU) virtual address space.
 * It embeds the MMU page table for this address space, a tree containing
 * all the virtual mappings of GEM objects, and other things needed to manage
 * the VM.
 *
 * Except for the MCU VM, which is managed by the kernel, all other VMs are
 * created by userspace and mostly managed by userspace, using the
 * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
 *
 * A portion of the virtual address space is reserved for kernel objects,
 * like heap chunks, and userspace gets to decide how much of the virtual
 * address space is left to the kernel (half of the virtual address space
 * by default).
 */
struct panthor_vm { … };

/**
 * struct panthor_vm_bind_job - VM bind job
 */
struct panthor_vm_bind_job { … };

/**
 * @pt_cache: Cache used to allocate MMU page tables.
 *
 * The pre-allocation pattern forces us to over-allocate to plan for
 * the worst case scenario, and return the pages we didn't use.
 *
 * Having a kmem_cache allows us to speed up allocations.
 */
static struct kmem_cache *pt_cache;
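/*
 * Illustrative sketch only, not the driver's implementation: the kmem_cache
 * usage pattern behind a fixed-size page-table allocator like the one
 * documented below. The example_* helpers are hypothetical, a 4K granule is
 * assumed for the size check, and the real alloc_pt()/free_pt() use the
 * io-pgtable callback signatures instead.
 */
static __maybe_unused void *
example_pt_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
	/* All page tables share the same, granule-derived size. */
	if (WARN_ON(size != SZ_4K))
		return NULL;

	return kmem_cache_zalloc(cache, gfp);
}

static __maybe_unused void
example_pt_free(struct kmem_cache *cache, void *data)
{
	kmem_cache_free(cache, data);
}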
/**
 * alloc_pt() - Custom page table allocator
 * @cookie: Cookie passed at page table allocation time.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 * @gfp: GFP flags.
 *
 * We want a custom allocator so we can use a cache for page table
 * allocations and amortize the cost of the over-reservation that's
 * done to allow asynchronous VM operations.
 *
 * Return: non-NULL on success, NULL if the allocation failed for any
 * reason.
 */
static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
{ … }

/**
 * free_pt() - Custom page table free function
 * @cookie: Cookie passed at page table allocation time.
 * @data: Page table to free.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 */
static void free_pt(void *cookie, void *data, size_t size)
{ … }

static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
{ … }

static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
{ … }

static void lock_region(struct panthor_device *ptdev, u32 as_nr,
			u64 region_start, u64 size)
{ … }

static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{ … }

static int mmu_hw_do_operation(struct panthor_vm *vm,
			       u64 iova, u64 size, u32 op)
{ … }

static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
				 u64 transtab, u64 transcfg, u64 memattr)
{ … }

static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
{ … }

static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
{ … }

static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
{ … }

/**
 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
 * @vm: VM to check.
 *
 * Return: true if the VM has unhandled faults, false otherwise.
 */
bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
{ … }

/**
 * panthor_vm_is_unusable() - Check if the VM is unusable
 * @vm: VM to check.
 *
 * Return: true if the VM is unusable, false otherwise.
 */
bool panthor_vm_is_unusable(struct panthor_vm *vm)
{ … }

static void panthor_vm_release_as_locked(struct panthor_vm *vm)
{ … }

/**
 * panthor_vm_active() - Flag a VM as active
 * @vm: VM to flag as active.
 *
 * Assigns an address space to a VM so it can be used by the GPU/MCU.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_active(struct panthor_vm *vm)
{ … }

/**
 * panthor_vm_idle() - Flag a VM as idle
 * @vm: VM to flag as idle.
 *
 * When we know the GPU is done with the VM (no more jobs to process),
 * we can relinquish the AS slot attached to this VM, if any.
 *
 * We don't release the slot immediately, but instead place the VM in
 * the LRU list, so it can be evicted if another VM needs an AS slot.
 * This way, VMs stay attached to the AS they were given until we run
 * out of free slots, limiting the number of MMU operations (TLB flush
 * and other AS updates). An illustrative sketch of this recycling policy
 * is provided further down in this file.
 */
void panthor_vm_idle(struct panthor_vm *vm)
{ … }

static void panthor_vm_stop(struct panthor_vm *vm)
{ … }

static void panthor_vm_start(struct panthor_vm *vm)
{ … }

/**
 * panthor_vm_as() - Get the AS slot attached to a VM
 * @vm: VM to get the AS slot of.
 *
 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
 */
int panthor_vm_as(struct panthor_vm *vm)
{ … }

static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{ … }

static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
{ … }

static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{ … }

static int panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
				struct sg_table *sgt, u64 offset, u64 size)
{ … }

static int flags_to_prot(u32 flags)
{ … }
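/*
 * Illustrative sketch only, not the driver's implementation: the AS-slot
 * recycling policy described in panthor_vm_idle() above. Idle VMs keep
 * their address space and are parked on an LRU list; a slot is only
 * reclaimed when another VM needs one and no free slot is left. All
 * example_* names are hypothetical, and locking is omitted.
 */
struct example_slot_owner {
	struct list_head lru_node;	/* linked when idle but still attached */
	int as;				/* attached AS slot, or -1 */
};

static __maybe_unused void
example_slot_owner_idle(struct list_head *lru, struct example_slot_owner *owner)
{
	/* Keep the slot, just make it reclaimable by others. */
	if (owner->as >= 0 && list_empty(&owner->lru_node))
		list_add_tail(&owner->lru_node, lru);
}

static __maybe_unused int
example_slot_owner_activate(struct list_head *lru, unsigned long *free_slots,
			    unsigned int nr_slots,
			    struct example_slot_owner *owner)
{
	unsigned int as;

	/* Still attached from a previous activation: just leave the LRU. */
	if (owner->as >= 0) {
		list_del_init(&owner->lru_node);
		return owner->as;
	}

	as = find_first_bit(free_slots, nr_slots);
	if (as == nr_slots) {
		struct example_slot_owner *victim;

		/* No free slot left: steal the least recently used idle one. */
		if (list_empty(lru))
			return -EBUSY;

		victim = list_first_entry(lru, struct example_slot_owner,
					  lru_node);
		list_del_init(&victim->lru_node);
		as = victim->as;
		victim->as = -1;
	} else {
		clear_bit(as, free_slots);
	}

	owner->as = as;
	return as;
}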
/**
 * panthor_vm_alloc_va() - Allocate a region in the auto-va space
 * @vm: VM to allocate a region on.
 * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
 * wants the VA to be automatically allocated from the auto-VA range.
 * @size: size of the VA range.
 * @va_node: drm_mm_node to initialize. Must be zero-initialized.
 *
 * Some GPU objects, like heap chunks, are fully managed by the kernel and
 * need to be mapped to the userspace VM, in the region reserved for kernel
 * objects.
 *
 * This function takes care of allocating a region in the kernel auto-VA
 * space. An illustrative drm_mm-based sketch of such an allocation is
 * provided further down in this file.
 *
 * Return: 0 on success, an error code otherwise.
 */
int panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
			struct drm_mm_node *va_node)
{ … }

/**
 * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
 * @vm: VM to free the region on.
 * @va_node: Memory node representing the region to free.
 */
void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
{ … }

static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
{ … }

static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
				      struct panthor_vm *vm)
{ … }

static struct panthor_vma *
panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
{ … }

static int panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
{ … }

#define PANTHOR_VM_BIND_OP_MAP_FLAGS …

static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
					 struct panthor_vm *vm,
					 struct panthor_gem_object *bo,
					 u64 offset, u64 size, u64 va,
					 u32 flags)
{ … }

static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
					   struct panthor_vm *vm,
					   u64 va, u64 size)
{ … }

static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
						struct panthor_vm *vm)
{ … }

/**
 * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
 * @vm: VM to look into.
 * @va: Virtual address to search for.
 * @bo_offset: Offset of the GEM object mapped at this virtual address.
 * Only valid on success.
 *
 * The object returned by this function might no longer be mapped when the
 * function returns. It's the caller's responsibility to ensure there are no
 * concurrent map/unmap operations making the returned value invalid, or
 * make sure it doesn't matter if the object is no longer mapped.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct panthor_gem_object *
panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
{ … }

#define PANTHOR_VM_MIN_KERNEL_VA_SIZE …

static u64
panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
				    u64 full_va_range)
{ … }

#define PANTHOR_VM_CREATE_FLAGS …

static int
panthor_vm_create_check_args(const struct panthor_device *ptdev,
			     const struct drm_panthor_vm_create *args,
			     u64 *kernel_va_start, u64 *kernel_va_range)
{ … }

/*
 * Only 32 VMs per open file. If that becomes a limiting factor, we can
 * increase this number.
 */
#define PANTHOR_MAX_VMS_PER_FILE …

/**
 * panthor_vm_pool_create_vm() - Create a VM
 * @ptdev: The panthor device.
 * @pool: The VM pool to create this VM in.
 * @args: The VM creation arguments.
 *
 * Return: a positive VM ID on success, a negative error code otherwise.
 */
int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
			      struct panthor_vm_pool *pool,
			      struct drm_panthor_vm_create *args)
{ … }

static void panthor_vm_destroy(struct panthor_vm *vm)
{ … }
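/*
 * Illustrative sketch only, not the driver's implementation: allocating and
 * releasing a node in a kernel-managed auto-VA range with drm_mm, as
 * described in panthor_vm_alloc_va() above. The example_* names, the 4K
 * alignment, and the auto-VA bounds passed by the caller are assumptions.
 */
static __maybe_unused int
example_auto_va_alloc(struct drm_mm *mm, struct drm_mm_node *va_node,
		      u64 size, u64 auto_va_start, u64 auto_va_end)
{
	if (!size || !IS_ALIGNED(size, SZ_4K))
		return -EINVAL;

	/* Let drm_mm pick a free spot inside the auto-VA window. */
	return drm_mm_insert_node_in_range(mm, va_node, size, SZ_4K, 0,
					   auto_va_start, auto_va_end,
					   DRM_MM_INSERT_BEST);
}

static __maybe_unused void
example_auto_va_free(struct drm_mm_node *va_node)
{
	if (drm_mm_node_allocated(va_node))
		drm_mm_remove_node(va_node);
}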
/**
 * panthor_vm_pool_destroy_vm() - Destroy a VM.
 * @pool: VM pool.
 * @handle: VM handle.
 *
 * This function doesn't free the VM object or its resources, it just kills
 * all mappings, and makes sure nothing can be mapped after that point.
 *
 * If there were any active jobs at the time this function was called, these
 * jobs should experience page faults and be killed as a result.
 *
 * The VM resources are freed when the last reference on the VM object is
 * dropped.
 */
int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
{ … }

/**
 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
 * @pool: VM pool to check.
 * @handle: Handle of the VM to retrieve.
 *
 * Return: A valid pointer if the VM exists, NULL otherwise.
 */
struct panthor_vm *
panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
{ … }

/**
 * panthor_vm_pool_destroy() - Destroy a VM pool.
 * @pfile: File.
 *
 * Destroy all VMs in the pool, and release the pool resources.
 *
 * Note that VMs can outlive the pool they were created from if other
 * objects hold a reference to these VMs.
 */
void panthor_vm_pool_destroy(struct panthor_file *pfile)
{ … }

/**
 * panthor_vm_pool_create() - Create a VM pool
 * @pfile: File.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_pool_create(struct panthor_file *pfile)
{ … }

/* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
static void mmu_tlb_flush_all(void *cookie)
{ … }

static void mmu_tlb_flush_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{ … }

static const struct iommu_flush_ops mmu_tlb_ops = …;

static const char *access_type_name(struct panthor_device *ptdev,
				    u32 fault_status)
{ … }

static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
{ … }
PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);

/**
 * panthor_mmu_suspend() - Suspend the MMU logic
 * @ptdev: Device.
 *
 * All we do here is de-assign the AS slots on all active VMs, so things
 * get flushed to the main memory, and no further access to these VMs is
 * possible.
 *
 * We also suspend the MMU IRQ.
 */
void panthor_mmu_suspend(struct panthor_device *ptdev)
{ … }

/**
 * panthor_mmu_resume() - Resume the MMU logic
 * @ptdev: Device.
 *
 * Resume the IRQ.
 *
 * We don't re-enable previously active VMs. We assume other parts of the
 * driver will call panthor_vm_active() on the VMs they intend to use.
 */
void panthor_mmu_resume(struct panthor_device *ptdev)
{ … }

/**
 * panthor_mmu_pre_reset() - Prepare for a reset
 * @ptdev: Device.
 *
 * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
 * don't get asked to do a VM operation while the GPU is down.
 *
 * We don't cleanly shut down the AS slots here, because the reset might
 * come from an AS_ACTIVE_BIT stuck situation.
 */
void panthor_mmu_pre_reset(struct panthor_device *ptdev)
{ … }

/**
 * panthor_mmu_post_reset() - Restore things after a reset
 * @ptdev: Device.
 *
 * Put the MMU logic back in action after a reset. That implies resuming the
 * IRQ and re-enabling the VM_BIND queues.
 */
void panthor_mmu_post_reset(struct panthor_device *ptdev)
{ … }

static void panthor_vm_free(struct drm_gpuvm *gpuvm)
{ … }

/**
 * panthor_vm_put() - Release a reference on a VM
 * @vm: VM to release the reference on. Can be NULL.
 */
void panthor_vm_put(struct panthor_vm *vm)
{ … }

/**
 * panthor_vm_get() - Get a VM reference
 * @vm: VM to get the reference on. Can be NULL.
 *
 * Return: @vm value.
 */
struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
{ … }

/**
 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
 * @vm: VM to query the heap pool on.
 * @create: True if the heap pool should be created when it doesn't exist.
 *
 * Heap pools are per-VM.
 * This function allows one to retrieve the heap pool attached to a VM.
 *
 * If no heap pool exists yet, and @create is true, we create one.
 *
 * The returned panthor_heap_pool should be released with
 * panthor_heap_pool_put().
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm,
						   bool create)
{ … }

static u64 mair_to_memattr(u64 mair)
{ … }

static void panthor_vma_link(struct panthor_vm *vm,
			     struct panthor_vma *vma,
			     struct drm_gpuvm_bo *vm_bo)
{ … }

static void panthor_vma_unlink(struct panthor_vm *vm,
			       struct panthor_vma *vma)
{ … }

static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
{ … }

#define PANTHOR_VM_MAP_FLAGS …

static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
{ … }

static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op, void *priv)
{ … }

static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
{ … }

static const struct drm_gpuvm_ops panthor_gpuvm_ops = …;

/**
 * panthor_vm_resv() - Get the dma_resv object attached to a VM.
 * @vm: VM to get the dma_resv of.
 *
 * Return: A dma_resv object.
 */
struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
{ … }

struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
{ … }

static int panthor_vm_exec_op(struct panthor_vm *vm,
			      struct panthor_vm_op_ctx *op,
			      bool flag_vm_unusable_on_failure)
{ … }

static struct dma_fence *
panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
{ … }

static void panthor_vm_bind_job_release(struct kref *kref)
{ … }

/**
 * panthor_vm_bind_job_put() - Release a VM_BIND job reference
 * @sched_job: Job to release the reference on.
 */
void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
{ … }

static void
panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
{ … }

static enum drm_gpu_sched_stat
panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
{ … }

static const struct drm_sched_backend_ops panthor_vm_bind_ops = …;

/**
 * panthor_vm_create() - Create a VM
 * @ptdev: Device.
 * @for_mcu: True if this is the FW MCU VM.
 * @kernel_va_start: Start of the range reserved for kernel BO mapping.
 * @kernel_va_size: Size of the range reserved for kernel BO mapping.
 * @auto_kernel_va_start: Start of the auto-VA kernel range.
 * @auto_kernel_va_size: Size of the auto-VA kernel range.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct panthor_vm *
panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
		  u64 kernel_va_start, u64 kernel_va_size,
		  u64 auto_kernel_va_start, u64 auto_kernel_va_size)
{ … }

static int panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
					  struct panthor_vm *vm,
					  const struct drm_panthor_vm_bind_op *op,
					  struct panthor_vm_op_ctx *op_ctx)
{ … }

static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
{ … }

/**
 * panthor_vm_bind_job_create() - Create a VM_BIND job
 * @file: File.
 * @vm: VM targeted by the VM_BIND job.
 * @op: VM operation data.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct drm_sched_job *
panthor_vm_bind_job_create(struct drm_file *file,
			   struct panthor_vm *vm,
			   const struct drm_panthor_vm_bind_op *op)
{ … }

/**
 * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
 * @exec: The locking/preparation context.
 * @sched_job: The job to prepare resvs on.
 *
 * Locks and prepares the VM resv.
 *
 * If this is a map operation, locks and prepares the GEM resv.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
				      struct drm_sched_job *sched_job)
{ … }

/**
 * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
 * @exec: drm_exec context.
 * @sched_job: Job to update the resvs on.
 */
void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
				      struct drm_sched_job *sched_job)
{ … }

void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
			     struct dma_fence *fence,
			     enum dma_resv_usage private_usage,
			     enum dma_resv_usage extobj_usage)
{ … }

/**
 * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
 * @file: File.
 * @vm: VM targeted by the VM operation.
 * @op: Data describing the VM operation.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_bind_exec_sync_op(struct drm_file *file,
				 struct panthor_vm *vm,
				 struct drm_panthor_vm_bind_op *op)
{ … }

/**
 * panthor_vm_map_bo_range() - Map a GEM object range to a VM
 * @vm: VM to map the GEM to.
 * @bo: GEM object to map.
 * @offset: Offset in the GEM object.
 * @size: Size to map.
 * @va: Virtual address to map the object to.
 * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
 * Only map-related flags are valid.
 *
 * Internal use only. For userspace requests, use
 * panthor_vm_bind_exec_sync_op() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
			    u64 offset, u64 size, u64 va, u32 flags)
{ … }

/**
 * panthor_vm_unmap_range() - Unmap a portion of the VA space
 * @vm: VM to unmap the region from.
 * @va: Virtual address to unmap. Must be 4k aligned.
 * @size: Size of the region to unmap. Must be 4k aligned.
 *
 * Internal use only. For userspace requests, use
 * panthor_vm_bind_exec_sync_op() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
{ … }

/**
 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
 * @exec: Locking/preparation context.
 * @vm: VM targeted by the GPU job.
 * @slot_count: Number of slots to reserve.
 *
 * GPU jobs assume all BOs bound to the VM at the time the job is submitted
 * are available when the job is executed. In order to guarantee that, we
 * need to reserve a slot on all BOs mapped to a VM and update this slot with
 * the job fence after its submission. An illustrative sketch of this
 * reserve-then-publish pattern is provided further down in this file.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec,
					struct panthor_vm *vm,
					u32 slot_count)
{ … }

/**
 * panthor_mmu_unplug() - Unplug the MMU logic
 * @ptdev: Device.
 *
 * No access to the MMU regs should be done after this function is called.
 * We suspend the IRQ and disable all VMs to guarantee that.
 */
void panthor_mmu_unplug(struct panthor_device *ptdev)
{ … }

static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
{ … }

/**
 * panthor_mmu_init() - Initialize the MMU logic.
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_mmu_init(struct panthor_device *ptdev)
{ … }
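/*
 * Illustrative sketch only, not the driver's implementation: the
 * reserve-then-publish pattern described in panthor_vm_prepare_mapped_bos_resvs()
 * above, expressed with the generic drm_gpuvm helpers. The example_* names
 * are hypothetical and error handling is kept to a minimum.
 */
static __maybe_unused int
example_prepare_vm_resvs(struct drm_exec *exec, struct drm_gpuvm *gpuvm,
			 u32 slot_count)
{
	int ret;

	/* Reserve fence slots on the VM resv first... */
	ret = drm_gpuvm_prepare_vm(gpuvm, exec, slot_count);
	if (ret)
		return ret;

	/* ...then on every BO mapped in the VM. */
	return drm_gpuvm_prepare_objects(gpuvm, exec, slot_count);
}

static __maybe_unused void
example_publish_job_fence(struct drm_exec *exec, struct drm_gpuvm *gpuvm,
			  struct dma_fence *fence)
{
	/* Fill the reserved slots with the job's fence after submission. */
	drm_gpuvm_resv_add_fence(gpuvm, exec, fence,
				 DMA_RESV_USAGE_BOOKKEEP,
				 DMA_RESV_USAGE_BOOKKEEP);
}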
#ifdef CONFIG_DEBUG_FS
static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
{ … }

static int show_each_vm(struct seq_file *m, void *arg)
{ … }

static struct drm_info_list panthor_mmu_debugfs_list[] = …;

/**
 * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
 * @minor: Minor.
 */
void panthor_mmu_debugfs_init(struct drm_minor *minor)
{ … }
#endif /* CONFIG_DEBUG_FS */

/**
 * panthor_mmu_pt_cache_init() - Initialize the page table cache.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_mmu_pt_cache_init(void)
{ … }

/**
 * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
 */
void panthor_mmu_pt_cache_fini(void)
{ … }
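/*
 * Illustrative sketch only, not the driver's implementation: creating and
 * destroying a slab cache sized for one page-table granule, as described in
 * panthor_mmu_pt_cache_init()/panthor_mmu_pt_cache_fini() above. The cache
 * name and the 4K size/alignment are assumptions.
 */
static __maybe_unused int example_pt_cache_init(struct kmem_cache **cache)
{
	*cache = kmem_cache_create("example-mmu-pt", SZ_4K, SZ_4K, 0, NULL);

	return *cache ? 0 : -ENOMEM;
}

static __maybe_unused void example_pt_cache_fini(struct kmem_cache **cache)
{
	/* kmem_cache_destroy() tolerates a NULL cache pointer. */
	kmem_cache_destroy(*cache);
	*cache = NULL;
}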