/* linux/drivers/gpu/drm/panfrost/panfrost_mmu.c */

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

/* MMU register accessor shims.  NOTE(review): expansions are elided in this
 * excerpt — presumably writel()/readl() on the MMU register block; confirm
 * against the full file before relying on them. */
#define mmu_write(dev, reg, data)
#define mmu_read(dev, reg)

/* Body elided in this excerpt.  Presumably polls the AS status register until
 * the address space @as_nr is no longer busy — TODO confirm. */
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{}

/* Body elided in this excerpt.  Presumably waits for the AS to be ready and
 * then issues @cmd to address space @as_nr — TODO confirm. */
static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{}

/* Body elided in this excerpt.  Presumably programs the AS lock-address
 * registers to cover [region_start, region_start + size) before an MMU
 * operation — TODO confirm. */
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 region_start, u64 size)
{}


/* Body elided in this excerpt.  Presumably performs MMU operation @op on the
 * given IOVA range with pfdev->as_lock already held (as_nr < 0 meaning "no
 * AS assigned") — TODO confirm. */
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{}

/* Body elided in this excerpt.  Presumably the locking wrapper around
 * mmu_hw_do_operation_locked(), taking pfdev->as_lock and resolving the AS
 * number from @mmu — TODO confirm. */
static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, u64 size, u32 op)
{}

/* Body elided in this excerpt.  Presumably programs the page-table base and
 * memory attributes for @mmu's AS and enables translation — TODO confirm. */
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{}

/* Body elided in this excerpt.  Presumably clears the AS configuration so
 * address space @as_nr no longer translates (used to kill faulting jobs; see
 * the IRQ thread handler below) — TODO confirm. */
static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{}

/* Body elided in this excerpt.  Presumably assigns (or reuses) a hardware
 * address-space slot for @mmu and returns its number — TODO confirm. */
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{}

/* Body elided in this excerpt.  Presumably drops a usage reference on @mmu's
 * hardware address-space slot — TODO confirm. */
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{}

/* Body elided in this excerpt.  Presumably reinitializes MMU state after a
 * GPU reset (disabling all address spaces, clearing fault masks, re-enabling
 * interrupts) — TODO confirm. */
void panfrost_mmu_reset(struct panfrost_device *pfdev)
{}

/* Body elided in this excerpt.  Presumably picks the largest page size usable
 * for (addr, size) and returns how many such pages fit via *count — TODO
 * confirm. */
static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{}

/* Body elided in this excerpt.  Presumably flushes the TLB entries covering
 * [iova, iova + size) for @mmu's address space — TODO confirm. */
static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, u64 size)
{}

/* Body elided in this excerpt.  Presumably walks @sgt and maps each segment
 * into @mmu's io-pgtable at @iova with protection bits @prot — TODO confirm. */
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{}

/* Body elided in this excerpt.  Presumably maps the GEM object backing
 * @mapping into its MMU context — TODO confirm. */
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{}

/* Body elided in this excerpt.  Presumably tears down the page-table entries
 * for @mapping and flushes the affected range — TODO confirm. */
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{}

/* Body elided in this excerpt.  io-pgtable flush callback; presumably
 * invalidates the whole stage-1 TLB context for @cookie — TODO confirm. */
static void mmu_tlb_inv_context_s1(void *cookie)
{}

/* Body elided in this excerpt.  io-pgtable flush callback; presumably waits
 * for outstanding TLB operations on @cookie to complete — TODO confirm. */
static void mmu_tlb_sync_context(void *cookie)
{}

/* Body elided in this excerpt.  io-pgtable tlb_flush_walk callback;
 * presumably flushes walk-cache entries for [iova, iova + size) — TODO
 * confirm. */
static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{}

static const struct iommu_flush_ops mmu_tlb_ops =;

/* Body elided in this excerpt.  Presumably looks up the GEM mapping in
 * address space @as that contains GPU VA @addr (used by the fault handler) —
 * TODO confirm. */
static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{}

#define NUM_FAULT_PAGES

/* Body elided in this excerpt.  Called from the threaded IRQ handler below;
 * presumably services a translation fault on a grow-on-demand (heap) object
 * by allocating and mapping pages around @addr, returning 0 on success —
 * TODO confirm. */
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{}

/* Body elided in this excerpt.  kref release callback; presumably frees the
 * MMU context (page tables, drm_mm, AS slot) once the last reference is
 * dropped — TODO confirm. */
static void panfrost_mmu_release_ctx(struct kref *kref)
{}

/* Body elided in this excerpt.  Presumably drops a reference on @mmu,
 * releasing it via panfrost_mmu_release_ctx() at zero — TODO confirm. */
void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{}

/* Body elided in this excerpt.  Presumably takes a reference on @mmu and
 * returns it — TODO confirm. */
struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{}

/* Address-layout constants used by the drm_mm color-adjust callback below.
 * NOTE(review): values elided in this excerpt — confirm against the full
 * file. */
#define PFN_4G
#define PFN_4G_MASK
#define PFN_16M

/* Body elided in this excerpt.  drm_mm color_adjust callback; presumably
 * shrinks [*start, *end) so colored allocations avoid problematic address
 * boundaries (the PFN_* constants above suggest 4 GB / 16 MB boundaries) —
 * TODO confirm. */
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{}

/* Body elided in this excerpt.  Presumably allocates a new per-FD MMU
 * context: io-pgtable, drm_mm VA allocator, initial kref — TODO confirm. */
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{}

/* Body elided in this excerpt.  Presumably decodes the access-type field of
 * @fault_status into a human-readable string for fault logging (used by the
 * IRQ thread handler below) — TODO confirm. */
static const char *access_type_name(struct panfrost_device *pfdev,
		u32 fault_status)
{}

/* Body elided in this excerpt.  Hard-IRQ half of the MMU interrupt;
 * presumably masks further MMU interrupts and returns IRQ_WAKE_THREAD so
 * panfrost_mmu_irq_handler_thread() runs — TODO confirm. */
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{}

/*
 * Threaded half of the MMU interrupt: service all pending address-space
 * (AS) faults.
 *
 * For each pending AS, read the fault address/status, attempt demand
 * mapping via panfrost_mmu_map_fault_addr() for plain page faults, and on
 * any unrecoverable fault log the decoded status and disable the MMU on
 * that AS so its jobs are killed.  Re-enables the interrupt mask on exit
 * unless a suspend is in progress.  Always returns IRQ_HANDLED.
 */
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
		/* Fault bits are in [15:0], error bits in [31:16]; fold the
		 * halves together to find the lowest AS with anything pending,
		 * then build a mask covering both of its bits.
		 */
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* Ack this AS's interrupt bits before handling, so a new fault
		 * raised while we work is not lost.
		 */
		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only: try demand mapping when the fault bit (not
		 * the error bit) is set and the exception type is in the
		 * translation-fault range.
		 */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret) {
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				as, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

			spin_lock(&pfdev->as_lock);
			/* Ignore MMU interrupts on this AS until it's been
			 * re-enabled.
			 */
			pfdev->as_faulty_mask |= mask;

			/* Disable the MMU to kill jobs on this AS. */
			panfrost_mmu_disable(pfdev, as);
			spin_unlock(&pfdev->as_lock);
		}

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
	}

	/* Enable interrupts only if we're not about to get suspended */
	if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
		spin_lock(&pfdev->as_lock);
		mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
		spin_unlock(&pfdev->as_lock);
	}

	return IRQ_HANDLED;
}

/* Body elided in this excerpt.  Presumably requests the MMU IRQ (wiring up
 * the two handlers above) and performs initial hardware setup — TODO
 * confirm. */
int panfrost_mmu_init(struct panfrost_device *pfdev)
{}

/* Body elided in this excerpt.  Presumably quiesces the MMU hardware on
 * driver teardown (e.g. masking its interrupts) — TODO confirm. */
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{}

/* Body elided in this excerpt.  Presumably sets the MMU suspend bit (checked
 * by the IRQ thread handler via PANFROST_COMP_BIT_MMU) and masks/synchronizes
 * the interrupt so no handler runs during suspend — TODO confirm. */
void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
{}