// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * ISP MMU management wrap code
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* for totalram_pages() */
#include <linux/slab.h>		/* for kmem_cache_* */
#include <linux/list.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"

/*
 * 64-bit x86 processor physical address layout:
 * 0		- 0x7fffffff		DDR RAM	(2GB)
 * 0x80000000	- 0xffffffff		MMIO	(2GB)
 * 0x100000000	- 0x3fffffffffff	DDR RAM	(64TB)
 * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
 * physical address 0 - 0x7fffffff and the rest will start from 0x100000000.
 * We have to make sure memory is allocated from the lower 2GB for devices
 * that are only 32-bit capable (e.g. the ISP MMU).
 *
 * For any confusion, contact [email protected].
 */
#define NR_PAGES_2GB	(SZ_2G / PAGE_SIZE)
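/*
 * With the usual 4KB PAGE_SIZE this is 0x80000000 / 0x1000 = 524288
 * pages; alloc_page_table() below uses it to decide whether GFP_DMA32
 * is needed at all.
 */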

static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt);
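
/*
 * The ISP MMU uses a two-level page table: bits [31:22] of a 32-bit ISP
 * virtual address index the L1 page table, bits [21:12] index an L2 page
 * table, and bits [11:0] are the offset within a 4KB page. Each table is
 * one uncacheable 4KB page holding 1024 32-bit PTEs.
 *
 * Worked example (assuming the 10/10/12 split above): isp_virt 0x00402003
 * resolves to L1 idx 1, L2 idx 2, page offset 3.
 */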

static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	return *(pt_virt + idx);
}

static void atomisp_set_pte(phys_addr_t pt,
			    unsigned int idx, unsigned int pte)
{
	unsigned int *pt_virt = phys_to_virt(pt);

	*(pt_virt + idx) = pte;
}

static void *isp_pt_phys_to_virt(phys_addr_t phys)
{
	return phys_to_virt(phys);
}

static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
				     unsigned int pte)
{
	/* the PTE <-> physical address translation is hardware specific */
	return mmu->driver->pte_to_phys(mmu, pte);
}

static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
	phys_addr_t phys)
{
	unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);

	return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
}

/*
 * Allocate an uncacheable page table.
 * Returns the physical address of the page, or NULL_PAGE on failure.
 */
static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
{
	int i;
	phys_addr_t page;
	void *virt;

	/*
	 * The slab allocator doesn't handle GFP_DMA32, so fall back to
	 * the buddy allocator whenever RAM above 2GB may exist; the
	 * kmem_cache is only safe when all of RAM is 32-bit addressable.
	 */
	if (totalram_pages() > (unsigned long)NR_PAGES_2GB)
		virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
	else
		virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);

	if (!virt)
		return (phys_addr_t)NULL_PAGE;

	/* the ISP MMU does not snoop the CPU cache */
#ifdef CONFIG_X86
	set_memory_uc((unsigned long)virt, 1);
#endif

	page = virt_to_phys(virt);

	/* start with every entry invalid */
	for (i = 0; i < 1024; i++)
		atomisp_set_pte(page, i, mmu->driver->null_pte);

	return page;
}

static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
{
	void *virt;

	page &= ISP_PAGE_MASK;
	/* reset the page to write-back before freeing it */
	virt = phys_to_virt(page);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)virt, 1);
#endif

	if (totalram_pages() > (unsigned long)NR_PAGES_2GB)
		free_page((unsigned long)virt);
	else
		kmem_cache_free(mmu->tbl_cache, virt);
}

static void mmu_remap_error(struct isp_mmu *mmu,
			    phys_addr_t l1_pt, unsigned int l1_idx,
			    phys_addr_t l2_pt, unsigned int l2_idx,
			    unsigned int isp_virt, phys_addr_t old_phys,
			    phys_addr_t new_phys)
{
	dev_err(atomisp_dev, "address remap:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\told: isp_virt = 0x%x, phys = 0x%llx\n"
		"\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
		isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt), (u64)l2_pt, l2_idx,
		isp_virt, (u64)old_phys,
		isp_virt, (u64)new_phys);
}

static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   phys_addr_t l2_pt, unsigned int l2_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx,
		isp_pt_phys_to_virt(l2_pt), (u64)l2_pt, l2_idx,
		isp_virt, pte);
}

static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
				   phys_addr_t l1_pt, unsigned int l1_idx,
				   unsigned int isp_virt, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n"
		"\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n"
		"\tisp_virt = 0x%x, pte(L2 PT phys) = 0x%x\n",
		isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx,
		isp_virt, pte);
}

static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
{
	dev_err(atomisp_dev, "unmap invalid L1PT:\n\n"
		"L1PT = 0x%x\n", pte);
}

/*
 * Update L2 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int l1_idx, phys_addr_t l2_pt,
		      unsigned int start, unsigned int end, phys_addr_t phys)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			/* remapping a live entry is a caller bug */
			mmu_remap_error(mmu, l1_pt, l1_idx, l2_pt, idx,
					ptr, isp_pte_to_pgaddr(mmu, pte),
					phys);

			/* undo everything mapped so far */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}

		pte = isp_pgaddr_to_pte_valid(mmu, phys);

		atomisp_set_pte(l2_pt, idx, pte);
		mmu->l2_pgt_refcount[l1_idx]++;
		ptr += (1U << ISP_L2PT_OFFSET);
		phys += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	return 0;
}

/*
 * Update L1 page table according to isp virtual address and page physical
 * address
 */
static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
		      unsigned int start, unsigned int end,
		      phys_addr_t phys)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;
	int ret;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);
	phys &= ISP_PAGE_MASK;

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			/* no L2 page table here yet: allocate one */
			l2_pt = alloc_page_table(mmu);
			if (l2_pt == NULL_PAGE) {
				dev_err(atomisp_dev,
					"alloc page table fail.\n");

				/* undo everything mapped so far */
				free_mmu_map(mmu, start, ptr);

				return -ENOMEM;
			}

			l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);

			atomisp_set_pte(l1_pt, idx, l2_pte);
			mmu->l2_pgt_refcount[idx] = 0;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		/* advance in at most one-L2-table (4MB) chunks */
		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, l1_aligned, phys);
			phys += (l1_aligned - ptr);
			ptr = l1_aligned;
		} else {
			ret = mmu_l2_map(mmu, l1_pt, idx,
					 l2_pt, ptr, end, phys);
			phys += (end - ptr);
			ptr = end;
		}

		if (ret) {
			dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");

			/* undo everything mapped so far */
			free_mmu_map(mmu, start, ptr);

			return -EINVAL;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES - 1);

	return 0;
}

/*
 * Update page table according to isp virtual address and page physical
 * address
 */
static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		   phys_addr_t phys, unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;
	int ret;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		/* allocate the L1 page table on first use */
		l1_pt = alloc_page_table(mmu);
		if (l1_pt == NULL_PAGE) {
			dev_err(atomisp_dev, "alloc page table fail.\n");
			mutex_unlock(&mmu->pt_mutex);
			return -ENOMEM;
		}

		/* pass the L1 page table base to the hardware */
		mmu->base_address = l1_pt;
		mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
		memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = isp_virt & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);
	phys &= ISP_PAGE_MASK;

	ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
	if (ret)
		dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");

	mutex_unlock(&mmu->pt_mutex);
	return ret;
}
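
/*
 * Worked example (hypothetical values): mapping pgnr = 3 pages at
 * isp_virt = 0x10000000 covers [0x10000000, 0x10003000): one L1 entry
 * (idx 0x40 = 0x10000000 >> 22) and three consecutive L2 PTEs
 * (idx 0, 1, 2), so l2_pgt_refcount[0x40] grows by three.
 */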

/*
 * Clear L2 page table entries for the given ISP virtual address range;
 * the L2 page table itself is freed once its refcount drops to zero
 */
static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int l1_idx, phys_addr_t l2_pt,
			 unsigned int start, unsigned int end)
{
	unsigned int ptr;
	unsigned int idx;
	unsigned int pte;

	l2_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L2_IDX(ptr);

		pte = atomisp_get_pte(l2_pt, idx);

		if (!ISP_PTE_VALID(mmu, pte))
			mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
					       l2_pt, idx, ptr, pte);

		atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
		mmu->l2_pgt_refcount[l1_idx]--;
		ptr += (1U << ISP_L2PT_OFFSET);
	} while (ptr < end && idx < ISP_L2PT_PTES - 1);

	/* drop the L2 page table once it maps nothing */
	if (mmu->l2_pgt_refcount[l1_idx] == 0) {
		free_page_table(mmu, l2_pt);
		atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
	}
}

/*
 * Walk the L1 page table and clear the L2 mappings covering the given
 * ISP virtual address range
 */
static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
			 unsigned int start, unsigned int end)
{
	phys_addr_t l2_pt;
	unsigned int ptr, l1_aligned;
	unsigned int idx;
	unsigned int l2_pte;

	l1_pt &= ISP_PAGE_MASK;

	start = start & ISP_PAGE_MASK;
	end = ISP_PAGE_ALIGN(end);

	ptr = start;
	do {
		idx = ISP_PTR_TO_L1_IDX(ptr);

		l2_pte = atomisp_get_pte(l1_pt, idx);

		if (!ISP_PTE_VALID(mmu, l2_pte)) {
			mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
			/* skip the hole, otherwise we would loop forever */
			ptr += (1U << ISP_L1PT_OFFSET);
			continue;
		}

		l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);

		/* advance in at most one-L2-table (4MB) chunks */
		l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);

		if (l1_aligned < end) {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
			ptr = l1_aligned;
		} else {
			mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
			ptr = end;
		}
	} while (ptr < end && idx < ISP_L1PT_PTES - 1);
}

/*
 * Unmap the given range of ISP virtual addresses
 */
static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		      unsigned int pgnr)
{
	unsigned int start, end;
	phys_addr_t l1_pt;

	mutex_lock(&mmu->pt_mutex);
	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
		mutex_unlock(&mmu->pt_mutex);
		return;
	}

	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	start = isp_virt & ISP_PAGE_MASK;
	end = start + (pgnr << ISP_PAGE_OFFSET);

	mmu_l1_unmap(mmu, l1_pt, start, end);
	mutex_unlock(&mmu->pt_mutex);
}

/*
 * Free page tables according to isp start virtual address and end virtual
 * address.
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int pgnr;
	unsigned int start, end;

	start = start_isp_virt & ISP_PAGE_MASK;
	end = end_isp_virt & ISP_PAGE_MASK;
	pgnr = (end - start) >> ISP_PAGE_OFFSET;
	mmu_unmap(mmu, start, pgnr);
}

int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}

void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}
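
/*
 * Typical caller sequence, as a sketch with a hypothetical `mmu`
 * instance (in this driver the hmm buffer-object code drives these
 * calls): map pinned pages, flush the ISP TLB so the hardware sees the
 * new PTEs, and unmap on teardown.
 *
 *	if (!isp_mmu_map(mmu, isp_virt, page_to_phys(page), 1))
 *		isp_mmu_flush_tlb_range(mmu, isp_virt, ISP_PAGE_SIZE);
 *	...
 *	isp_mmu_unmap(mmu, isp_virt, 1);
 *	isp_mmu_flush_tlb_range(mmu, isp_virt, ISP_PAGE_SIZE);
 */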

static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
	unsigned int start,
	unsigned int size)
{
	/* fall back to a full TLB flush when no range op is provided */
	isp_mmu_flush_tlb(mmu);
}

/* MMU init for internal structure */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
{
	if (!mmu)		/* error */
		return -EINVAL;
	if (!driver)		/* error */
		return -EINVAL;

	if (!driver->name)
		dev_warn(atomisp_dev, "NULL name for MMU driver...\n");

	mmu->driver = driver;

	if (!driver->tlb_flush_all) {
		dev_err(atomisp_dev,
			"tlb_flush_all operation not provided.\n");
		return -EINVAL;
	}

	if (!driver->tlb_flush_range)
		driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;

	if (!driver->pte_valid_mask) {
		dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
		return -EINVAL;
	}

	mmu->l1_pte = driver->null_pte;

	mutex_init(&mmu->pt_mutex);

	mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
					   ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!mmu->tbl_cache)
		return -ENOMEM;

	return 0;
}
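
/*
 * A client supplies the hardware-specific hooks through struct
 * isp_mmu_client. A minimal sketch (field names as declared in
 * mmu/isp_mmu.h; the real Merrifield client lives in
 * mmu/sh_mmu_mrfld.c; the my_* names are placeholders):
 *
 *	static struct isp_mmu_client my_mmu_client = {
 *		.name = "My ISP MMU",
 *		.pte_valid_mask = 0x1,
 *		.null_pte = 0,
 *		.get_pd_base = my_get_pd_base,
 *		.tlb_flush_all = my_tlb_flush_all,
 *	};
 *
 *	ret = isp_mmu_init(&isp->mmu, &my_mmu_client);
 */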

/* Free L1 and L2 page tables */
void isp_mmu_exit(struct isp_mmu *mmu)
{
	unsigned int idx;
	unsigned int pte;
	phys_addr_t l1_pt, l2_pt;

	if (!mmu)
		return;

	if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
		dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
			 (unsigned int)mmu->l1_pte);
		return;
	}

	/* free all L2 page tables */
	l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);

	for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
		pte = atomisp_get_pte(l1_pt, idx);

		if (ISP_PTE_VALID(mmu, pte)) {
			l2_pt = isp_pte_to_pgaddr(mmu, pte);

			free_page_table(mmu, l2_pt);
		}
	}

	free_page_table(mmu, l1_pt);

	kmem_cache_destroy(mmu->tbl_cache);
}