linux/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 */
/*
 * This file contains functions for buffer object structure management
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_bo.h"

static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
		     unsigned int pgnr)
{}

static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
    struct rb_node *node, unsigned int pgnr)
{}

static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
	ia_css_ptr start)
{}

static struct hmm_buffer_object *__bo_search_by_addr_in_range(
    struct rb_root *root, unsigned int start)
{}

static void __bo_insert_to_free_rbtree(struct rb_root *root,
				       struct hmm_buffer_object *bo)
{}

static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
					struct hmm_buffer_object *bo)
{}

static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
	struct hmm_buffer_object *bo,
	unsigned int pgnr)
{}

static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{}

static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
	struct hmm_buffer_object *next_bo)
{}

/*
 * hmm_bo_device functions.
 */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{}

struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{}

void hmm_bo_release(struct hmm_buffer_object *bo)
{}

void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{}

int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{}

int hmm_bo_allocated(struct hmm_buffer_object *bo)
{}

struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{}

struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, unsigned int vaddr)
{}

struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr)
{}

static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
{}

static void free_private_bo_pages(struct hmm_buffer_object *bo)
{}

/* Allocate pages which will be used only by the ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo)
{}
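
/*
 * Illustrative sketch only (hypothetical helper, not the driver's
 * alloc_private_pages() above): one plausible way to allocate the
 * ISP-private pages one at a time and mark them uncacheable so the ISP
 * and the CPU agree on memory attributes.  The helper name and the gfp
 * flags are assumptions; it relies only on the bo->pgnr and bo->pages
 * fields from hmm/hmm_bo.h and on set_pages_array_uc()/__free_pages().
 * Error handling is kept minimal.
 */
static int __maybe_unused sketch_alloc_private_pages(struct hmm_buffer_object *bo)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bo->pgnr; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!bo->pages[i]) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	/* The ISP accesses these pages uncached, so switch them to UC. */
	ret = set_pages_array_uc(bo->pages, bo->pgnr);
	if (ret)
		goto free_pages;

	return 0;

free_pages:
	while (i--)
		__free_pages(bo->pages[i], 0);
	return ret;
}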

static int alloc_vmalloc_pages(struct hmm_buffer_object *bo, void *vmalloc_addr)
{}

/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages come from. Currently we have two types
 * of memory: HMM_BO_PRIVATE and HMM_BO_VMALLOC.
 *
 * vmalloc_addr is only valid when type is HMM_BO_VMALLOC.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type,
		       void *vmalloc_addr)
{}
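
/*
 * Illustrative sketch only (hypothetical helper, not the exported
 * hmm_bo_alloc_pages() above): the essential flow is to allocate the
 * bo->pages array and then fill it either with freshly allocated
 * private pages or with the pages already backing a vmalloc() area,
 * depending on the requested type.  Locking and status bookkeeping are
 * omitted; sketch_alloc_private_pages() is the hypothetical allocator
 * sketched earlier in this file, and the field names are assumed from
 * hmm/hmm_bo.h.
 */
static int __maybe_unused sketch_bo_alloc_pages(struct hmm_buffer_object *bo,
						enum hmm_bo_type type,
						void *vmalloc_addr)
{
	void *vaddr = vmalloc_addr;
	unsigned int i;
	int ret = 0;

	bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages)
		return -ENOMEM;

	if (type == HMM_BO_PRIVATE) {
		ret = sketch_alloc_private_pages(bo);
	} else if (type == HMM_BO_VMALLOC) {
		/* A vmalloc() area is already backed by pages; look them up. */
		for (i = 0; i < bo->pgnr; i++) {
			bo->pages[i] = vmalloc_to_page(vaddr);
			if (!bo->pages[i]) {
				ret = -ENOMEM;
				break;
			}
			vaddr += PAGE_SIZE;
		}
	} else {
		ret = -EINVAL;
	}

	if (ret) {
		kfree(bo->pages);
		bo->pages = NULL;
		return ret;
	}

	bo->type = type;
	return 0;
}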

/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{}
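
/*
 * Illustrative sketch of the corresponding release path (hypothetical
 * helper, not hmm_bo_free_pages() itself): private pages were switched
 * to uncacheable at allocation time, so they must be switched back to
 * write-back before being returned to the page allocator.  vmalloc-backed
 * pages belong to the vmalloc area and are only forgotten here.
 */
static void __maybe_unused sketch_bo_free_pages(struct hmm_buffer_object *bo)
{
	unsigned int i;

	if (bo->type == HMM_BO_PRIVATE) {
		/* Restore the default write-back attribute before freeing. */
		set_pages_array_wb(bo->pages, bo->pgnr);
		for (i = 0; i < bo->pgnr; i++)
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	bo->pages = NULL;
}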

int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{}

/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{}
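
/*
 * Illustrative sketch of the bind step (hypothetical helper, not
 * hmm_bo_bind() itself): every CPU page backing the bo is entered into
 * the ISP MMU, one page at a time, starting at the ISP virtual address
 * reserved for this bo (bo->start).  The isp_mmu_map()/isp_mmu_unmap()
 * calls and the bo/bdev field names follow the headers included above;
 * treat the exact signatures as assumptions of this sketch.
 */
static int __maybe_unused sketch_bo_bind(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	unsigned int virt = bo->start;
	unsigned int i;
	int ret;

	for (i = 0; i < bo->pgnr; i++) {
		ret = isp_mmu_map(&bdev->mmu, virt,
				  page_to_phys(bo->pages[i]), 1);
		if (ret) {
			/* Undo the mappings created so far. */
			isp_mmu_unmap(&bdev->mmu, bo->start, i);
			return ret;
		}
		virt += PAGE_SIZE;
	}

	return 0;
}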

/*
 * unbind the physical pages from the related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{}

int hmm_bo_binded(struct hmm_buffer_object *bo)
{}

void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{}

void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{}

void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{}

void hmm_bo_ref(struct hmm_buffer_object *bo)
{}

static void kref_hmm_bo_release(struct kref *kref)
{}

void hmm_bo_unref(struct hmm_buffer_object *bo)
{}

static void hmm_bo_vm_open(struct vm_area_struct *vma)
{}

static void hmm_bo_vm_close(struct vm_area_struct *vma)
{}

static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};

/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{}
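
/*
 * Illustrative sketch of the user-space mapping (hypothetical helper,
 * not hmm_bo_mmap() itself): the VMA must cover exactly the bo, and
 * every backing page is then remapped into the process with
 * remap_pfn_range().  vm_flags_set() is the interface used on recent
 * kernels; older code assigned vma->vm_flags directly.  The vm_ops
 * wiring mirrors what a typical driver mmap handler does; treat the
 * details as assumptions of the sketch.
 */
static int __maybe_unused sketch_bo_mmap(struct vm_area_struct *vma,
					 struct hmm_buffer_object *bo)
{
	unsigned long virt = vma->vm_start;
	unsigned int i;

	/* The VMA and the buffer object must be exactly the same size. */
	if (vma->vm_end - vma->vm_start != (unsigned long)bo->pgnr << PAGE_SHIFT)
		return -EINVAL;

	for (i = 0; i < bo->pgnr; i++) {
		unsigned long pfn = page_to_pfn(bo->pages[i]);

		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED))
			return -EINVAL;
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;
	vma->vm_ops = &hmm_bo_vm_ops;
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	return 0;
}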