/* linux/drivers/infiniband/hw/hfi1/mmu_rb.c */

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
		const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void release_immediate(struct kref *refcount);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts =;

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{}

int hfi1_mmu_rb_register(void *ops_arg,
			 const struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{}

void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{}

int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{}

/* Caller must hold handler lock */
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
					  unsigned long addr, unsigned long len)
{}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{}

/*
 * Must NOT call while holding mnode->handler->lock.
 * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
 * spinlock.
 */
static void release_immediate(struct kref *refcount)
{}

/* Caller must hold mnode->handler->lock */
static void release_nolock(struct kref *refcount)
{}

/*
 * struct mmu_rb_node->refcount kref_put() callback.
 * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
 * handler->del_work on handler->wq.
 * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
 * Acquires mmu_rb_node->handler->lock; do not call while already holding
 * handler->lock.
 */
void hfi1_mmu_rb_release(struct kref *refcount)
{}

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{}

static int mmu_notifier_range_start(struct mmu_notifier *mn,
		const struct mmu_notifier_range *range)
{}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{}