/* linux/drivers/xen/privcmd.c -- stray path line from extraction, commented out */

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt)

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/virtio_mmio.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

/*
 * NOTE(review): this copy of the file is an extraction skeleton -- macro
 * arguments, initializers and function bodies have been stripped (e.g. the
 * "=;" initializers below are not valid C as-is).  Comments only describe
 * what is still visible; restore the elided text from the full source
 * before building.
 */
MODULE_DESCRIPTION();	/* description string elided in this copy */
MODULE_LICENSE();	/* license string elided (SPDX header above says GPL-2.0-only) */

#define PRIV_VMA_LOCKED	/* value elided in this copy */

/* Module parameter: cap on dm_op buffer count (initializer elided). */
static unsigned int privcmd_dm_op_max_num =;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC();	/* parameter description elided in this copy */

/* Module parameter: cap on a single dm_op buffer's size (initializer elided). */
static unsigned int privcmd_dm_op_buf_max_size =;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC();	/* parameter description elided in this copy */

/* Private per-open state, presumably -- members elided; confirm against full source. */
struct privcmd_data {};

/*
 * Forward declaration -- the definition is near the bottom of this file.
 */
static int privcmd_vma_range_is_mapped(
               struct vm_area_struct *vma,
               unsigned long addr,
               unsigned long nr_pages);

/*
 * NOTE(review): every function body below was stripped to "{}" by the
 * extraction that produced this copy; purpose comments are inferred from
 * names/signatures and must be checked against the full source.
 */

/* Handler for the privileged-hypercall ioctl; body elided in this copy. */
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{}

/* Free a page list (presumably one built by gather_array()); body elided. */
static void free_page_list(struct list_head *pages)
{}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{}

/* State carried through mmap_gfn_range() -- members elided in this copy. */
struct mmap_gfn_state {};

/* Per-element callback (matches traverse_pages()'s fn signature); body elided. */
static int mmap_gfn_range(void *data, void *state)
{}

/* Handler for the mmap ioctl; body elided in this copy. */
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{}

/* State carried through the mmap-batch callbacks -- members elided in this copy. */
struct mmap_batch_state {};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 * (Body elided in this copy; matches traverse_pages_block()'s fn signature.)
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{}

/* Record a single mapping error, presumably -- body elided in this copy. */
static int mmap_return_error(int err, struct mmap_batch_state *st)
{}

/* Block callback for reporting errors, presumably -- body elided in this copy. */
static int mmap_return_errors(void *data, int nr, void *state)
{}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 * (Body elided in this copy.)
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{}

/* Forward declaration; the initializer appears near the end of this file. */
static const struct vm_operations_struct privcmd_vm_ops;

/* Handler for the MMAPBATCH/MMAPBATCH_V2 ioctls; body elided in this copy. */
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{}

/*
 * Pin the userspace buffers described by kbufs[] (dm_op buffers, per the
 * parameter type); body elided in this copy.
 */
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{}

/* Release pages previously pinned by lock_pages(); body elided in this copy. */
static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{}

/* Handler for the dm_op ioctl; body elided in this copy. */
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{}

/* Handler for the restrict ioctl; body elided in this copy. */
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{}

/* Handler for the mmap-resource ioctl; body elided in this copy. */
static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{}

#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_SPINLOCK(irqfds_lock);
DEFINE_STATIC_SRCU();	/* NOTE(review): SRCU name elided in this copy */
static LIST_HEAD(irqfds_list);

/* Per-irqfd bookkeeping -- members elided in this copy. */
struct privcmd_kernel_irqfd {};

/*
 * NOTE(review): every body below was stripped to "{}" by the extraction
 * that produced this copy; restore them from the full source.
 */
static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
{}

/* Work handler (work_struct signature); body elided. */
static void irqfd_shutdown(struct work_struct *work)
{}

static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
{}

/* Wait-queue wakeup callback (wait_queue_entry_t signature); body elided. */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{}

/* poll_table registration callback; body elided. */
static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{}

static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{}

static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{}

/* Handler for the irqfd ioctl; body elided in this copy. */
static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{}

static int privcmd_irqfd_init(void)
{}

static void privcmd_irqfd_exit(void)
{}

/* Ioeventfd Support */
#define QUEUE_NOTIFY_VQ_MASK	/* NOTE(review): value elided in this copy */

static DEFINE_MUTEX(ioreq_lock);
static LIST_HEAD(ioreq_list);

/* per-eventfd structure (members elided in this copy) */
struct privcmd_kernel_ioeventfd {};

/* per-guest CPU / port structure (members elided in this copy) */
struct ioreq_port {};

/* per-guest structure (members elided in this copy) */
struct privcmd_kernel_ioreq {};

/* Interrupt handler (irq_handler_t signature); body elided. */
static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
{}

static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
{}

static
struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
{}

static struct privcmd_kernel_ioreq *
get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
{}

static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
{}

static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
{}

static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
{}

/* Handler for the ioeventfd ioctl; body elided in this copy. */
static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{}

static void privcmd_ioeventfd_exit(void)
{}
#else
/* Fallback when CONFIG_XEN_PRIVCMD_EVENTFD is not set: irqfd is unsupported. */
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

/* Nothing to initialise when irqfd support is compiled out; always succeeds. */
static inline int privcmd_irqfd_init(void)
{
	return 0;
}

/* Nothing to tear down when irqfd support is compiled out. */
static inline void privcmd_irqfd_exit(void)
{
}

/* Fallback when CONFIG_XEN_PRIVCMD_EVENTFD is not set: ioeventfd is unsupported. */
static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

/* Nothing to tear down when ioeventfd support is compiled out. */
static inline void privcmd_ioeventfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */

/*
 * Top-level ioctl dispatcher for the privcmd device; body elided in this
 * copy (presumably dispatches to the privcmd_ioctl_* handlers above).
 */
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{}

/* file_operations .open callback; body elided in this copy. */
static int privcmd_open(struct inode *ino, struct file *file)
{}

/* file_operations .release callback; body elided in this copy. */
static int privcmd_release(struct inode *ino, struct file *file)
{}

/* vm_operations_struct .close callback, presumably; body elided. */
static void privcmd_close(struct vm_area_struct *vma)
{}

/* vm_operations_struct .fault callback, presumably; body elided. */
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{}

/* NOTE(review): initializer elided in this copy ("=;" is not valid C);
 * this completes the forward declaration earlier in the file.
 */
static const struct vm_operations_struct privcmd_vm_ops =;

/* file_operations .mmap callback; body elided in this copy. */
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 * (Body elided in this copy.)
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{}

/* Definition matching the forward declaration at the top of the file;
 * body elided in this copy.
 */
static int privcmd_vma_range_is_mapped(
	           struct vm_area_struct *vma,
	           unsigned long addr,
	           unsigned long nr_pages)
{}

/* NOTE(review): initializer elided in this copy ("=;" is not valid C). */
const struct file_operations xen_privcmd_fops =;
EXPORT_SYMBOL_GPL();	/* symbol name elided (presumably xen_privcmd_fops) */

/* NOTE(review): initializer elided in this copy. */
static struct miscdevice privcmd_dev =;

/* Module entry point; body elided in this copy. */
static int __init privcmd_init(void)
{}

/* Module exit point; body elided in this copy. */
static void __exit privcmd_exit(void)
{}

module_init();	/* NOTE(review): argument elided (presumably privcmd_init) */
module_exit(privcmd_exit);