linux/virt/kvm/kvm_main.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine (KVM) Hypervisor
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Yaniv Kamay  <[email protected]>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#include <trace/events/ipi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>


/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_DESCRIPTION("Kernel-based Virtual Machine (KVM) Hypervisor");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default halves per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);
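
/*
 * Illustrative sketch (not part of upstream kvm_main.c): any path that needs
 * more than one of the locks named above must take them in that order and
 * drop them in reverse.  The function name is hypothetical; the snippet is
 * compiled out and only documents the rule.
 */
#if 0	/* example only */
static void example_lock_ordering(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->slots_lock);
	mutex_lock(&kvm->irq_lock);

	/* ... touch state protected by all three locks ... */

	mutex_unlock(&kvm->irq_lock);
	mutex_unlock(&kvm->slots_lock);
	mutex_unlock(&kvm->lock);
}
#endif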

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

static struct dentry *kvm_debugfs_dir;

static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
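
/*
 * Illustrative sketch (not part of upstream): KVM_COMPAT() is meant to be
 * dropped into a file_operations initializer.  With CONFIG_KVM_COMPAT it
 * wires up the given compat handler; without it, the macro installs
 * kvm_no_compat_ioctl/kvm_no_compat_open, giving the double line of defense
 * described above.  "example_fops" is a hypothetical name.
 */
#if 0	/* example only */
static const struct file_operations example_fops = {
	.unlocked_ioctl	= kvm_vcpu_ioctl,
	KVM_COMPAT(kvm_vcpu_compat_ioctl),
};
#endif
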
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{}

bool kvm_is_zone_device_page(struct page *page)
{}

/*
 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
 * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
 * is likely incomplete; it has been compiled purely from people wanting to
 * back their guests with a certain type of memory and encountering issues.
 */
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

void vcpu_put(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{}

static void ack_kick(void *_completed)
{}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
				  struct cpumask *tmp, int current_cpu)
{}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{}
EXPORT_SYMBOL_GPL();

void kvm_flush_remote_tlbs(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();

void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{}

void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{}

static void kvm_flush_shadow_all(struct kvm *kvm)
{}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{}
#endif

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{}

void kvm_destroy_vcpus(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();

#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{}

typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm);

struct kvm_mmu_notifier_range {};

/*
 * The inner-most helper returns a tuple containing the return value from the
 * arch- and action-specific handler, plus a flag indicating whether or not at
 * least one memslot was found, i.e. if the handler found guest memory.
 *
 * Note, most notifiers are averse to booleans, so even though KVM tracks the
 * return from arch code as a bool, outer helpers will cast it to an int. :-(
 */
typedef struct kvm_mmu_notifier_return {
	bool ret;
	bool found_memslot;
} kvm_mn_ret_t;

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
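
/*
 * Illustrative sketch (not part of upstream): callers compare a handler
 * against the stub instead of NULL, e.g. before invoking an optional on_lock
 * callback, so the test can be elided when the handler is known at compile
 * time.  @range and @kvm are assumed to be in scope.
 */
#if 0	/* example only */
	if (!IS_KVM_NULL_FN(range->on_lock))
		range->on_lock(kvm);
#endif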

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
	     node;							     \
	     node = interval_tree_iter_next(node, start, last))

static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
							   const struct kvm_mmu_notifier_range *range)
{}
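
/*
 * Illustrative sketch (not part of upstream): typical use of
 * kvm_for_each_memslot_in_hva_range(), as in the helper above.  The interval
 * tree node is embedded in the memslot, so the slot is recovered with
 * container_of().  The function name is hypothetical.
 */
#if 0	/* example only */
static void example_walk_hva_range(struct kvm_memslots *slots,
				   unsigned long start, unsigned long last)
{
	struct interval_tree_node *node;
	struct kvm_memory_slot *slot;

	kvm_for_each_memslot_in_hva_range(node, slots, start, last) {
		slot = container_of(node, struct kvm_memory_slot,
				    hva_node[slots->node_idx]);
		/* ... handle the part of @slot that overlaps the range ... */
	}
}
#endif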

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						gfn_handler_t handler)
{}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 gfn_handler_t handler)
{}

void kvm_mmu_invalidate_begin(struct kvm *kvm)
{}

void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{}

bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{}

void kvm_mmu_invalidate_end(struct kvm *kvm)
{}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops =;

static int kvm_init_mmu_notifier(struct kvm *kvm)
{}

#else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
				unsigned long state,
				void *unused)
{}

static void kvm_init_pm_notifier(struct kvm *kvm)
{}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{}
#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{}


static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{}

static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{}

/*
 * Called after the per-VM debugfs has been created.  When called,
 * kvm->debugfs_dentry should already be set up, so arch-specific debugfs
 * entries can be created under it.  Cleanup is done automatically and
 * recursively by kvm_destroy_vm_debugfs(), so a per-arch destroy interface
 * is not needed.
 */
void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{}

static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{}

static void kvm_destroy_devices(struct kvm *kvm)
{}

static void kvm_destroy_vm(struct kvm *kvm)
{}

void kvm_get_kvm(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();

/*
 * Make sure the VM is not under destruction; this is a safe version of
 * kvm_get_kvm().  Returns true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();

void kvm_put_kvm(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();
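
/*
 * Illustrative sketch (not part of upstream): the pattern described above,
 * as used when installing a vcpu file descriptor.  A reference is taken on
 * behalf of the new fd; if the fd cannot be installed, the reference is
 * dropped without triggering destruction, since the caller still uses @kvm.
 * @kvm, @vcpu and @r are assumed to be in scope.
 */
#if 0	/* example only */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm_no_destroy(kvm);
		return r;
	}
#endif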

static int kvm_vm_release(struct inode *inode, struct file *filp)
{}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{}
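
/*
 * Illustrative sketch (not part of upstream): the doubled allocation leaves
 * room for a scratch copy in the second half of the buffer, which the dirty
 * log code can use to snapshot bits without a separate allocation.  The
 * pointer arithmetic below only shows the assumed layout; @memslot is assumed
 * to be in scope.
 */
#if 0	/* example only */
	unsigned long *live    = memslot->dirty_bitmap;
	unsigned long *scratch = memslot->dirty_bitmap +
				 kvm_dirty_bitmap_bytes(memslot) / sizeof(long);
#endif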

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
 */
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
				  struct kvm_memory_slot *b)
{}

static void kvm_insert_gfn_node(struct kvm_memslots *slots,
				struct kvm_memory_slot *slot)
{}

static void kvm_erase_gfn_node(struct kvm_memslots *slots,
			       struct kvm_memory_slot *slot)
{}

static void kvm_replace_gfn_node(struct kvm_memslots *slots,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{}

/*
 * Replace @old with @new in the inactive memslots.
 *
 * With NULL @old this simply adds @new.
 * With NULL @new this simply removes @old.
 *
 * If @new is non-NULL its hva_node[slots_idx] range has to be set
 * appropriately.
 */
static void kvm_replace_memslot(struct kvm *kvm,
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
{}
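
/*
 * Illustrative sketch (not part of upstream): the three ways the helper
 * above is used, following the NULL conventions in its comment.
 */
#if 0	/* example only */
	kvm_replace_memslot(kvm, NULL, new);	/* add @new */
	kvm_replace_memslot(kvm, old, NULL);	/* remove @old */
	kvm_replace_memslot(kvm, old, new);	/* replace @old with @new */
#endif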

/*
 * Flags that do not access any of the extra space of struct
 * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
 * only allows these.
 */
#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
	(KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)

static int check_memory_region_flags(struct kvm *kvm,
				     const struct kvm_userspace_memory_region2 *mem)
{}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{}

static int kvm_prepare_memory_region(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{}

static void kvm_commit_memory_region(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{}

/*
 * Activate @new, which must be installed in the inactive slots by the caller,
 * by swapping the active slots and then propagating @new to @old once @old is
 * unreachable and can be safely modified.
 *
 * With NULL @old this simply adds @new to @active (while swapping the sets).
 * With NULL @new this simply removes @old from @active and frees it
 * (while also swapping the sets).
 */
static void kvm_activate_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *old,
				 struct kvm_memory_slot *new)
{}

static void kvm_copy_memslot(struct kvm_memory_slot *dest,
			     const struct kvm_memory_slot *src)
{}

static void kvm_invalidate_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   struct kvm_memory_slot *invalid_slot)
{}

static void kvm_create_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *new)
{}

static void kvm_delete_memslot(struct kvm *kvm,
			       struct kvm_memory_slot *old,
			       struct kvm_memory_slot *invalid_slot)
{}

static void kvm_move_memslot(struct kvm *kvm,
			     struct kvm_memory_slot *old,
			     struct kvm_memory_slot *new,
			     struct kvm_memory_slot *invalid_slot)
{}

static void kvm_update_flags_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new)
{}

static int kvm_set_memslot(struct kvm *kvm,
			   struct kvm_memory_slot *old,
			   struct kvm_memory_slot *new,
			   enum kvm_mr_change change)
{}

static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
				      gfn_t start, gfn_t end)
{}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region2 *mem)
{}
EXPORT_SYMBOL_GPL();

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region2 *mem)
{}
EXPORT_SYMBOL_GPL();
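
/*
 * Illustrative sketch (not part of upstream): how a caller satisfies the
 * "must hold kvm->slots_lock for write" requirement of
 * __kvm_set_memory_region(); kvm_set_memory_region() is, in essence, such a
 * wrapper.  @kvm, @mem and @r are assumed to be in scope.
 */
#if 0	/* example only */
	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
#endif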

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region2 *mem)
{}

#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log - get a snapshot of dirty pages
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	set to '1' if any dirty pages were found
 * @memslot:	set to the associated memslot, always valid on success
 */
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot)
{
	struct kvm_memslots *slots;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	/* Dirty ring tracking may be exclusive to dirty log tracking */
	if (!kvm_use_dirty_bitmap(kvm))
		return -ENXIO;

	*memslot = NULL;
	*is_dirty = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	*memslot = id_to_memslot(slots, id);
	if (!(*memslot) || !(*memslot)->dirty_bitmap)
		return -ENOENT;

	kvm_arch_sync_dirty_log(kvm, *memslot);

	n = kvm_dirty_bitmap_bytes(*memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = (*memslot)->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
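
/*
 * Illustrative sketch (not part of upstream): the userspace side that feeds
 * @log above.  The slot field encodes the address space id in the upper 16
 * bits and the slot id in the lower 16 bits; the bitmap must be large enough
 * to hold one bit per page of the memslot.  vm_fd, as_id, slot_id and bitmap
 * are assumed to exist in the caller.
 */
#if 0	/* example only (userspace) */
	struct kvm_dirty_log log = {
		.slot = (as_id << 16) | slot_id,
		.dirty_bitmap = bitmap,
	};

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
		perror("KVM_GET_DIRTY_LOG");
#endif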

#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * Keep in mind that VCPU threads can write to the bitmap concurrently.  So,
 * to avoid losing track of dirty pages, we keep the following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to userspace.
 *    4. Upon return, the caller flushes the TLBs if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before, and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 */
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{}
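
/*
 * Illustrative sketch (not part of upstream): the core of steps 1-2 above.
 * Assume @dirty_bitmap is the live bitmap, @dirty_bitmap_buffer is the
 * snapshot handed to userspace, @n is the bitmap size in bytes, and @i,
 * @kvm and @memslot are in scope; the real implementation differs in details
 * (manual-protect mode, flush tracking, locking).
 */
#if 0	/* example only */
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;

		if (!dirty_bitmap[i])
			continue;

		mask = xchg(&dirty_bitmap[i], 0);	/* 1. snapshot + clear */
		dirty_bitmap_buffer[i] = mask;

		/* 2. write-protect the pages covered by @mask */
		kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
							i * BITS_PER_LONG, mask);
	}
#endif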


/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide a general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
 * always flush the TLB (step 4), even if the previous step failed and the
 * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM
 * logging API does not preclude a subsequent dirty log read by userspace.
 * Flushing the TLB ensures that writes will be marked dirty for the next
 * log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to userspace.
 *   4. Flush the TLBs if needed.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{}

/**
 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address from which to fetch the bitmap of dirty pages
 */
static int kvm_clear_dirty_log_protect(struct kvm *kvm,
				       struct kvm_clear_dirty_log *log)
{}

static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
					struct kvm_clear_dirty_log *log)
{}
#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static u64 kvm_supported_mem_attributes(struct kvm *kvm)
{}

/*
 * Returns true if _all_ gfns in the range [@start, @end) have attributes
 * such that the bits in @mask match @attrs.
 */
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long mask, unsigned long attrs)
{}

static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
						 struct kvm_mmu_notifier_range *range)
{}

static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
					  struct kvm_gfn_range *range)
{}

/* Set @attributes for the gfn range [@start, @end). */
static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long attributes)
{}
static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
					   struct kvm_memory_attributes *attrs)
{}
#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{}

static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
{}

static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

/*
 * Return the hva of a @gfn and the R/W attribute if possible.
 *
 * @slot: the kvm_memory_slot which contains @gfn
 * @gfn: the gfn to be translated
 * @writable: used to return the read/write attribute of the @slot if the hva
 * is valid and @writable is not NULL
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{}

static inline int check_user_page_hwpoison(unsigned long addr)
{}

/*
 * The fast path to get the writable pfn, which will be stored in @pfn;
 * true indicates success, otherwise false is returned.  It's also the
 * only part that can run in atomic context.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
			    bool *writable, kvm_pfn_t *pfn)
{}

/*
 * The slow path to get the pfn of the specified host virtual address;
 * 1 indicates success, -errno is returned if an error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
{}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{}

static int kvm_try_get_pfn(kvm_pfn_t pfn)
{}

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool write_fault,
			       bool *writable, kvm_pfn_t *p_pfn)
{}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether this function is forbidden from sleeping
 * @interruptible: whether the process can be interrupted by non-fatal signals
 * @async: whether this function needs to wait for I/O to complete if the
 *         host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows mapping a writable host page for !@write_fault
 *
 * The function will map a writable host page in these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
		     bool *async, bool write_fault, bool *writable)
{}
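
/*
 * Illustrative sketch (not part of upstream): the two calling patterns
 * described above.  @addr, @pfn and @writable are assumed to be in scope.
 */
#if 0	/* example only */
	/* 1) Demand a writable mapping: */
	pfn = hva_to_pfn(addr, false, false, NULL, true, NULL);

	/* 2) Accept read-only, but report what was mapped via @writable: */
	pfn = hva_to_pfn(addr, false, false, NULL, false, &writable);
#endif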

kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool interruptible, bool *async,
			       bool write_fault, bool *writable, hva_t *hva)
{}
EXPORT_SYMBOL_GPL();

kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{}
EXPORT_SYMBOL_GPL();

kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{}
EXPORT_SYMBOL_GPL();

/*
 * Do not use this helper unless you are absolutely certain the gfn _must_ be
 * backed by 'struct page'.  A valid example is if the backing memslot is
 * controlled by KVM.  Note, if the returned page is valid, its refcount has
 * been elevated by gfn_to_pfn().
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
{}

int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{}
EXPORT_SYMBOL_GPL();

void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{}
EXPORT_SYMBOL_GPL();

static bool kvm_is_ad_tracked_page(struct page *page)
{}

static void kvm_set_page_dirty(struct page *page)
{}

static void kvm_set_page_accessed(struct page *page)
{}

void kvm_release_page_clean(struct page *page)
{}
EXPORT_SYMBOL_GPL();

void kvm_release_pfn_clean(kvm_pfn_t pfn)
{}
EXPORT_SYMBOL_GPL();

void kvm_release_page_dirty(struct page *page)
{}
EXPORT_SYMBOL_GPL();

void kvm_release_pfn_dirty(kvm_pfn_t pfn)
{}
EXPORT_SYMBOL_GPL();

/*
 * Note, checking for an error/noslot pfn is the caller's responsibility when
 * directly marking a page dirty/accessed.  Unlike the "release" helpers, the
 * "set" helpers are not to be used when the pfn might point at garbage.
 */
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{}
EXPORT_SYMBOL_GPL();

void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{}
EXPORT_SYMBOL_GPL();

static int next_segment(unsigned long len, int offset)
{}

/* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
{}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{}
EXPORT_SYMBOL_GPL();

int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{}
EXPORT_SYMBOL_GPL();

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{}
EXPORT_SYMBOL_GPL();

int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{}
EXPORT_SYMBOL_GPL();

static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			           void *data, int offset, unsigned long len)
{}

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
{}
EXPORT_SYMBOL_GPL();

/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
static int __kvm_write_guest_page(struct kvm *kvm,
				  struct kvm_memory_slot *memslot, gfn_t gfn,
			          const void *data, int offset, int len)
{}

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
{}
EXPORT_SYMBOL_GPL();

int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{}
EXPORT_SYMBOL_GPL();

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{}
EXPORT_SYMBOL_GPL();

int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
		         unsigned long len)
{}
EXPORT_SYMBOL_GPL();

static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
				       struct gfn_to_hva_cache *ghc,
				       gpa_t gpa, unsigned long len)
{}

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{}
EXPORT_SYMBOL_GPL();

int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len)
{}
EXPORT_SYMBOL_GPL();

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{}
EXPORT_SYMBOL_GPL();

int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len)
{}
EXPORT_SYMBOL_GPL();

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{}
EXPORT_SYMBOL_GPL();

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{}
EXPORT_SYMBOL_GPL();

void mark_page_dirty_in_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *memslot,
		 	     gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{}
EXPORT_SYMBOL_GPL();

void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{}

void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{}

static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{}

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{}

static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{}

/*
 * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
 * pending.  This is mostly used when halting a vCPU, but may also be used
 * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
 */
bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
{}

static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
					  ktime_t end, bool success)
{}

static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
{}

/*
 * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
 * polling is enabled, busy wait for a short time before blocking to avoid the
 * expensive block+unblock sequence if a wake event arrives soon after the vCPU
 * is halted.
 */
void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();
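
/*
 * Illustrative sketch (not part of upstream): the shape of the polling fast
 * path described above.  The real code also grows/shrinks vcpu->halt_poll_ns
 * via the module parameters at the top of this file and records statistics.
 */
#if 0	/* example only */
	ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);

	do {
		if (kvm_vcpu_check_block(vcpu) < 0)
			return;		/* wake event arrived: skip blocking */
		cpu_relax();
	} while (single_task_running() && ktime_before(ktime_get(), stop));

	kvm_vcpu_block(vcpu);		/* no event: actually sleep */
#endif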

bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();
#endif /* !CONFIG_S390 */

int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{}
EXPORT_SYMBOL_GPL();

/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is decided by the following
 * heuristics:
 *
 *  (a) A VCPU which has not done a pl-exit or had cpu relax intercepted
 *  recently (a preempted lock holder), indicated by @in_spin_loop.
 *  Set at the beginning and cleared at the end of interception/PLE handler.
 *
 *  (b) A VCPU which has done a pl-exit/had cpu relax intercepted but did not
 *  get a chance last time (it has mostly become eligible now, since we have
 *  probably yielded to the lock holder in the last iteration).  This is done
 *  by toggling @dy_eligible each time a VCPU is checked for eligibility.
 *
 *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
 *  to a preempted lock-holder could result in wrong VCPU selection and CPU
 *  burning.  Giving priority to a potential lock-holder increases lock
 *  progress.
 *
 *  Since the algorithm is based on heuristics, accessing another VCPU's data
 *  without locking does not harm.  It may result in trying to yield to the
 *  same VCPU, failing, and continuing with the next VCPU, and so on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{}
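
/*
 * Illustrative sketch (not part of upstream): heuristics (a) and (b) above
 * boil down to roughly this, assuming CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 * so that vcpu->spin_loop is available.
 */
#if 0	/* example only */
	bool eligible = !vcpu->spin_loop.in_spin_loop ||
			vcpu->spin_loop.dy_eligible;

	/* toggle eligibility so a skipped vCPU gets a chance next round */
	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
#endif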

/*
 * Unlike kvm_arch_vcpu_runnable, this function is called outside
 * a vcpu_load/vcpu_put pair.  However, for most architectures
 * kvm_arch_vcpu_runnable does not require vcpu_load.
 */
bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{}

static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
{}

/*
 * By default, simply query the target vCPU's current mode when checking if a
 * vCPU was preempted in kernel mode.  All architectures except x86 (or more
 * specifically, except VMX) allow querying whether or not a vCPU is in kernel
 * mode even if the vCPU is NOT loaded, i.e. using kvm_arch_vcpu_in_kernel()
 * directly for cross-vCPU checks is functionally correct and accurate.
 */
bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{}

bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{}

void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{}
EXPORT_SYMBOL_GPL();

static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
{}

static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
{}

static const struct vm_operations_struct kvm_vcpu_vm_ops =;

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{}

static struct file_operations kvm_vcpu_fops =;

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{}

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
static int vcpu_get_pid(void *data, u64 *val)
{}

DEFINE_SIMPLE_ATTRIBUTE();

static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{}
#endif

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
{}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{}

static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
			      size_t size, loff_t *offset)
{}

static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
{}

static const struct file_operations kvm_vcpu_stats_fops =;

static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
{}

#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
				     struct kvm_pre_fault_memory *range)
{}
#endif

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{}

#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{}
#endif

static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
{}

static int kvm_device_ioctl_attr(struct kvm_device *dev,
				 int (*accessor)(struct kvm_device *dev,
						 struct kvm_device_attr *attr),
				 unsigned long arg)
{}

static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
			     unsigned long arg)
{}

static int kvm_device_release(struct inode *inode, struct file *filp)
{}

static struct file_operations kvm_device_fops =;

struct kvm_device *kvm_device_from_filp(struct file *filp)
{}

static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] =;

int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
{}

void kvm_unregister_device_ops(u32 type)
{}

static int kvm_ioctl_create_device(struct kvm *kvm,
				   struct kvm_create_device *cd)
{}

static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{}

static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
{}

static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
{}

int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
						  struct kvm_enable_cap *cap)
{}

bool kvm_are_all_memslots_empty(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();

static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
					   struct kvm_enable_cap *cap)
{}

static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
			      size_t size, loff_t *offset)
{}

static int kvm_vm_stats_release(struct inode *inode, struct file *file)
{}

static const struct file_operations kvm_vm_stats_fops =;

static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
{}

#define SANITY_CHECK_MEM_REGION_FIELD(field)

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{}

#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {};

struct compat_kvm_clear_dirty_log {};

long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
				     unsigned long arg)
{}

static long kvm_vm_compat_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{}
#endif

static struct file_operations kvm_vm_fops =;

bool file_is_kvm(struct file *file)
{}
EXPORT_SYMBOL_GPL();

static int kvm_dev_ioctl_create_vm(unsigned long type)
{}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{}

static struct file_operations kvm_chardev_ops =;

static struct miscdevice kvm_dev =;

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL();

static DEFINE_PER_CPU(bool, hardware_enabled);
static int kvm_usage_count;

static int __hardware_enable_nolock(void)
{}

static void hardware_enable_nolock(void *failed)
{}

static int kvm_online_cpu(unsigned int cpu)
{}

static void hardware_disable_nolock(void *junk)
{}

static int kvm_offline_cpu(unsigned int cpu)
{}

static void hardware_disable_all_nolock(void)
{}

static void hardware_disable_all(void)
{}

static int hardware_enable_all(void)
{}

static void kvm_shutdown(void)
{}

static int kvm_suspend(void)
{}

static void kvm_resume(void)
{}

static struct syscore_ops kvm_syscore_ops =;
#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
static int hardware_enable_all(void)
{
	return 0;
}

static void hardware_disable_all(void)
{

}
#endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */

static void kvm_iodevice_destructor(struct kvm_io_device *dev)
{}

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{}

static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
				 const struct kvm_io_range *r2)
{}

static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{}

static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
			     gpa_t addr, int len)
{}

static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			      struct kvm_io_range *range, const void *val)
{}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{}
EXPORT_SYMBOL_GPL();

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie)
{}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			     struct kvm_io_range *range, void *val)
{}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{}

int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
{}

int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{}

struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr)
{}
EXPORT_SYMBOL_GPL();

static int kvm_debugfs_open(struct inode *inode, struct file *file,
			   int (*get)(void *, u64 *), int (*set)(void *, u64),
			   const char *fmt)
{}

static int kvm_debugfs_release(struct inode *inode, struct file *file)
{}

static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
{}

static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
{}

static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
{}

static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
{}

static int kvm_stat_data_get(void *data, u64 *val)
{}

static int kvm_stat_data_clear(void *data, u64 val)
{}

static int kvm_stat_data_open(struct inode *inode, struct file *file)
{}

static const struct file_operations stat_fops_per_vm =;

static int vm_stat_get(void *_offset, u64 *val)
{}

static int vm_stat_clear(void *_offset, u64 val)
{}

DEFINE_SIMPLE_ATTRIBUTE();
DEFINE_SIMPLE_ATTRIBUTE();

static int vcpu_stat_get(void *_offset, u64 *val)
{}

static int vcpu_stat_clear(void *_offset, u64 val)
{}

DEFINE_SIMPLE_ATTRIBUTE();
DEFINE_SIMPLE_ATTRIBUTE();

static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{}

static void kvm_init_debug(void)
{}

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{}

/**
 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value: the per-CPU variable is updated
 * by the preempt notifier handlers.
 */
struct kvm_vcpu *kvm_get_running_vcpu(void)
{}
EXPORT_SYMBOL_GPL();
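
/*
 * Illustrative sketch (not part of upstream): the pattern the comment above
 * describes, i.e. a preempt-safe read of the per-CPU variable whose result
 * stays meaningful after preemption is re-enabled.
 */
#if 0	/* example only */
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = __this_cpu_read(kvm_running_vcpu);
	preempt_enable();

	return vcpu;
#endif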

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{}

#ifdef CONFIG_GUEST_PERF_EVENTS
static unsigned int kvm_guest_state(void)
{}

static unsigned long kvm_guest_get_ip(void)
{}

static struct perf_guest_info_callbacks kvm_guest_cbs =;

void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
{}
void kvm_unregister_perf_callbacks(void)
{}
#endif

int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{}
EXPORT_SYMBOL_GPL();

void kvm_exit(void)
{}
EXPORT_SYMBOL_GPL();

struct kvm_vm_worker_thread_context {};

static int kvm_vm_worker_thread(void *context)
{}

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr)
{}