#include <kvm/iodev.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"
#include <trace/events/ipi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
#include <linux/kvm_dirty_ring.h>
#define ITOA_MAX_LEN …
MODULE_AUTHOR(…);
MODULE_DESCRIPTION(…);
MODULE_LICENSE(…);
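/*
 * Halt-polling tunables (see Documentation/virt/kvm/halt-polling.rst):
 * halt_poll_ns bounds how long a halting vCPU busy-waits for a wakeup
 * before scheduling out, and the grow/shrink parameters control how the
 * per-vCPU polling window adapts to recent wakeup latencies.
 */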
unsigned int halt_poll_ns = …;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(…);
unsigned int halt_poll_ns_grow = …;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(…);
unsigned int halt_poll_ns_grow_start = …;
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(…);
unsigned int halt_poll_ns_shrink = …;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(…);
DEFINE_MUTEX(…);
LIST_HEAD(…);
static struct kmem_cache *kvm_vcpu_cache;
static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
static struct dentry *kvm_debugfs_dir;
static const struct file_operations stat_fops_per_vm;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#define KVM_COMPAT(c) …
#else
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }
static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c) …
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
#define KVM_EVENT_CREATE_VM …
#define KVM_EVENT_DESTROY_VM …
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{ … }
bool kvm_is_zone_device_page(struct page *page)
{ … }
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{ … }
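/*
 * vcpu_load()/vcpu_put() attach a vCPU's architectural state to the current
 * physical CPU and register/unregister its preempt notifier; they typically
 * bracket vCPU ioctls and must always be used in pairs.
 */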
void vcpu_load(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void vcpu_put(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{ … }
static void ack_kick(void *_completed)
{ … }
static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{ … }
static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
struct cpumask *tmp, int current_cpu)
{ … }
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
unsigned long *vcpu_bitmap)
{ … }
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{ … }
EXPORT_SYMBOL_GPL(…);
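/*
 * Remote TLB flushes and other cross-vCPU requests are built on
 * kvm_make_all_cpus_request(): a request bit is set on every vCPU, and
 * vCPUs currently in guest mode are kicked with an IPI while the rest pick
 * the request up on their next guest entry.
 */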
void kvm_flush_remote_tlbs(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{ … }
void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{ … }
static void kvm_flush_shadow_all(struct kvm *kvm)
{ … }
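/*
 * MMU memory caches let arch code top up a per-vCPU pool of objects (e.g.
 * page-table pages) in a context where allocation may sleep, and then
 * consume them later while holding mmu_lock, where it may not.
 */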
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
{ … }
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{ … }
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{ … }
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{ … }
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{ … }
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{ … }
#endif
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{ … }
static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{ … }
void kvm_destroy_vcpus(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
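/*
 * Generic MMU notifier glue: host virtual address ranges reported by the
 * primary MMU are translated into gfn ranges per overlapping memslot and
 * handed to the arch handler, while invalidate_begin/end track in-progress
 * invalidations so that racing page faults retry instead of installing
 * stale translations.
 */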
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{ … }
typedef … gfn_handler_t;
typedef … on_lock_fn_t;
struct kvm_mmu_notifier_range { … };
typedef … kvm_mn_ret_t;
static void kvm_null_fn(void)
{ … }
#define IS_KVM_NULL_FN(fn) …
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) …
static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
const struct kvm_mmu_notifier_range *range)
{ … }
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
gfn_handler_t handler)
{ … }
static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
gfn_handler_t handler)
{ … }
void kvm_mmu_invalidate_begin(struct kvm *kvm)
{ … }
void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{ … }
bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{ … }
static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{ … }
void kvm_mmu_invalidate_end(struct kvm *kvm)
{ … }
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
const struct mmu_notifier_range *range)
{ … }
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{ … }
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{ … }
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{ … }
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{ … }
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = …;
static int kvm_init_mmu_notifier(struct kvm *kvm)
{ … }
#else
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}
#endif
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
unsigned long state,
void *unused)
{ … }
static void kvm_init_pm_notifier(struct kvm *kvm)
{ … }
static void kvm_destroy_pm_notifier(struct kvm *kvm)
{ … }
#else
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}
static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{ … }
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{ … }
static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{ … }
static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{ … }
static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{ … }
static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{ … }
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{ … }
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{ … }
void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{ … }
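/*
 * kvm_create_vm() allocates the struct kvm, initializes memslots, I/O
 * buses and the MMU/PM notifiers, and links the VM into vm_list;
 * kvm_destroy_vm() unwinds the same steps in reverse order.
 */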
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{ … }
static void kvm_destroy_devices(struct kvm *kvm)
{ … }
static void kvm_destroy_vm(struct kvm *kvm)
{ … }
void kvm_get_kvm(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_get_kvm_safe(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_put_kvm(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{ … }
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{ … }
static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{ … }
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
struct kvm_memory_slot *b)
{ … }
static void kvm_insert_gfn_node(struct kvm_memslots *slots,
struct kvm_memory_slot *slot)
{ … }
static void kvm_erase_gfn_node(struct kvm_memslots *slots,
struct kvm_memory_slot *slot)
{ … }
static void kvm_replace_gfn_node(struct kvm_memslots *slots,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{ … }
static void kvm_replace_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{ … }
#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS …
static int check_memory_region_flags(struct kvm *kvm,
const struct kvm_userspace_memory_region2 *mem)
{ … }
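/*
 * Memslot updates maintain two sets of memslots per address space and swap
 * the active set under SRCU: the inactive set is modified, made visible
 * with a synchronize_srcu_expedited(), and only then is the now-inactive
 * set touched again.  kvm_set_memslot() drives the full state machine.
 */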
static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{ … }
static int kvm_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{ … }
static void kvm_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{ … }
static void kvm_activate_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{ … }
static void kvm_copy_memslot(struct kvm_memory_slot *dest,
const struct kvm_memory_slot *src)
{ … }
static void kvm_invalidate_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *invalid_slot)
{ … }
static void kvm_create_memslot(struct kvm *kvm,
struct kvm_memory_slot *new)
{ … }
static void kvm_delete_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *invalid_slot)
{ … }
static void kvm_move_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
struct kvm_memory_slot *invalid_slot)
{ … }
static void kvm_update_flags_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new)
{ … }
static int kvm_set_memslot(struct kvm *kvm,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{ … }
static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
gfn_t start, gfn_t end)
{ … }
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region2 *mem)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region2 *mem)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region2 *mem)
{ … }
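/*
 * Dirty logging: without CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT the
 * dirty bitmap is simply copied out to userspace; with it, harvested bits
 * are cleared under the slots lock and the corresponding pages are
 * write-protected so that later writes are tracked precisely.
 */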
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot)
{
	struct kvm_memslots *slots;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	/* Dirty ring tracking may be exclusive to dirty bitmap tracking */
	if (!kvm_use_dirty_bitmap(kvm))
		return -ENXIO;

	*memslot = NULL;
	*is_dirty = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	*memslot = id_to_memslot(slots, id);
	if (!(*memslot) || !(*memslot)->dirty_bitmap)
		return -ENOENT;

	kvm_arch_sync_dirty_log(kvm, *memslot);

	n = kvm_dirty_bitmap_bytes(*memslot);

	/* Report whether any bit is set, but copy the bitmap out regardless. */
	for (i = 0; !any && i < n / sizeof(long); ++i)
		any = (*memslot)->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#else
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{ … }
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{ … }
static int kvm_clear_dirty_log_protect(struct kvm *kvm,
struct kvm_clear_dirty_log *log)
{ … }
static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
struct kvm_clear_dirty_log *log)
{ … }
#endif
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static u64 kvm_supported_mem_attributes(struct kvm *kvm)
{ … }
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
unsigned long mask, unsigned long attrs)
{ … }
static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
struct kvm_mmu_notifier_range *range)
{ … }
static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{ … }
static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
unsigned long attributes)
{ … }
static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
struct kvm_memory_attributes *attrs)
{ … }
#endif
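/*
 * Address translation chain: gfn_to_memslot() resolves a gfn to its
 * memslot, __gfn_to_hva_many() turns that into a host virtual address, and
 * hva_to_pfn() pins the backing page via get_user_pages() or follows
 * VM_IO/VM_PFNMAP mappings to produce a host pfn.
 */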
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
{ … }
static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages, bool write)
{ … }
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages)
{ … }
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
gfn_t gfn, bool *writable)
{ … }
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{ … }
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{ … }
static inline int check_user_page_hwpoison(unsigned long addr)
{ … }
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{ … }
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool interruptible, bool *writable, kvm_pfn_t *pfn)
{ … }
static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{ … }
static int kvm_try_get_pfn(kvm_pfn_t pfn)
{ … }
static int hva_to_pfn_remapped(struct vm_area_struct *vma,
unsigned long addr, bool write_fault,
bool *writable, kvm_pfn_t *p_pfn)
{ … }
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
bool *async, bool write_fault, bool *writable)
{ … }
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
bool atomic, bool interruptible, bool *async,
bool write_fault, bool *writable, hva_t *hva)
{ … }
EXPORT_SYMBOL_GPL(…);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable)
{ … }
EXPORT_SYMBOL_GPL(…);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
struct page **pages, int nr_pages)
{ … }
EXPORT_SYMBOL_GPL(…);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
{ … }
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_is_ad_tracked_page(struct page *page)
{ … }
static void kvm_set_page_dirty(struct page *page)
{ … }
static void kvm_set_page_accessed(struct page *page)
{ … }
void kvm_release_page_clean(struct page *page)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_release_pfn_clean(kvm_pfn_t pfn)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_release_page_dirty(struct page *page)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_release_pfn_dirty(kvm_pfn_t pfn)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{ … }
EXPORT_SYMBOL_GPL(…);
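/*
 * Guest memory accessors.  kvm_read_guest()/kvm_write_guest() split a gpa
 * range at page boundaries and copy through the memslot's host mapping,
 * e.g. (illustrative only):
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 *
 * The gfn_to_hva_cache variants amortize the memslot lookup for guest
 * structures that are accessed repeatedly.
 */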
static int next_segment(unsigned long len, int offset)
{ … }
static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, int len)
{ … }
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
int offset, int len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, unsigned long len)
{ … }
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
void *data, unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __kvm_write_guest_page(struct kvm *kvm,
struct kvm_memory_slot *memslot, gfn_t gfn,
const void *data, int offset, int len)
{ … }
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
const void *data, int offset, int len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
const void *data, int offset, int len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{ … }
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{ … }
EXPORT_SYMBOL_GPL(…);
void mark_page_dirty_in_slot(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{ … }
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{ … }
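/*
 * Adaptive halt polling: short blocks grow the per-vCPU polling window
 * (multiplying by halt_poll_ns_grow, starting from halt_poll_ns_grow_start)
 * up to the per-VM or global cap, while long or wasted blocks shrink it
 * (dividing by halt_poll_ns_shrink, or resetting to zero when it is 0).
 */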
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{ … }
static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{ … }
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{ … }
bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
{ … }
static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
ktime_t end, bool success)
{ … }
static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
{ … }
void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
#ifndef CONFIG_S390
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{ … }
bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{ … }
static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
{ … }
bool __weak kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{ … }
bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{ … }
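/*
 * kvm_vcpu_on_spin() implements directed yield for pause/spin exits: boost
 * a runnable, not-currently-running vCPU of the same VM, skipping vCPUs
 * that were not preempted in kernel mode when yield_to_kernel_mode is set.
 */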
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
{ … }
static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
{ … }
static const struct vm_operations_struct kvm_vcpu_vm_ops = …;
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{ … }
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{ … }
static struct file_operations kvm_vcpu_fops = …;
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{ … }
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
static int vcpu_get_pid(void *data, u64 *val)
{ … }
DEFINE_SIMPLE_ATTRIBUTE(…);
static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{ … }
#endif
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
{ … }
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{ … }
static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
size_t size, loff_t *offset)
{ … }
static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
{ … }
static const struct file_operations kvm_vcpu_stats_fops = …;
static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
{ … }
#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range)
{ … }
#endif
static long kvm_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{ … }
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{ … }
#endif
static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
{ … }
static int kvm_device_ioctl_attr(struct kvm_device *dev,
int (*accessor)(struct kvm_device *dev,
struct kvm_device_attr *attr),
unsigned long arg)
{ … }
static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{ … }
static int kvm_device_release(struct inode *inode, struct file *filp)
{ … }
static struct file_operations kvm_device_fops = …;
struct kvm_device *kvm_device_from_filp(struct file *filp)
{ … }
static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = …;
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
{ … }
void kvm_unregister_device_ops(u32 type)
{ … }
static int kvm_ioctl_create_device(struct kvm *kvm,
struct kvm_create_device *cd)
{ … }
static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{ … }
static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
{ … }
static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
{ … }
int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{ … }
bool kvm_are_all_memslots_empty(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
struct kvm_enable_cap *cap)
{ … }
static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
size_t size, loff_t *offset)
{ … }
static int kvm_vm_stats_release(struct inode *inode, struct file *file)
{ … }
static const struct file_operations kvm_vm_stats_fops = …;
static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
{ … }
#define SANITY_CHECK_MEM_REGION_FIELD(field) …
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{ … }
#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log { … };
struct compat_kvm_clear_dirty_log { … };
long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{ … }
static long kvm_vm_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{ … }
#endif
static struct file_operations kvm_vm_fops = …;
bool file_is_kvm(struct file *file)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvm_dev_ioctl_create_vm(unsigned long type)
{ … }
static long kvm_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{ … }
static struct file_operations kvm_chardev_ops = …;
static struct miscdevice kvm_dev = …;
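/*
 * Hardware virtualization is enabled on all online CPUs when the first VM
 * is created and disabled again when the last VM is destroyed
 * (kvm_usage_count); CPU hotplug, suspend/resume and shutdown hooks keep
 * newly-onlined or resumed CPUs in the same state.
 */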
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(…);
static DEFINE_PER_CPU(bool, hardware_enabled);
static int kvm_usage_count;
static int __hardware_enable_nolock(void)
{ … }
static void hardware_enable_nolock(void *failed)
{ … }
static int kvm_online_cpu(unsigned int cpu)
{ … }
static void hardware_disable_nolock(void *junk)
{ … }
static int kvm_offline_cpu(unsigned int cpu)
{ … }
static void hardware_disable_all_nolock(void)
{ … }
static void hardware_disable_all(void)
{ … }
static int hardware_enable_all(void)
{ … }
static void kvm_shutdown(void)
{ … }
static int kvm_suspend(void)
{ … }
static void kvm_resume(void)
{ … }
static struct syscore_ops kvm_syscore_ops = …;
#else
static int hardware_enable_all(void)
{
	return 0;
}
static void hardware_disable_all(void)
{
}
#endif
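/*
 * The I/O bus maps guest MMIO/PIO ranges to in-kernel devices.  Ranges are
 * kept sorted so lookups can binary-search, and the bus pointer is
 * protected by kvm->srcu: (un)registration builds a new array and
 * publishes it only after an SRCU grace period.
 */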
static void kvm_iodevice_destructor(struct kvm_io_device *dev)
{ … }
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{ … }
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
const struct kvm_io_range *r2)
{ … }
static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{ … }
static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
gpa_t addr, int len)
{ … }
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
struct kvm_io_range *range, const void *val)
{ … }
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
gpa_t addr, int len, const void *val, long cookie)
{ … }
static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
struct kvm_io_range *range, void *val)
{ … }
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{ … }
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
{ … }
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{ … }
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvm_debugfs_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt)
{ … }
static int kvm_debugfs_release(struct inode *inode, struct file *file)
{ … }
static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
{ … }
static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
{ … }
static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
{ … }
static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
{ … }
static int kvm_stat_data_get(void *data, u64 *val)
{ … }
static int kvm_stat_data_clear(void *data, u64 val)
{ … }
static int kvm_stat_data_open(struct inode *inode, struct file *file)
{ … }
static const struct file_operations stat_fops_per_vm = …;
static int vm_stat_get(void *_offset, u64 *val)
{ … }
static int vm_stat_clear(void *_offset, u64 val)
{ … }
DEFINE_SIMPLE_ATTRIBUTE(…);
DEFINE_SIMPLE_ATTRIBUTE(…);
static int vcpu_stat_get(void *_offset, u64 *val)
{ … }
static int vcpu_stat_clear(void *_offset, u64 val)
{ … }
DEFINE_SIMPLE_ATTRIBUTE(…);
DEFINE_SIMPLE_ATTRIBUTE(…);
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{ … }
static void kvm_init_debug(void)
{ … }
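/*
 * Preempt notifiers record which vCPU is running on each physical CPU (so
 * kvm_get_running_vcpu() works from interrupt context) and save/restore
 * arch vCPU state across involuntary preemption.
 */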
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{ … }
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{ … }
static void kvm_sched_out(struct preempt_notifier *pn,
struct task_struct *next)
{ … }
struct kvm_vcpu *kvm_get_running_vcpu(void)
{ … }
EXPORT_SYMBOL_GPL(…);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{ … }
#ifdef CONFIG_GUEST_PERF_EVENTS
static unsigned int kvm_guest_state(void)
{ … }
static unsigned long kvm_guest_get_ip(void)
{ … }
static struct perf_guest_info_callbacks kvm_guest_cbs = …;
void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
{ … }
void kvm_unregister_perf_callbacks(void)
{ … }
#endif
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_exit(void)
{ … }
EXPORT_SYMBOL_GPL(…);
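/*
 * kvm_vm_create_worker_thread() spawns a kthread attached to the caller's
 * cgroups for long-running per-VM maintenance work, e.g. x86's NX
 * huge-page recovery thread.
 */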
struct kvm_vm_worker_thread_context { … };
static int kvm_vm_worker_thread(void *context)
{ … }
int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
uintptr_t data, const char *name,
struct task_struct **thread_ptr)
{ … }