#ifndef __KVM_HOST_H
#define __KVM_HOST_H
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>
#include <linux/hashtable.h>
#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>
#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS …
#endif
#define KVM_MEMSLOT_INVALID …
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS …
#define KVM_MAX_MMIO_FRAGMENTS …
#ifndef KVM_MAX_NR_ADDRESS_SPACES
#define KVM_MAX_NR_ADDRESS_SPACES …
#endif
#define KVM_PFN_ERR_MASK …
#define KVM_PFN_ERR_NOSLOT_MASK …
#define KVM_PFN_NOSLOT …
#define KVM_PFN_ERR_FAULT …
#define KVM_PFN_ERR_HWPOISON …
#define KVM_PFN_ERR_RO_FAULT …
#define KVM_PFN_ERR_SIGPENDING …
static inline bool is_error_pfn(kvm_pfn_t pfn)
{ … }
static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
{ … }
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{ … }
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{ … }
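/*
 * Illustrative sketch (not kernel code): classifying a translation
 * failure.  is_error_noslot_pfn() catches every failure; is_noslot_pfn()
 * then distinguishes "no memslot" (typically MMIO to emulate) from real
 * errors such as KVM_PFN_ERR_FAULT or KVM_PFN_ERR_HWPOISON.
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn)) {
 *		if (is_noslot_pfn(pfn))
 *			return emulate_mmio(vcpu);	// hypothetical helper
 *		return -EFAULT;
 *	}
 *	...use pfn...
 *	kvm_release_pfn_clean(pfn);	// or _dirty() if the page was written
 */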
#ifndef KVM_HVA_ERR_BAD
#define KVM_HVA_ERR_BAD …
#define KVM_HVA_ERR_RO_BAD …
static inline bool kvm_is_error_hva(unsigned long addr)
{ … }
#endif
static inline bool kvm_is_error_gpa(gpa_t gpa)
{ … }
#define KVM_ERR_PTR_BAD_PAGE …
static inline bool is_error_page(struct page *page)
{ … }
#define KVM_REQUEST_MASK …
#define KVM_REQUEST_NO_WAKEUP …
#define KVM_REQUEST_WAIT …
#define KVM_REQUEST_NO_ACTION …
#define KVM_REQ_TLB_FLUSH …
#define KVM_REQ_VM_DEAD …
#define KVM_REQ_UNBLOCK …
#define KVM_REQ_DIRTY_RING_SOFT_FULL …
#define KVM_REQUEST_ARCH_BASE …
#define KVM_REQ_OUTSIDE_GUEST_MODE …
#define KVM_ARCH_REQ_FLAGS(nr, flags) …
#define KVM_ARCH_REQ(nr) …
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
#define KVM_USERSPACE_IRQ_SOURCE_ID …
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID …
extern struct mutex kvm_lock;
extern struct list_head vm_list;
struct kvm_io_range { … };
#define NR_IOBUS_DEVS …
struct kvm_io_bus { … };
enum kvm_bus { … };
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
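/*
 * Sketch: registering an in-kernel MMIO device, in the style of the
 * ioeventfd/coalesced-MMIO code (kvm_iodevice_init() and the ops
 * structure come from <kvm/iodev.h>; my_dev/my_ops are illustrative
 * names).  kvm_io_bus_register_dev() expects kvm->slots_lock to be held:
 *
 *	kvm_iodevice_init(&my_dev->dev, &my_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len,
 *				      &my_dev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 */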
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf { … };
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
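/*
 * Rough async-#PF flow (a sketch; exact return-code handling is
 * arch-specific): when a guest fault would block on host paging, the
 * arch queues the page-in and resumes the guest, then drains
 * completions from its run loop:
 *
 *	if (kvm_setup_async_pf(vcpu, cr2_or_gpa, hva, &arch_pf))
 *		return ...;			// retry the fault later
 *	...
 *	kvm_check_async_pf_completion(vcpu);	// in vcpu_run(), before entry
 */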
#endif
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
union kvm_mmu_notifier_arg { … };
struct kvm_gfn_range { … };
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif
enum { … };
#define KVM_UNMAPPED_PAGE …
struct kvm_host_map { … };
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{ … }
static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{ … }
struct kvm_mmio_fragment { … };
struct kvm_vcpu { … };
static __always_inline void guest_timing_enter_irqoff(void)
{ … }
static __always_inline void guest_context_enter_irqoff(void)
{ … }
static __always_inline void guest_enter_irqoff(void)
{ … }
static __always_inline void guest_state_enter_irqoff(void)
{ … }
static __always_inline void guest_context_exit_irqoff(void)
{ … }
static __always_inline void guest_timing_exit_irqoff(void)
{ … }
static __always_inline void guest_exit_irqoff(void)
{ … }
static inline void guest_exit(void)
{ … }
static __always_inline void guest_state_exit_irqoff(void)
{ … }
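/*
 * One common entry/exit shape for archs that enter and handle exits
 * with IRQs disabled (a sketch; see the individual helpers for the
 * precise ordering and context-tracking rules):
 *
 *	local_irq_disable();
 *	guest_timing_enter_irqoff();	// start guest vtime accounting
 *	guest_state_enter_irqoff();	// context tracking: now "in guest"
 *	<hardware guest entry/exit>
 *	guest_state_exit_irqoff();	// back in host context
 *	guest_timing_exit_irqoff();	// stop guest vtime accounting
 *	local_irq_enable();
 */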
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{ … }
#define KVM_MEM_MAX_NR_PAGES …
struct kvm_memory_slot { … };
static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
{ … }
static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{ … }
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{ … }
static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{ … }
#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS …
#endif
struct kvm_s390_adapter_int { … };
struct kvm_hv_sint { … };
struct kvm_xen_evtchn { … };
struct kvm_kernel_irq_routing_entry { … };
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table { … };
#endif
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
#ifndef KVM_INTERNAL_MEM_SLOTS
#define KVM_INTERNAL_MEM_SLOTS …
#endif
#define KVM_MEM_SLOTS_NUM …
#define KVM_USER_MEM_SLOTS …
#if KVM_MAX_NR_ADDRESS_SPACES == 1
static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
{
return KVM_MAX_NR_ADDRESS_SPACES;
}
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
return 0;
}
#endif
#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
{
return false;
}
#endif
struct kvm_memslots { … };
struct kvm { … };
#define kvm_err(fmt, ...) …
#define kvm_info(fmt, ...) …
#define kvm_debug(fmt, ...) …
#define kvm_debug_ratelimited(fmt, ...) …
#define kvm_pr_unimpl(fmt, ...) …
#define vcpu_unimpl(vcpu, fmt, ...) …
#define vcpu_debug(vcpu, fmt, ...) …
#define vcpu_debug_ratelimited(vcpu, fmt, ...) …
#define vcpu_err(vcpu, fmt, ...) …
static inline void kvm_vm_dead(struct kvm *kvm)
{ … }
static inline void kvm_vm_bugged(struct kvm *kvm)
{ … }
#define KVM_BUG(cond, kvm, fmt...) …
#define KVM_BUG_ON(cond, kvm) …
#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) …
static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{ … }
static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
{ … }
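/*
 * Sketch: vCPU-context code accesses memslots under the vCPU's SRCU
 * read side, e.g.:
 *
 *	kvm_vcpu_srcu_read_lock(vcpu);
 *	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 *	...
 *	kvm_vcpu_srcu_read_unlock(vcpu);
 */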
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{ … }
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{ … }
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{ … }
#define kvm_for_each_vcpu(idx, vcpup, kvm) …
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{ … }
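/*
 * Illustrative vCPU iteration (idx is an unsigned long index into the
 * vcpu_array xarray):
 *
 *	unsigned long idx;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(idx, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */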
void kvm_destroy_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif
#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
return 0;
}
static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{ … }
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{ … }
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{ … }
static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
{ … }
bool kvm_are_all_memslots_empty(struct kvm *kvm);
#define kvm_for_each_memslot(memslot, bkt, slots) …
static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{ … }
struct kvm_memslot_iter { … };
static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
{ … }
static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
struct kvm_memslots *slots,
gfn_t start)
{ … }
static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
{ … }
#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) …
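/*
 * Sketch: walking every memslot that overlaps [start, end):
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *		...
 *	}
 */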
enum kvm_mr_change { … };
int kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region2 *mem);
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_flush_shadow_all(struct kvm *kvm);
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
struct page **pages, int nr_pages);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
bool atomic, bool interruptible, bool *async,
bool write_fault, bool *writable, hva_t *hva);
void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
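/*
 * Typical cached-access pattern (sketch; "st" is an illustrative
 * guest-shared structure): initialize the cache once, then use the
 * cheap cached accessors on the hot path:
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(st)))
 *		return -EFAULT;
 *	kvm_read_guest_cached(kvm, &ghc, &st, sizeof(st));
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &st, sizeof(st));
 */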
#define __kvm_get_guest(kvm, gfn, offset, v) …
#define kvm_get_guest(kvm, gpa, v) …
#define __kvm_put_guest(kvm, gfn, offset, v) …
#define kvm_put_guest(kvm, gpa, v) …
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
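/*
 * Sketch of temporary guest-page access via kvm_vcpu_map().  Note that
 * despite the gpa_t parameter name above, in-tree callers pass a gfn,
 * e.g. gpa_to_gfn(gpa):
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 *		return -EFAULT;
 *	...access map.hva...
 *	kvm_vcpu_unmap(vcpu, &map, true);	// true: page was written
 */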
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
{ … }
static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
{ … }
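/*
 * Canonical pfn-cache read side (sketch, mirroring the
 * activate/check/refresh lifecycle above):
 *
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gpc_check(gpc, len)) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *		if (kvm_gpc_refresh(gpc, len))
 *			return;			// refresh failed, bail
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *	...access gpc->khva...
 *	read_unlock_irqrestore(&gpc->lock, flags);
 */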
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
const struct kvm_memory_slot *memslot);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
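/*
 * Usage sketch: top up the cache in vCPU context where sleeping is
 * allowed, then allocate under the MMU lock where it is not (the cache
 * field name is illustrative and per-arch):
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, min);
 *	if (r)
 *		return r;
 *	spin_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	...
 *	spin_unlock(&kvm->mmu_lock);
 */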
#endif
void kvm_mmu_invalidate_begin(struct kvm *kvm);
void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
void kvm_mmu_invalidate_end(struct kvm *kvm);
bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot);
#endif
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap);
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr);
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#else
static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
#endif
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
void kvm_arch_create_vm_debugfs(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
}
#endif
static inline void __kvm_arch_free_vm(struct kvm *kvm)
{ … }
#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
__kvm_arch_free_vm(kvm);
}
#endif
#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
return -ENOTSUPP;
}
#else
int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
#endif
#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
gfn_t gfn, u64 nr_pages)
{
return -EOPNOTSUPP;
}
#else
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}
static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}
static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}
static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}
static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return false;
}
#endif
static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{ … }
static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{ … }
static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{ … }
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{ … }
#endif
#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
void kvm_unregister_perf_callbacks(void);
#else
static inline void kvm_register_perf_callbacks(void *ign) {}
static inline void kvm_unregister_perf_callbacks(void) {}
#endif
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
bool kvm_is_zone_device_page(struct page *page);
struct kvm_irq_ack_notifier { … };
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
static inline struct kvm_memory_slot *
try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{ … }
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{ … }
static inline struct kvm_memory_slot *
____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{ … }
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{ … }
static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{ … }
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{ … }
static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{ … }
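/*
 * The slot<->address arithmetic implemented by the helpers above,
 * spelled out (illustrative):
 *
 *	hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 *	gfn = slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
 */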
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{ … }
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{ … }
static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{ … }
static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{ … }
static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
{ … }
enum kvm_stat_kind { … };
struct kvm_stat_data { … };
struct _kvm_stats_desc { … };
#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) …
#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) …
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) …
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) …
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) …
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) …
#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) …
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) …
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) …
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) …
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) …
#define STATS_DESC_COUNTER(SCOPE, name) …
#define STATS_DESC_ICOUNTER(SCOPE, name) …
#define STATS_DESC_PCOUNTER(SCOPE, name) …
#define STATS_DESC_IBOOLEAN(SCOPE, name) …
#define STATS_DESC_PBOOLEAN(SCOPE, name) …
#define STATS_DESC_TIME_NSEC(SCOPE, name) …
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) …
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) …
#define KVM_GENERIC_VM_STATS() …
#define KVM_GENERIC_VCPU_STATS() …
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
const struct _kvm_stats_desc *desc,
void *stats, size_t size_stats,
char __user *user_buffer, size_t size, loff_t *offset);
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
u64 value, size_t bucket_size)
{ … }
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{ … }
#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) …
#define KVM_STATS_LOG_HIST_UPDATE(array, value) …
extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
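/*
 * Sketch: each arch instantiates these descriptor tables with the
 * STATS_DESC_* helpers, e.g. (stat names illustrative):
 *
 *	const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 *		KVM_GENERIC_VM_STATS(),
 *		STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
 *	};
 */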
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{ … }
static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
unsigned long mmu_seq,
gfn_t gfn)
{ … }
static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
unsigned long mmu_seq,
gfn_t gfn)
{ … }
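/*
 * Canonical invalidation-race pattern (sketch): snapshot the sequence
 * count before the sleepable host-side translation, then re-check under
 * the MMU lock before installing the mapping:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = __gfn_to_pfn_memslot(...);	// may sleep/fault
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
 *		goto retry;			// raced with an invalidation
 *	...install mapping...
 */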
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
#define KVM_MAX_IRQ_ROUTES …
bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
unsigned flags);
int kvm_init_irq_routing(struct kvm *kvm);
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);
#else
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
static inline int kvm_init_irq_routing(struct kvm *kvm)
{
return 0;
}
#endif
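/*
 * Sketch: installing a routing table, as the irqchip setup code does
 * (default_routing is an illustrative array of kvm_irq_routing_entry):
 *
 *	r = kvm_set_irq_routing(kvm, default_routing,
 *				ARRAY_SIZE(default_routing), 0);
 */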
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
bool kvm_notify_irqfd_resampler(struct kvm *kvm,
unsigned int irqchip,
unsigned int pin);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
return -EINVAL;
}
static inline void kvm_irqfd_release(struct kvm *kvm) {}
static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
unsigned int irqchip,
unsigned int pin)
{
return false;
}
#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);
static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{ … }
static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{ … }
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{ … }
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{ … }
static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{ … }
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{ … }
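/*
 * Typical request flow (sketch): a producer posts a request and kicks
 * the target vCPU; the vCPU consumes it from its run loop:
 *
 *	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	if (kvm_request_pending(vcpu)) {		// in vcpu_run()
 *		if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
 *			...handle it; check_request already cleared it...
 *	}
 */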
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
extern bool kvm_rebooting;
#endif
extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;
struct kvm_device { … };
struct kvm_device_ops { … };
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
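/*
 * Sketch: an arch registers a device type at init time; userspace can
 * then instantiate it with the KVM_CREATE_DEVICE ioctl:
 *
 *	ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
 *				      KVM_DEV_TYPE_ARM_VGIC_V2);
 */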
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{ … }
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{ … }
#else
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif
static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{ … }
struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
struct kvm_kernel_irq_routing_entry *);
#endif
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
return vcpu->valid_wakeup;
}
#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{ … }
#endif
#ifdef CONFIG_HAVE_KVM_NO_POLL
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
return false;
}
#endif
#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl,
unsigned long arg)
{ … }
#endif
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{ … }
#endif
typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
uintptr_t data, const char *name,
struct task_struct **thread_ptr);
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{ … }
#endif
static inline void kvm_account_pgtable_pages(void *virt, int nr)
{ … }
#define KVM_DIRTY_RING_RSVD_ENTRIES …
#define KVM_DIRTY_RING_MAX_ENTRIES …
static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
gpa_t gpa, gpa_t size,
bool is_write, bool is_exec,
bool is_private)
{ … }
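/*
 * Sketch: a fault handler that cannot resolve, say, a private/shared
 * attribute mismatch fills in the exit and punts to userspace:
 *
 *	kvm_prepare_memory_fault_exit(vcpu, gpa, PAGE_SIZE, is_write,
 *				      is_exec, is_private);
 *	return -EFAULT;
 */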
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
{ … }
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
unsigned long mask, unsigned long attrs);
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range);
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range);
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{ … }
#else
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
return false;
}
#endif
#ifdef CONFIG_KVM_PRIVATE_MEM
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
#else
static inline int kvm_gmem_get_pfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
kvm_pfn_t *pfn, int *max_order)
{
KVM_BUG_ON(1, kvm);
return -EIO;
}
#endif
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
#endif
#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				    void __user *src, int order, void *opaque);
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque);
#endif
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
#endif
#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range);
#endif
#endif