#define pr_fmt(fmt) …
#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "mmu/page_track.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"
#include "smm.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>
#include <linux/smp.h>
#include <trace/events/ipi.h>
#include <trace/events/kvm.h>
#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#define MAX_IO_MSRS …
#define KVM_MAX_MCE_BANKS …
struct kvm_caps kvm_caps __read_mostly;
EXPORT_SYMBOL_GPL(…);
struct kvm_host_values kvm_host __read_mostly;
EXPORT_SYMBOL_GPL(…);
#define ERR_PTR_USR(e) …
#define emul_to_vcpu(ctxt) …
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = …;
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif
static u64 __read_mostly cr4_reserved_bits = …;
#define KVM_EXIT_HYPERCALL_VALID_MASK …
#define KVM_CAP_PMU_VALID_MASK …
#define KVM_X2APIC_API_VALID_FLAGS …
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static DEFINE_MUTEX(vendor_module_lock);
struct kvm_x86_ops kvm_x86_ops __read_mostly;
#define KVM_X86_OP …
#define KVM_X86_OP_OPTIONAL …
#define KVM_X86_OP_OPTIONAL_RET0 …
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(…);
EXPORT_STATIC_CALL_GPL(…);
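/*
 * Sketch of the x-macro pattern wired up above (the macro bodies are
 * elided here): each KVM_X86_OP* variant is defined to emit a static
 * call for one vendor hook, and <asm/kvm-x86-ops.h> expands to one
 * invocation per member of struct kvm_x86_ops, roughly:
 *
 *	#define KVM_X86_OP(func) \
 *		DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
 *					*(((struct kvm_x86_ops *)0)->func))
 *
 * so callers can use static_call(kvm_x86_get_cpl)(vcpu) instead of an
 * indirect branch through kvm_x86_ops.get_cpl.
 */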
static bool __read_mostly ignore_msrs = …;
module_param(ignore_msrs, bool, 0644);
bool __read_mostly report_ignored_msrs = …;
module_param(report_ignored_msrs, bool, 0644);
EXPORT_SYMBOL_GPL(…);
unsigned int min_timer_period_us = …;
module_param(min_timer_period_us, uint, 0644);
static bool __read_mostly kvmclock_periodic_sync = …;
module_param(kvmclock_periodic_sync, bool, 0444);
static u32 __read_mostly tsc_tolerance_ppm = …;
module_param(tsc_tolerance_ppm, uint, 0644);
static bool __read_mostly vector_hashing = …;
module_param(vector_hashing, bool, 0444);
bool __read_mostly enable_vmware_backdoor = …;
module_param(enable_vmware_backdoor, bool, 0444);
EXPORT_SYMBOL_GPL(…);
#define KVM_FEP_CLEAR_RFLAGS_RF …
static int __read_mostly force_emulation_prefix;
module_param(force_emulation_prefix, int, 0644);
int __read_mostly pi_inject_timer = …;
module_param(pi_inject_timer, bint, 0644);
bool __read_mostly enable_pmu = …;
EXPORT_SYMBOL_GPL(…);
module_param(enable_pmu, bool, 0444);
bool __read_mostly eager_page_split = …;
module_param(eager_page_split, bool, 0644);
static bool __read_mostly mitigate_smt_rsb;
module_param(mitigate_smt_rsb, bool, 0444);
#define KVM_MAX_NR_USER_RETURN_MSRS …
struct kvm_user_return_msrs { … };
u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(…);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;
#define KVM_SUPPORTED_XCR0 …
bool __read_mostly allow_smaller_maxphyaddr = …;
EXPORT_SYMBOL_GPL(…);
bool __read_mostly enable_apicv = …;
EXPORT_SYMBOL_GPL(…);
const struct _kvm_stats_desc kvm_vm_stats_desc[] = …;
const struct kvm_stats_header kvm_vm_stats_header = …;
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = …;
const struct kvm_stats_header kvm_vcpu_stats_header = …;
static struct kmem_cache *x86_emulator_cache;
static const u32 msrs_to_save_base[] = …;
static const u32 msrs_to_save_pmu[] = …;
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
ARRAY_SIZE(msrs_to_save_pmu)];
static unsigned num_msrs_to_save;
static const u32 emulated_msrs_all[] = …;
static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;
static const u32 msr_based_features_all_except_vmx[] = …;
static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
(KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
static unsigned int num_msr_based_features;
static bool kvm_is_immutable_feature_msr(u32 msr)
{ … }
static bool kvm_is_advertised_msr(u32 msr_index)
{ … }
typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
                            bool host_initiated);
static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
u64 *data, bool host_initiated,
enum kvm_msr_access rw,
msr_access_t msr_access_fn)
{ … }
static struct kmem_cache *kvm_alloc_emulator_cache(void)
{ … }
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{ … }
static void kvm_on_user_return(struct user_return_notifier *urn)
{ … }
static int kvm_probe_user_return_msr(u32 msr)
{ … }
int kvm_add_user_return_msr(u32 msr)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_find_user_return_msr(u32 msr)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_user_return_msr_cpu_online(void)
{ … }
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{ … }
EXPORT_SYMBOL_GPL(…);
static void drop_user_return_notifiers(void)
{ … }
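/*
 * Illustrative use of the user-return MSR machinery above (a sketch,
 * not code from this file): vendor modules reserve a slot once at init
 * and then load guest values per vCPU; kvm_on_user_return() lazily
 * restores host values when the CPU next returns to userspace.
 *
 *	int slot = kvm_add_user_return_msr(MSR_STAR);    // setup; < 0 on failure
 *	...
 *	kvm_set_user_return_msr(slot, guest_val, -1ull); // on vCPU load
 */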
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{ … }
enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
noinstr void kvm_spurious_fault(void)
{ … }
EXPORT_SYMBOL_GPL(…);
#define EXCPT_BENIGN …
#define EXCPT_CONTRIBUTORY …
#define EXCPT_PF …
static int exception_class(int vector)
{ … }
#define EXCPT_FAULT …
#define EXCPT_TRAP …
#define EXCPT_ABORT …
#define EXCPT_INTERRUPT …
#define EXCPT_DB …
static int exception_type(int vector)
{ … }
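/*
 * Architectural background for exception_class()/exception_type() (SDM
 * facts, not the elided bodies): #DE, #TS, #NP, #SS and #GP are
 * "contributory"; raising a contributory exception while delivering
 * another contributory one escalates to #DF, as does raising anything
 * while delivering a #PF. The fault/trap split matters for RIP: faults
 * point at the faulting instruction, traps (#BP, #OF) at the next one,
 * and #DB is handled specially since it can be either.
 */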
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
struct kvm_queued_exception *ex)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
bool has_error_code, u32 error_code,
bool has_payload, unsigned long payload)
{ … }
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
bool has_payload, unsigned long payload, bool reinject)
{ … }
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
unsigned long payload)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
u32 error_code, unsigned long payload)
{ … }
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{ … }
EXPORT_SYMBOL_GPL(…);
static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{ … }
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{ … }
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{ … }
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{ … }
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{ … }
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{ … }
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{ … }
#endif
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{ … }
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{ … }
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{ … }
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{ … }
void kvm_update_dr7(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{ … }
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
#define KVM_SUPPORTED_ARCH_CAP …
static u64 kvm_get_arch_capabilities(void)
{ … }
static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
bool host_initiated)
{ … }
static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{ … }
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{ … }
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{ … }
EXPORT_SYMBOL_GPL(…);
static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
void kvm_enable_efer_bits(u64 mask)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
bool host_initiated)
{ … }
static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
bool host_initiated)
{ … }
static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 data, bool host_initiated)
{ … }
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
bool host_initiated)
{ … }
static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 *data, bool host_initiated)
{ … }
int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{ … }
EXPORT_SYMBOL_GPL(…);
static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
{ … }
static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
{ … }
static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
{ … }
static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
{ … }
static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
{ … }
static u64 kvm_msr_reason(int r)
{ … }
static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
u32 exit_reason, u64 data,
int (*completion)(struct kvm_vcpu *vcpu),
int r)
{ … }
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
{ … }
int kvm_emulate_invd(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
{ … }
int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{ … }
static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
{ … }
static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
{ … }
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{ … }
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{ … }
#ifdef CONFIG_X86_64
struct pvclock_clock { … };
struct pvclock_gtod_data { … };
static struct pvclock_gtod_data pvclock_gtod_data;
static void update_pvclock_gtod(struct timekeeper *tk)
{ … }
static s64 get_kvmclock_base_ns(void)
{ … }
#else
static s64 get_kvmclock_base_ns(void)
{
return ktime_get_boottime_ns();
}
#endif
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
{ … }
static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
bool old_msr, bool host_initiated)
{ … }
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{ … }
static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
s8 *pshift, u32 *pmultiplier)
{ … }
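/*
 * The conversion behind kvm_get_time_scale() is standard pvclock fixed
 * point (a worked example, not the elided body): it finds (shift,
 * multiplier) such that
 *
 *	scaled_hz ~= base_hz * multiplier / 2^32 * 2^shift
 *
 * so deltas convert as ((delta << shift) * multiplier) >> 32, where
 * shift may be negative. E.g. scaling a 2.5 GHz TSC to 1 GHz
 * (nanoseconds) yields shift = -1, multiplier = 0.8 * 2^32 = 3435973837.
 */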
#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = …;
#endif
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{ … }
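/*
 * adjust_tsc_khz() applies a parts-per-million correction; the
 * arithmetic (a sketch, the body is elided) is simply
 *
 *	adjusted_khz = khz * (1000000 + ppm) / 1000000
 *
 * e.g. with tsc_tolerance_ppm = 250, a 2800000 kHz host tolerates a
 * requested guest frequency within +/- 700 kHz before TSC scaling is
 * engaged.
 */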
static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{ … }
static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{ … }
static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{ … }
#ifdef CONFIG_X86_64
static inline bool gtod_is_based_on_tsc(int mode)
{ … }
#endif
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu, bool new_generation)
{ … }
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{ … }
u64 kvm_scale_tsc(u64 tsc, u64 ratio)
{ … }
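/*
 * TSC scaling is fixed point with kvm_caps.tsc_scaling_ratio_frac_bits
 * fractional bits; the computation behind __scale_tsc()/kvm_scale_tsc()
 * (a sketch of the math, implemented via mul_u64_u64_shr) is
 *
 *	guest_tsc = (host_tsc * ratio) >> frac_bits
 *
 * e.g. with VMX's 48 fractional bits, a 3 GHz guest on a 2 GHz host
 * uses ratio = 1.5 * 2^48.
 */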
static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{ … }
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{ … }
EXPORT_SYMBOL_GPL(…);
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
{ … }
EXPORT_SYMBOL_GPL(…);
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
{ … }
EXPORT_SYMBOL_GPL(…);
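/*
 * Composition of nested TSC parameters (the algebra the two helpers
 * above implement):
 *
 *	L1 TSC = host_tsc * l1_multiplier >> frac + l1_offset
 *	L2 TSC = L1_tsc * l2_multiplier >> frac + l2_offset
 *
 * hence the merged values programmed while L2 runs:
 *
 *	multiplier02 = l1_multiplier * l2_multiplier >> frac
 *	offset02 = (l1_offset * l2_multiplier >> frac) + l2_offset
 */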
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{ … }
static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
{ … }
static inline bool kvm_check_tsc_unstable(void)
{ … }
static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
u64 ns, bool matched)
{ … }
static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
{ … }
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
s64 adjustment)
{ … }
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{ … }
#ifdef CONFIG_X86_64
static u64 read_tsc(void)
{ … }
static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
int *mode)
{ … }
static int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
{ … }
static int do_monotonic(s64 *t, u64 *tsc_timestamp)
{ … }
static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
{ … }
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
{ … }
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
{ … }
static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
u64 *tsc_timestamp)
{ … }
#endif
static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{ … }
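/*
 * Masterclock sketch: when every vCPU's TSC is synchronized and the
 * host clocksource is TSC based, pvclock_update_vm_gtod_copy() latches
 * a (master_kernel_ns, master_cycle_now) pair so all vCPUs derive
 * kvmclock identically:
 *
 *	kvmclock_ns = master_kernel_ns + scale(rdtsc() - master_cycle_now)
 *
 * keeping the clock monotonic across vCPUs instead of having each one
 * sample the host clock independently.
 */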
static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{ … }
static void __kvm_start_pvclock_update(struct kvm *kvm)
{ … }
static void kvm_start_pvclock_update(struct kvm *kvm)
{ … }
static void kvm_end_pvclock_update(struct kvm *kvm)
{ … }
static void kvm_update_masterclock(struct kvm *kvm)
{ … }
static unsigned long get_cpu_tsc_khz(void)
{ … }
static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
{ … }
static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
{ … }
u64 get_kvmclock_ns(struct kvm *kvm)
{ … }
static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
struct gfn_to_pfn_cache *gpc,
unsigned int offset,
bool force_tsc_unstable)
{ … }
static int kvm_guest_time_update(struct kvm_vcpu *v)
{ … }
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
{ … }
#define KVMCLOCK_UPDATE_DELAY …
static void kvmclock_update_fn(struct work_struct *work)
{ … }
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{ … }
#define KVMCLOCK_SYNC_PERIOD …
static void kvmclock_sync_fn(struct work_struct *work)
{ … }
static bool is_mci_control_msr(u32 msr)
{ … }
static bool is_mci_status_msr(u32 msr)
{ … }
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{ … }
static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{ … }
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{ … }
static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
{ … }
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{ … }
static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
{ … }
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{ … }
static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{ … }
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void record_steal_time(struct kvm_vcpu *vcpu)
{ … }
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
EXPORT_SYMBOL_GPL(…);
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{ … }
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
EXPORT_SYMBOL_GPL(…);
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
struct kvm_msr_entry *entries,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{ … }
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data),
int writeback)
{ … }
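/*
 * msr_io() backs the KVM_GET_MSRS/KVM_SET_MSRS vCPU ioctls. A
 * hypothetical VMM snippet (userspace, not kernel code):
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } m = {
 *		.hdr.nmsrs = 1,
 *		.e[0].index = MSR_IA32_TSC,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &m);
 *
 * The ioctl returns the number of entries processed, so a short count
 * identifies exactly which MSR access was rejected.
 */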
static inline bool kvm_can_mwait_in_guest(void)
{ … }
#ifdef CONFIG_KVM_HYPERV
static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 __user *cpuid_arg)
{ … }
#endif
static bool kvm_is_vm_type_supported(unsigned long type)
{ … }
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{ … }
static int __kvm_x86_dev_get_attr(struct kvm_device_attr *attr, u64 *val)
{ … }
static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
{ … }
static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
{ … }
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{ … }
static void wbinvd_ipi(void *garbage)
{ … }
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{ … }
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ … }
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{ … }
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{ … }
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{ … }
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{ … }
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{ … }
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{ … }
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{ … }
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{ … }
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
struct kvm_tpr_access_ctl *tac)
{ … }
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
u64 mcg_cap)
{ … }
static bool is_ucna(struct kvm_x86_mce *mce)
{ … }
static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64 *banks)
{ … }
static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
struct kvm_x86_mce *mce)
{ … }
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{ … }
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{ … }
static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{ … }
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{ … }
static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
u8 *state, unsigned int size)
{ … }
static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{ … }
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{ … }
static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
struct kvm_xcrs *guest_xcrs)
{ … }
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
struct kvm_xcrs *guest_xcrs)
{ … }
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{ … }
static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{ … }
static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{ … }
static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{ … }
static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
unsigned int ioctl,
void __user *argp)
{ … }
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
struct kvm_enable_cap *cap)
{ … }
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{ … }
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{ … }
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{ … }
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
u64 ident_addr)
{ … }
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
unsigned long kvm_nr_mmu_pages)
{ … }
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{ … }
static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{ … }
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{ … }
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{ … }
static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{ … }
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{ … }
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
struct kvm_reinject_control *control)
{ … }
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{ … }
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
bool line_status)
{ … }
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{ … }
static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
{ … }
static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
{ … }
static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
struct kvm_msr_filter_range *user_range)
{ … }
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
struct kvm_msr_filter *filter)
{ … }
#ifdef CONFIG_KVM_COMPAT
struct kvm_msr_filter_range_compat { … };
struct kvm_msr_filter_compat { … };
#define KVM_X86_SET_MSR_FILTER_COMPAT …
long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{ … }
#endif
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_arch_suspend_notifier(struct kvm *kvm)
{ … }
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
{ … }
#endif
static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
{ … }
static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
{ … }
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{ … }
static void kvm_probe_feature_msr(u32 msr_index)
{ … }
static void kvm_probe_msr_to_save(u32 msr_index)
{ … }
static void kvm_init_msr_lists(void)
{ … }
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
const void *v)
{ … }
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{ … }
void kvm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{ … }
void kvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{ … }
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
struct x86_exception *exception)
{ … }
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{ … }
EXPORT_SYMBOL_GPL(…);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{ … }
EXPORT_SYMBOL_GPL(…);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{ … }
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
struct kvm_vcpu *vcpu, u64 access,
struct x86_exception *exception)
{ … }
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{ … }
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{ … }
EXPORT_SYMBOL_GPL(…);
static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception, bool system)
{ … }
static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
struct kvm_vcpu *vcpu, u64 access,
struct x86_exception *exception)
{ … }
static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
unsigned int bytes, struct x86_exception *exception,
bool system)
{ … }
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
unsigned int bytes, struct x86_exception *exception)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len)
{ … }
int handle_ud(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t gpa, bool write)
{ … }
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t *gpa, struct x86_exception *exception,
bool write)
{ … }
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes)
{ … }
struct read_write_emulator_ops { … };
static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{ … }
static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{ … }
static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{ … }
static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{ … }
static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{ … }
static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{ … }
static const struct read_write_emulator_ops read_emultor = …;
static const struct read_write_emulator_ops write_emultor = …;
static int emulator_read_write_onepage(unsigned long addr, void *val,
unsigned int bytes,
struct x86_exception *exception,
struct kvm_vcpu *vcpu,
const struct read_write_emulator_ops *ops)
{ … }
static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
void *val, unsigned int bytes,
struct x86_exception *exception,
const struct read_write_emulator_ops *ops)
{ … }
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
void *val,
unsigned int bytes,
struct x86_exception *exception)
{ … }
static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *val,
unsigned int bytes,
struct x86_exception *exception)
{ … }
#define emulator_try_cmpxchg_user(t, ptr, old, new) …
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *old,
const void *new,
unsigned int bytes,
struct x86_exception *exception)
{ … }
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *data,
unsigned int count, bool in)
{ … }
static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *val, unsigned int count)
{ … }
static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
{ … }
static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
int size, unsigned short port, void *val,
unsigned int count)
{ … }
static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port, const void *val,
unsigned int count)
{ … }
static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
int size, unsigned short port,
const void *val, unsigned int count)
{ … }
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{ … }
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
{ … }
static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{ … }
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{ … }
static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr)
{ … }
static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value)
{ … }
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{ … }
static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{ … }
static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{ … }
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{ … }
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{ … }
static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{ … }
static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{ … }
static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{ … }
static unsigned long emulator_get_cached_segment_base(
struct x86_emulate_ctxt *ctxt, int seg)
{ … }
static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
struct desc_struct *desc, u32 *base3,
int seg)
{ … }
static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
struct desc_struct *desc, u32 base3,
int seg)
{ … }
static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{ … }
static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 data)
{ … }
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{ … }
static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
{ … }
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc, u64 *pdata)
{ … }
static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{ … }
static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{ … }
static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
bool exact_only)
{ … }
static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
{ … }
static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
{ … }
static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
{ … }
static bool emulator_guest_cpuid_is_intel_compatible(struct x86_emulate_ctxt *ctxt)
{ … }
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{ … }
static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{ … }
static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{ … }
static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
{ … }
static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
{ … }
#ifndef CONFIG_KVM_SMM
static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
WARN_ON_ONCE(1);
return X86EMUL_UNHANDLEABLE;
}
#endif
static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
{ … }
static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
{ … }
static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
{ … }
static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
gva_t addr, unsigned int flags)
{ … }
static const struct x86_emulate_ops emulate_ops = …;
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{ … }
static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{ … }
static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
{ … }
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{ … }
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{ … }
EXPORT_SYMBOL_GPL(…);
static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
u8 ndata, u8 *insn_bytes, u8 insn_size)
{ … }
static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
{ … }
void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
u8 ndata)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{ … }
static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
gpa_t cr2_or_gpa,
int emulation_type)
{ … }
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
unsigned long *db)
{ … }
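/*
 * DR7 layout consumed by kvm_vcpu_check_hw_bp() (architectural, per the
 * SDM): bits 0-7 are local/global enable pairs for DR0-DR3, and each
 * breakpoint n has a 4-bit field at bit 16 + 4n encoding R/W type and
 * length. Pseudocode for the match:
 *
 *	for (n = 0; n < 4; n++)
 *		if (((dr7 >> (n * 2)) & 3) &&        // L/G enable
 *		    type/len fields match && addr == db[n])
 *			dr6 |= 1 << n;               // set DR6.Bn
 */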
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
{ … }
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
{ … }
static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
int emulation_type, int *r)
{ … }
static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
{ … }
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
void *insn, int insn_len)
{ … }
EXPORT_SYMBOL_GPL(…);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len)
{ … }
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
void *insn, int insn_len)
{ … }
EXPORT_SYMBOL_GPL(…);
static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
{ … }
static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
{ … }
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
{ … }
static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
{ … }
static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
unsigned short port)
{ … }
int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{ … }
EXPORT_SYMBOL_GPL(…);
static int kvmclock_cpu_down_prep(unsigned int cpu)
{ … }
static void tsc_khz_changed(void *data)
{ … }
#ifdef CONFIG_X86_64
static void kvm_hyperv_tsc_notifier(void)
{ … }
#endif
static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
{ … }
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{ … }
static struct notifier_block kvmclock_cpufreq_notifier_block = …;
static int kvmclock_cpu_online(unsigned int cpu)
{ … }
static void kvm_timer_init(void)
{ … }
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{ … }
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
static void pvclock_irq_work_fn(struct irq_work *w)
{ … }
static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
void *priv)
{ … }
static struct notifier_block pvclock_gtod_notifier = …;
#endif
static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{ … }
static int kvm_x86_check_processor_compatibility(void)
{ … }
static void kvm_x86_check_cpu_compat(void *ret)
{ … }
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_x86_vendor_exit(void)
{ … }
EXPORT_SYMBOL_GPL(…);
#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
unsigned long clock_type)
{ … }
#endif
static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
{ … }
bool kvm_apicv_activated(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
enum kvm_apicv_inhibit reason, bool set)
{ … }
static void kvm_apicv_init(struct kvm *kvm)
{ … }
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
{ … }
static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
{ … }
unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
int op_64_bit, int cpl)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
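/*
 * Guest-side ABI for the hypercalls handled above (see
 * Documentation/virt/kvm/x86/hypercalls.rst): the number goes in RAX,
 * arguments in RBX, RCX, RDX and RSI, and the result returns in RAX.
 * E.g. a PV kick is VMCALL (VMMCALL on AMD; emulator_fix_hypercall()
 * patches a mismatched opcode) with RAX = KVM_HC_KICK_CPU, RBX = flags
 * and RCX = the target vCPU's APIC ID.
 */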
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{ … }
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{ … }
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{ … }
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{ … }
int kvm_check_nested_events(struct kvm_vcpu *vcpu)
{ … }
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{ … }
static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
bool *req_immediate_exit)
{ … }
static void process_nmi(struct kvm_vcpu *vcpu)
{ … }
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
{ … }
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
unsigned long *vcpu_bitmap)
{ … }
void kvm_make_scan_ioapic_request(struct kvm *kvm)
{ … }
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{ … }
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
enum kvm_apicv_inhibit reason, bool set)
{ … }
void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
enum kvm_apicv_inhibit reason, bool set)
{ … }
EXPORT_SYMBOL_GPL(…);
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{ … }
static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{ … }
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{ … }
static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{ … }
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{ … }
static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{ … }
static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{ … }
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{ … }
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{ … }
static int vcpu_run(struct kvm_vcpu *vcpu)
{ … }
static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{ … }
int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{ … }
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{ … }
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{ … }
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{ … }
static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{ … }
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{ … }
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{ … }
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{ … }
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{ … }
static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{ … }
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{ … }
static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{ … }
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{ … }
static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{ … }
static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{ … }
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
{ … }
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{ … }
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{ … }
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{ … }
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
int reason, bool has_error_code, u32 error_code)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{ … }
static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
int *mmu_reset_needed, bool update_pdptrs)
{ … }
static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{ … }
static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
{ … }
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{ … }
static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
{ … }
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{ … }
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{ … }
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{ … }
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{ … }
static void store_regs(struct kvm_vcpu *vcpu)
{ … }
static int sync_regs(struct kvm_vcpu *vcpu)
{ … }
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{ … }
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{ … }
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{ … }
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{ … }
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_arch_enable_virtualization(void)
{ … }
void kvm_arch_disable_virtualization(void)
{ … }
int kvm_arch_enable_virtualization_cpu(void)
{ … }
void kvm_arch_disable_virtualization_cpu(void)
{ … }
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{ … }
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{ … }
void kvm_arch_free_vm(struct kvm *kvm)
{ … }
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{ … }
int kvm_arch_post_init_vm(struct kvm *kvm)
{ … }
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{ … }
static void kvm_unload_vcpu_mmus(struct kvm *kvm)
{ … }
void kvm_arch_sync_events(struct kvm *kvm)
{ … }
void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
u32 size)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_arch_pre_destroy_vm(struct kvm *kvm)
{ … }
void kvm_arch_destroy_vm(struct kvm *kvm)
{ … }
static void memslot_rmap_free(struct kvm_memory_slot *slot)
{ … }
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{ … }
int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
{ … }
static int kvm_alloc_memslot_metadata(struct kvm *kvm,
struct kvm_memory_slot *slot)
{ … }
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{ … }
int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{ … }
static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
{ … }
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{ … }
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{ … }
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{ … }
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{ … }
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{ … }
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{ … }
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{ … }
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{ … }
EXPORT_SYMBOL_GPL(…);
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{ … }
static inline u32 kvm_async_pf_next_probe(u32 key)
{ … }
static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{ … }
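/*
 * The async-PF gfn table above is a small open-addressed hash with
 * linear probing: kvm_async_pf_next_probe() wraps key + 1 around the
 * table, insertion probes from kvm_async_pf_hash_fn(gfn) to the first
 * free slot, and kvm_del_async_pf_gfn() re-inserts the displaced tail
 * of the probe chain so later lookups never stop at a false empty slot.
 */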
static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
{ … }
static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
{ … }
static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
{ … }
static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
{ … }
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{ … }
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{ … }
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{ … }
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{ … }
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{ … }
void kvm_arch_start_assignment(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_arch_end_assignment(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
{ … }
void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{ … }
EXPORT_SYMBOL_GPL(…);
bool kvm_arch_has_irq_bypass(void)
{ … }
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{ … }
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
struct irq_bypass_producer *prod)
{ … }
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set)
{ … }
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{ … }
bool kvm_vector_hashing_enabled(void)
{ … }
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
{ … }
#endif
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{ … }
#endif
int kvm_spec_ctrl_test_value(u64 value)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{ … }
EXPORT_SYMBOL_GPL(…);
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{ … }
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
void *data)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
void *data)
{ … }
EXPORT_SYMBOL_GPL(…);
static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
{ … }
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port);
static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{ … }
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port)
{ … }
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port);
static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{ … }
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port)
{ … }
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port, void *data, unsigned int count,
int in)
{ … }
EXPORT_SYMBOL_GPL(…);
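/*
 * SEV-ES string I/O flow (a sketch of the control flow through the
 * helpers above): the guest's #VC handler supplies a bounce buffer via
 * the GHCB; kvm_sev_es_string_io() stashes it in vcpu->arch.sev_pio_*
 * state and emits KVM_EXIT_IO in chunks bounded by the page-sized
 * pio_data buffer, with complete_sev_es_emulated_{ins,outs}() advancing
 * through the buffer until the full count is transferred.
 */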
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
EXPORT_TRACEPOINT_SYMBOL_GPL(…);
static int __init kvm_x86_init(void)
{ … }
module_init(kvm_x86_init);
static void __exit kvm_x86_exit(void)
{ … }
module_exit(kvm_x86_exit);