linux/arch/x86/kvm/x86.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Yaniv Kamay  <[email protected]>
 *   Amit Shah    <[email protected]>
 *   Ben-Ami Yassour <[email protected]>
 */
#define pr_fmt(fmt)

#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "mmu/page_track.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"
#include "smm.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>
#include <linux/smp.h>

#include <trace/events/ipi.h>
#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS
#define KVM_MAX_MCE_BANKS

/*
 * Note, kvm_caps fields should *never* have default values; all fields must be
 * recomputed from scratch during vendor module load, e.g. to account for a
 * vendor module being reloaded with different module parameters.
 */
struct kvm_caps kvm_caps __read_mostly;
EXPORT_SYMBOL_GPL();

struct kvm_host_values kvm_host __read_mostly;
EXPORT_SYMBOL_GPL();

#define ERR_PTR_USR(e)

#define emul_to_vcpu(ctxt)

/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits =;
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits =;

#define KVM_EXIT_HYPERCALL_VALID_MASK

#define KVM_CAP_PMU_VALID_MASK

#define KVM_X2APIC_API_VALID_FLAGS

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

static DEFINE_MUTEX(vendor_module_lock);
struct kvm_x86_ops kvm_x86_ops __read_mostly;

#define KVM_X86_OP
#define KVM_X86_OP_OPTIONAL
#define KVM_X86_OP_OPTIONAL_RET0
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL();
EXPORT_STATIC_CALL_GPL();

static bool __read_mostly ignore_msrs =;
module_param(ignore_msrs, bool, 0644);

bool __read_mostly report_ignored_msrs =;
module_param(report_ignored_msrs, bool, 0644);
EXPORT_SYMBOL_GPL();

unsigned int min_timer_period_us =;
module_param(min_timer_period_us, uint, 0644);

static bool __read_mostly kvmclock_periodic_sync =;
module_param(kvmclock_periodic_sync, bool, 0444);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm =;
module_param(tsc_tolerance_ppm, uint, 0644);

static bool __read_mostly vector_hashing =;
module_param(vector_hashing, bool, 0444);

bool __read_mostly enable_vmware_backdoor =;
module_param(enable_vmware_backdoor, bool, 0444);
EXPORT_SYMBOL_GPL();

/*
 * Flags to manipulate forced emulation behavior (any non-zero value will
 * enable forced emulation).
 */
#define KVM_FEP_CLEAR_RFLAGS_RF
static int __read_mostly force_emulation_prefix;
module_param(force_emulation_prefix, int, 0644);

int __read_mostly pi_inject_timer =;
module_param(pi_inject_timer, bint, 0644);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu =;
EXPORT_SYMBOL_GPL();
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split =;
module_param(eager_page_split, bool, 0644);

/* Enable/disable SMT_RSB bug mitigation */
static bool __read_mostly mitigate_smt_rsb;
module_param(mitigate_smt_rsb, bool, 0444);

/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
#define KVM_MAX_NR_USER_RETURN_MSRS

struct kvm_user_return_msrs {};
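
/*
 * Illustrative usage sketch (not code from this file): a vendor module
 * registers the MSRs it wants lazily restored at setup time and then loads
 * the guest value before entering the guest, e.g.:
 *
 *	slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 *	...
 *	kvm_set_user_return_msr(slot, guest_val, -1ull);
 *
 * The "return the slot index" convention of kvm_add_user_return_msr() and
 * the all-bits mask are assumptions made for the sake of the example; see
 * the definitions below.
 */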

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL();
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0

bool __read_mostly allow_smaller_maxphyaddr =;
EXPORT_SYMBOL_GPL();

bool __read_mostly enable_apicv =;
EXPORT_SYMBOL_GPL();

const struct _kvm_stats_desc kvm_vm_stats_desc[] =;

const struct kvm_stats_header kvm_vm_stats_header =;

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] =;

const struct kvm_stats_header kvm_vcpu_stats_header =;

static struct kmem_cache *x86_emulator_cache;

/*
 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
 * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.  msrs_to_save holds MSRs that
 * require host support, i.e. should be probed via RDMSR.  emulated_msrs holds
 * MSRs that KVM emulates without strictly requiring host support.
 * msr_based_features holds MSRs that enumerate features, i.e. are effectively
 * CPUID leafs.  Note, msr_based_features isn't mutually exclusive with
 * msrs_to_save and emulated_msrs.
 */

static const u32 msrs_to_save_base[] =;

static const u32 msrs_to_save_pmu[] =;

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
			ARRAY_SIZE(msrs_to_save_pmu)];
static unsigned num_msrs_to_save;

static const u32 emulated_msrs_all[] =;

static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;

/*
 * List of MSRs that control the existence of MSR-based features, i.e. MSRs
 * that are effectively CPUID leafs.  VMX MSRs are also included in the set of
 * feature MSRs, but are handled separately to allow expedited lookups.
 */
static const u32 msr_based_features_all_except_vmx[] =;

static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
			      (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
static unsigned int num_msr_based_features;

/*
 * All feature MSRs except uCode revID, which tracks the currently loaded uCode
 * patch, are immutable once the vCPU model is defined.
 */
static bool kvm_is_immutable_feature_msr(u32 msr)
{}

static bool kvm_is_advertised_msr(u32 msr_index)
{}

msr_access_t;

static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
					     u64 *data, bool host_initiated,
					     enum kvm_msr_access rw,
					     msr_access_t msr_access_fn)
{}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{}

static void kvm_on_user_return(struct user_return_notifier *urn)
{}

static int kvm_probe_user_return_msr(u32 msr)
{}

int kvm_add_user_return_msr(u32 msr)
{}
EXPORT_SYMBOL_GPL();

int kvm_find_user_return_msr(u32 msr)
{}
EXPORT_SYMBOL_GPL();

static void kvm_user_return_msr_cpu_online(void)
{}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{}
EXPORT_SYMBOL_GPL();

static void drop_user_return_notifiers(void)
{}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{}

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}

/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Usually, after catching the
 * fault, we just panic; during a reboot the instruction is instead ignored.
 */
noinstr void kvm_spurious_fault(void)
{}
EXPORT_SYMBOL_GPL();

#define EXCPT_BENIGN
#define EXCPT_CONTRIBUTORY
#define EXCPT_PF

static int exception_class(int vector)
{}

#define EXCPT_FAULT
#define EXCPT_TRAP
#define EXCPT_ABORT
#define EXCPT_INTERRUPT
#define EXCPT_DB

static int exception_type(int vector)
{}

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex)
{}
EXPORT_SYMBOL_GPL();

static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
				       bool has_error_code, u32 error_code,
				       bool has_payload, unsigned long payload)
{}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
	        bool has_payload, unsigned long payload, bool reinject)
{}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{}
EXPORT_SYMBOL_GPL();

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{}
EXPORT_SYMBOL_GPL();

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{}
EXPORT_SYMBOL_GPL();

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{}
EXPORT_SYMBOL_GPL();

static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{}

void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault)
{}
EXPORT_SYMBOL_GPL();

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{}

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{}
EXPORT_SYMBOL_GPL();

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{}
EXPORT_SYMBOL_GPL();

/*
 * Check whether cpl <= required_cpl; if so, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{}
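
/*
 * A minimal sketch of the check documented above (not the in-tree body; the
 * kvm_x86_call() spelling of the vendor get_cpl hook is an assumption):
 *
 *	if (kvm_x86_call(get_cpl)(vcpu) <= required_cpl)
 *		return true;
 *	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
 *	return false;
 */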

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{}
EXPORT_SYMBOL_GPL();

static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{}

/*
 * Load the PAE PDPTRs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{}
EXPORT_SYMBOL_GPL();

static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{}

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{}
EXPORT_SYMBOL_GPL();

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{}
EXPORT_SYMBOL_GPL();

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{}
EXPORT_SYMBOL_GPL();

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{}
#endif

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{}

int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{}
EXPORT_SYMBOL_GPL();

static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{}

void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{}
EXPORT_SYMBOL_GPL();

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{}
EXPORT_SYMBOL_GPL();

static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{}

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{}
EXPORT_SYMBOL_GPL();

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{}
EXPORT_SYMBOL_GPL();

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{}
EXPORT_SYMBOL_GPL();

unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

/*
 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
 * does not yet virtualize. These include:
 *   10 - MISC_PACKAGE_CTRLS
 *   11 - ENERGY_FILTERING_CTL
 *   12 - DOITM
 *   18 - FB_CLEAR_CTRL
 *   21 - XAPIC_DISABLE_STATUS
 *   23 - OVERCLOCKING_STATUS
 */

#define KVM_SUPPORTED_ARCH_CAP

static u64 kvm_get_arch_capabilities(void)
{}

static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
			       bool host_initiated)
{}

static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{}

static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{}
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{}
EXPORT_SYMBOL_GPL();

static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}

void kvm_enable_efer_bits(u64 mask)
{}
EXPORT_SYMBOL_GPL();

bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{}
EXPORT_SYMBOL_GPL();

/*
 * Write @data into the MSR specified by @index.  Select MSR-specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
			 bool host_initiated)
{}

static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
			bool host_initiated)
{}

static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
				     u32 index, u64 data, bool host_initiated)
{}

/*
 * Read the MSR specified by @index into @data.  Select MSR-specific fault
 * checks are bypassed if @host_initiated is %true.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
		  bool host_initiated)
{}

static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
				     u32 index, u64 *data, bool host_initiated)
{}

int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{}
EXPORT_SYMBOL_GPL();

int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
{}
EXPORT_SYMBOL_GPL();

int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{}
EXPORT_SYMBOL_GPL();

int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{}
EXPORT_SYMBOL_GPL();

static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
{}

static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
{}

static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
{}

static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
{}

static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
{}

static u64 kvm_msr_reason(int r)
{}

static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
			      u32 exit_reason, u64 data,
			      int (*completion)(struct kvm_vcpu *vcpu),
			      int r)
{}

int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
{}

int kvm_emulate_invd(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();


static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
{}
int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{}

/*
 * The fast path for frequent and performance-sensitive WRMSR emulation, i.e.
 * the sending of an IPI.  Handling the IPI early in the VM-Exit flow reduces
 * the latency of virtual IPIs by avoiding the expensive bits of transitioning
 * from guest to host, e.g. reacquiring KVM's SRCU lock.  This is in contrast
 * to the other cases, which must be handled after interrupts are enabled on
 * the host.
 */
static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
{}

static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
{}

fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();
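
/*
 * Rough sketch of the fastpath dispatch described above, assuming the MSR
 * index and data are taken from RCX and EDX:EAX as for a regular WRMSR (the
 * mapping of the handlers' return values to a fastpath_t is omitted):
 *
 *	u32 msr = kvm_rcx_read(vcpu);
 *	u64 data = kvm_read_edx_eax(vcpu);
 *
 *	switch (msr) {
 *	case APIC_BASE_MSR + (APIC_ICR >> 4):	(the x2APIC ICR)
 *		ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
 *		break;
 *	case MSR_IA32_TSC_DEADLINE:
 *		ret = handle_fastpath_set_tscdeadline(vcpu, data);
 *		break;
 *	default:
 *		return EXIT_FASTPATH_NONE;
 *	}
 */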

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{}

static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{}

#ifdef CONFIG_X86_64
struct pvclock_clock {};

struct pvclock_gtod_data {};

static struct pvclock_gtod_data pvclock_gtod_data;

static void update_pvclock_gtod(struct timekeeper *tk)
{}

static s64 get_kvmclock_base_ns(void)
{}
#else
static s64 get_kvmclock_base_ns(void)
{
	/* Master clock not used, so we can just use CLOCK_BOOTTIME.  */
	return ktime_get_boottime_ns();
}
#endif

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
{}

static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
				  bool old_msr, bool host_initiated)
{}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{}

static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
			       s8 *pshift, u32 *pmultiplier)
{}

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock =;
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{}

static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);

static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{}

static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{}

#ifdef CONFIG_X86_64
static inline bool gtod_is_based_on_tsc(int mode)
{}
#endif

static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu, bool new_generation)
{}

/*
 * Multiply tsc by a fixed point number represented by ratio.
 *
 * The most significant 64-N bits (mult) of ratio represent the
 * integral part of the fixed point number; the remaining N bits
 * (frac) represent the fractional part, i.e. ratio represents a fixed
 * point number (mult + frac * 2^(-N)).
 *
 * N equals kvm_caps.tsc_scaling_ratio_frac_bits.
 */
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{}
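
/*
 * Worked example of the fixed point math above, with
 * N == kvm_caps.tsc_scaling_ratio_frac_bits (a sketch; the real helper must
 * use a 128-bit multiply so that tsc * frac cannot overflow):
 *
 *	u64 mult = ratio >> N;
 *	u64 frac = ratio & ((1ULL << N) - 1);
 *
 *	return tsc * mult + ((tsc * frac) >> N);
 *
 * i.e. scaled tsc = tsc * (mult + frac * 2^(-N)).
 */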

u64 kvm_scale_tsc(u64 tsc, u64 ratio)
{}

static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{}

u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{}
EXPORT_SYMBOL_GPL();

u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
{}
EXPORT_SYMBOL_GPL();

u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
{}
EXPORT_SYMBOL_GPL();

static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{}

static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
{}

static inline bool kvm_check_tsc_unstable(void)
{}

/*
 * Infers attempts to synchronize the guest's tsc from host writes. Sets the
 * offset for the vcpu and tracks the TSC matching generation that the vcpu
 * participates in.
 */
static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
				  u64 ns, bool matched)
{}

static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
{}

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{}

#ifdef CONFIG_X86_64

static u64 read_tsc(void)
{}

static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
			  int *mode)
{}

/*
 * As with get_kvmclock_base_ns(), this counts from boot time, at the
 * frequency of CLOCK_MONOTONIC_RAW (hence adding gtod->offs_boot).
 */
static int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
{}

/*
 * This calculates CLOCK_MONOTONIC at the time of the TSC snapshot, with
 * no boot time offset.
 */
static int do_monotonic(s64 *t, u64 *tsc_timestamp)
{}

static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
{}

/*
 * Calculates the kvmclock_base_ns (CLOCK_MONOTONIC_RAW + boot time) and
 * reports the TSC value from which it did so.  Returns true if the host is
 * using a TSC-based clocksource.
 */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
{}

/*
 * Calculates CLOCK_MONOTONIC and reports the TSC value from which it did
 * so.  Returns true if the host is using a TSC-based clocksource.
 */
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
{}

/*
 * Calculates CLOCK_REALTIME and reports the TSC value from which it did
 * so.  Returns true if the host is using a TSC-based clocksource.
 *
 * DO NOT USE this for anything related to migration. You want CLOCK_TAI
 * for that.
 */
static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
					   u64 *tsc_timestamp)
{}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUs, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 * 		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 * 					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
 * 5.				        | ret1 = timespec1 + (rdtsc - tsc1)
 * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 * 	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{}

static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{}

static void __kvm_start_pvclock_update(struct kvm *kvm)
{}

static void kvm_start_pvclock_update(struct kvm *kvm)
{}

static void kvm_end_pvclock_update(struct kvm *kvm)
{}

static void kvm_update_masterclock(struct kvm *kvm)
{}

/*
 * Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's
 * per-CPU value (which may be zero if a CPU is going offline).  Note, tsc_khz
 * can change during boot even if the TSC is constant, as it's possible for KVM
 * to be loaded before TSC calibration completes.  Ideally, KVM would get a
 * notification when calibration completes, but practically speaking calibration
 * will complete before userspace is alive enough to create VMs.
 */
static unsigned long get_cpu_tsc_khz(void)
{}
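
/*
 * Sketch of the policy described above (illustrative, not the exact body):
 *
 *	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 *		return tsc_khz;
 *	return __this_cpu_read(cpu_tsc_khz);
 */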

/* Called within read_seqcount_begin/retry for kvm->pvclock_sc.  */
static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
{}

static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
{}

u64 get_kvmclock_ns(struct kvm *kvm)
{}

static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
				    struct gfn_to_pfn_cache *gpc,
				    unsigned int offset,
				    bool force_tsc_unstable)
{}

static int kvm_guest_time_update(struct kvm_vcpu *v)
{}

/*
 * The pvclock_wall_clock ABI tells the guest the wall clock time at
 * which it started (i.e. its epoch, when its kvmclock was zero).
 *
 * In fact those clocks are subtly different; wall clock frequency is
 * adjusted by NTP and has leap seconds, while the kvmclock is a
 * simple function of the TSC without any such adjustment.
 *
 * Perhaps the ABI should have exposed CLOCK_TAI and a ratio between
 * that and kvmclock, but even that would be subject to change over
 * time.
 *
 * Attempt to calculate the epoch at a given moment using the *same*
 * TSC reading via kvm_get_walltime_and_clockread() to obtain both
 * wallclock and kvmclock times, and subtracting one from the other.
 *
 * Fall back to using their values at slightly different moments by
 * calling ktime_get_real_ns() and get_kvmclock_ns() separately.
 */
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
{}
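
/*
 * The fallback path mentioned above boils down to (sketch):
 *
 *	return ktime_get_real_ns() - get_kvmclock_ns(kvm);
 *
 * i.e. the epoch is "wall clock now" minus "kvmclock nanoseconds elapsed
 * since the guest's epoch".
 */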

/*
 * kvmclock updates which are isolated to a given vcpu, such as
 * vcpu->cpu migration, should not allow system_timestamp from
 * the rest of the vcpus to remain static. Otherwise ntp frequency
 * correction applies to one vcpu's system_timestamp but not
 * the others.
 *
 * So in those cases, request a kvmclock update for all vcpus.
 * We need to rate-limit these requests though, as they can
 * considerably slow guests that have a large number of vcpus.
 * The time for a remote vcpu to update its kvmclock is bound
 * by the delay we use to rate-limit the updates.
 */

#define KVMCLOCK_UPDATE_DELAY
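
/*
 * Sketch of the rate limiting described above (the work item name is an
 * assumption):
 *
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
 *	schedule_delayed_work(&v->kvm->arch.kvmclock_update_work,
 *			      KVMCLOCK_UPDATE_DELAY);
 */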

static void kvmclock_update_fn(struct work_struct *work)
{}

static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{}

#define KVMCLOCK_SYNC_PERIOD

static void kvmclock_sync_fn(struct work_struct *work)
{}

/* These helpers are safe iff @msr is known to be an MCx bank MSR. */
static bool is_mci_control_msr(u32 msr)
{}
static bool is_mci_status_msr(u32 msr)
{}

/*
 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
 */
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{}
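
/*
 * Sketch of the check implied by the comment above; the field/helper names
 * and the McStatusWrEn bit position (bit 18 of HWCR) are assumptions:
 *
 *	if (guest_cpuid_is_amd_compatible(vcpu))
 *		return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
 *	return false;
 */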

static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}

static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{}

static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{}

static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
{}

static void kvmclock_reset(struct kvm_vcpu *vcpu)
{}

static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
{}

static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{}


static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{}

/*
 * Service "local" TLB flush requests, which are specific to the current MMU
 * context.  In addition to the generic event handling in vcpu_enter_guest(),
 * TLB flushes that are targeted at an MMU context also need to be serviced
 * prior to nested VM-Enter/VM-Exit.
 */
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();
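
/*
 * The "local" requests referred to above are the two that target the
 * current MMU context; servicing them is roughly (sketch):
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
 *		kvm_vcpu_flush_tlb_current(vcpu);
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
 *		kvm_vcpu_flush_tlb_guest(vcpu);
 */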

static void record_steal_time(struct kvm_vcpu *vcpu)
{}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}
EXPORT_SYMBOL_GPL();

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}
EXPORT_SYMBOL_GPL();

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{}
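
/*
 * Sketch of the loop implied by the "@return number of msrs set
 * successfully" contract:
 *
 *	for (i = 0; i < msrs->nmsrs; ++i)
 *		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 *			break;
 *
 *	return i;
 */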

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{}

static inline bool kvm_can_mwait_in_guest(void)
{}

#ifdef CONFIG_KVM_HYPERV
static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
					    struct kvm_cpuid2 __user *cpuid_arg)
{}
#endif

static bool kvm_is_vm_type_supported(unsigned long type)
{}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{}

static int __kvm_x86_dev_get_attr(struct kvm_device_attr *attr, u64 *val)
{}

static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
{}

static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
{}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{}

static void wbinvd_ipi(void *garbage)
{}

static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{}

static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{}

static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{}

static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{}

static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{}

/*
 * Validate this is a UCNA (uncorrectable no action) error by checking the
 * MCG_STATUS and MCi_STATUS registers:
 * - none of the bits for Machine Check Exceptions are set
 * - both the VAL (valid) and UC (uncorrectable) bits are set
 * MCI_STATUS_PCC - Processor Context Corrupted
 * MCI_STATUS_S - Signaled as a Machine Check Exception
 * MCI_STATUS_AR - Software recoverable Action Required
 */
static bool is_ucna(struct kvm_x86_mce *mce)
{}
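
/*
 * Near-literal translation of the checklist above (sketch; "none of the
 * Machine Check Exception bits" is read as mcg_status == 0 plus PCC/S/AR
 * clear in MCi_STATUS):
 *
 *	return !mce->mcg_status &&
 *	       !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) &&
 *	       (mce->status & MCI_STATUS_VAL) &&
 *	       (mce->status & MCI_STATUS_UC);
 */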

static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks)
{}

static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{}

static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{}

static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
{}

static int kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
					    struct kvm_debugregs *dbgregs)
{}

static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
					    struct kvm_debugregs *dbgregs)
{}


static int kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
					 u8 *state, unsigned int size)
{}

static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
					struct kvm_xsave *guest_xsave)
{}

static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
					struct kvm_xsave *guest_xsave)
{}

static int kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{}

static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{}

/*
 * kvm_set_guest_paused() indicates to the guest kernel that it has been
 * stopped by the hypervisor.  This function will be called from the host only.
 * EINVAL is returned when the host attempts to set the flag for a guest that
 * does not support pv clocks.
 */
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{}
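
/*
 * Sketch of the behavior documented above; the pv_time bookkeeping field
 * names are assumptions:
 *
 *	if (!vcpu->arch.pv_time.active)
 *		return -EINVAL;
 *	vcpu->arch.pvclock_set_guest_stopped_request = true;
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *	return 0;
 *
 * The PVCLOCK_GUEST_STOPPED flag itself reaches the guest-visible pvclock
 * page on the next clock update.
 */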

static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{}

static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{}

static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{}

static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
				      unsigned int ioctl,
				      void __user *argp)
{}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{}

static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
					      u64 ident_addr)
{}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 unsigned long kvm_nr_mmu_pages)
{}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{}

static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{}

static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{}

static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			bool line_status)
{}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{}

static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
{}

static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
{}

static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
			      struct kvm_msr_filter_range *user_range)
{}

static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
				       struct kvm_msr_filter *filter)
{}

#ifdef CONFIG_KVM_COMPAT
/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range_compat {};

struct kvm_msr_filter_compat {};

#define KVM_X86_SET_MSR_FILTER_COMPAT

long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
			      unsigned long arg)
{}
#endif

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_arch_suspend_notifier(struct kvm *kvm)
{}

int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
{}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
{}

static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
{}

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{}

static void kvm_probe_feature_msr(u32 msr_index)
{}

static void kvm_probe_msr_to_save(u32 msr_index)
{}

static void kvm_init_msr_lists(void)
{}

static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
{}

static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{}

void kvm_set_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception)
{}

gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
{}
EXPORT_SYMBOL_GPL();

gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
{}
EXPORT_SYMBOL_GPL();

/* used to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
{}

static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u64 access,
				      struct x86_exception *exception)
{}

/* used for instruction fetching */
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception)
{}

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			       gva_t addr, void *val, unsigned int bytes,
			       struct x86_exception *exception)
{}
EXPORT_SYMBOL_GPL();

static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
			     struct x86_exception *exception, bool system)
{}

static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u64 access,
				      struct x86_exception *exception)
{}

static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
			      unsigned int bytes, struct x86_exception *exception,
			      bool system)
{}

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
				unsigned int bytes, struct x86_exception *exception)
{}
EXPORT_SYMBOL_GPL();

static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
				  void *insn, int insn_len)
{}

int handle_ud(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			    gpa_t gpa, bool write)
{}

static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
				bool write)
{}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{}

struct read_write_emulator_ops {};

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{}

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
{}

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
{}

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{}

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
{}

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
{}

static const struct read_write_emulator_ops read_emultor =;

static const struct read_write_emulator_ops write_emultor =;

static int emulator_read_write_onepage(unsigned long addr, void *val,
				       unsigned int bytes,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
				       const struct read_write_emulator_ops *ops)
{}

static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
			unsigned long addr,
			void *val, unsigned int bytes,
			struct x86_exception *exception,
			const struct read_write_emulator_ops *ops)
{}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{}

static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
			    unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct x86_exception *exception)
{}

#define emulator_try_cmpxchg_user(t, ptr, old, new)

static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_exception *exception)
{}

static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *data,
			       unsigned int count, bool in)
{}

static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
      			   unsigned short port, void *val, unsigned int count)
{}

static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
{}

static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
				    unsigned int count)
{}

static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port, const void *val,
			    unsigned int count)
{}

static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
{}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{}

static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
{}

static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();



static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{}

static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr)
{}

static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long value)
{}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{}

static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{}

static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{}

static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{}

static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{}

static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{}

static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{}

static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
{}

static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
				 int seg)
{}

static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
				 int seg)
{}

static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
					u32 msr_index, u64 *pdata)
{}

static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
					u32 msr_index, u64 data)
{}

static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
{}

static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
{}

static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
{}

static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{}

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{}

static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			      u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
			      bool exact_only)
{}

static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
{}

static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
{}

static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
{}

static bool emulator_guest_cpuid_is_intel_compatible(struct x86_emulate_ctxt *ctxt)
{}

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{}

static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{}

static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
{}

static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
{}

#ifndef CONFIG_KVM_SMM
static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
	WARN_ON_ONCE(1);
	return X86EMUL_UNHANDLEABLE;
}
#endif

static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
{}

static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
{}

static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
{}

static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
					gva_t addr, unsigned int flags)
{}

static const struct x86_emulate_ops emulate_ops =;

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{}

static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{}

static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
{}

static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{}
EXPORT_SYMBOL_GPL();

static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					   u8 ndata, u8 *insn_bytes, u8 insn_size)
{}

static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
{}

void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					  u8 ndata)
{}
EXPORT_SYMBOL_GPL();

void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{}

static bool kvm_unprotect_and_retry_on_failure(struct kvm_vcpu *vcpu,
					       gpa_t cr2_or_gpa,
					       int emulation_type)
{}

static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{}

static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
{}

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
{}

static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
					   int emulation_type, int *r)
{}

static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
{}

/*
 * Decode an instruction for emulation.  The caller is responsible for handling
 * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
 * (and wrong) when emulating on an intercepted fault-like exception[*], as
 * code breakpoints have higher priority and thus have already been done by
 * hardware.
 *
 * [*] Except #MC, which is higher priority, but KVM should never emulate in
 *     response to a machine check.
 */
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len)
{}
EXPORT_SYMBOL_GPL();

int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len)
{}

int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len)
{}
EXPORT_SYMBOL_GPL();

static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
{}

static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
{}

static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port)
{}

static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
{}

static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
			   unsigned short port)
{}

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{}
EXPORT_SYMBOL_GPL();

static int kvmclock_cpu_down_prep(unsigned int cpu)
{}

static void tsc_khz_changed(void *data)
{}

#ifdef CONFIG_X86_64
static void kvm_hyperv_tsc_notifier(void)
{}
#endif

static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
{}

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{}

static struct notifier_block kvmclock_cpufreq_notifier_block =;

static int kvmclock_cpu_online(unsigned int cpu)
{}

static void kvm_timer_init(void)
{}

#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
 * Indirection to move queue_work() out of the tk_core.seq write-held
 * region to prevent possible deadlocks against time accessors which
 * are invoked with work-related locks held.
 */
static void pvclock_irq_work_fn(struct irq_work *w)
{}

static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);

/*
 * Notification about pvclock gtod data update.
 */
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
			       void *priv)
{}

static struct notifier_block pvclock_gtod_notifier =;
#endif

static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{}

static int kvm_x86_check_processor_compatibility(void)
{}

static void kvm_x86_check_cpu_compat(void *ret)
{}

int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{}
EXPORT_SYMBOL_GPL();

void kvm_x86_vendor_exit(void)
{}
EXPORT_SYMBOL_GPL();

#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
			        unsigned long clock_type)
{}
#endif

/*
 * kvm_pv_kick_cpu_op:  Kick a vcpu.
 *
 * @apicid - apicid of vcpu to be kicked.
 */
static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
{}

bool kvm_apicv_activated(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL();

bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
				       enum kvm_apicv_inhibit reason, bool set)
{}

static void kvm_apicv_init(struct kvm *kvm)
{}

static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
{}

static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
{}

unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
				      unsigned long a0, unsigned long a1,
				      unsigned long a2, unsigned long a3,
				      int op_64_bit, int cpl)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{}

static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{}

/* Called within kvm->srcu read side.  */
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{}


int kvm_check_nested_events(struct kvm_vcpu *vcpu)
{}

static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{}

/*
 * Check for any event (interrupt or exception) that is ready to be injected,
 * and if there is at least one event, inject the event with the highest
 * priority.  This handles both "pending" events, i.e. events that have never
 * been injected into the guest, and "injected" events, i.e. events that were
 * injected as part of a previous VM-Enter, but weren't successfully delivered
 * and need to be re-injected.
 *
 * Note, this is not guaranteed to be invoked on a guest instruction boundary,
 * i.e. doesn't guarantee that there's an event window in the guest.  KVM must
 * be able to inject exceptions in the "middle" of an instruction, and so must
 * also be able to re-inject NMIs and IRQs in the middle of an instruction.
 * I.e. for exceptions and re-injected events, NOT invoking this on instruction
 * boundaries is necessary and correct.
 *
 * For simplicity, KVM uses a single path to inject all events (except events
 * that are injected directly from L1 to L2) and doesn't explicitly track
 * instruction boundaries for asynchronous events.  However, because VM-Exits
 * that can occur during instruction execution typically result in KVM skipping
 * the instruction or injecting an exception, e.g. instruction and exception
 * intercepts, and because pending exceptions have higher priority than pending
 * interrupts, KVM still honors instruction boundaries in most scenarios.
 *
 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
 * the instruction or inject an exception, then KVM can incorrectly inject a new
 * asynchronous event if the event became pending after the CPU fetched the
 * instruction (in the guest).  E.g. if a page fault (#PF, #NPF, EPT violation)
 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
 * injected on the restarted instruction instead of being deferred until the
 * instruction completes.
 *
 * In practice, this virtualization hole is unlikely to be observed by the
 * guest, and even less likely to cause functional problems.  To detect the
 * hole, the guest would have to trigger an event on a side effect of an early
 * phase of instruction execution, e.g. on the instruction fetch from memory.
 * And for it to be a functional problem, the guest would need to depend on the
 * ordering between that side effect, the instruction completing, _and_ the
 * delivery of the asynchronous event.
 */
static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
				       bool *req_immediate_exit)
{}

static void process_nmi(struct kvm_vcpu *vcpu)
{}

/* Return total number of NMIs pending injection to the VM */
int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
{}

void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap)
{}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{}

void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{}

void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set)
{}

void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set)
{}
EXPORT_SYMBOL_GPL();

static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{}

static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{}

void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{}

static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{}

/*
 * Called within kvm->srcu read side.
 * Returns 1 to let vcpu_run() continue the guest execution loop without
 * exiting to userspace.  Otherwise, the value will be returned to
 * userspace.
 */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{}

static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{}

static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{}

/* Called within kvm->srcu read side.  */
static inline int vcpu_block(struct kvm_vcpu *vcpu)
{}

/* Called within kvm->srcu read side.  */
static int vcpu_run(struct kvm_vcpu *vcpu)
{}

static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{}

int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL();

bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{}

bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
{}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{}

static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{}

static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{}

/*
 * Implements the following, as a state machine:
 *
 * read:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       exit
 *       copy data
 *   execute insn
 *
 * write:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       copy data
 *       exit
 */
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{}

/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{}

/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{}

static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{}

static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{}

static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{}

static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{}

static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
{}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{}

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code)
{}
EXPORT_SYMBOL_GPL();

static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{}

static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
		int *mmu_reset_needed, bool update_pdptrs)
{}

static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{}

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
{}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{}

static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
{}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{}

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{}
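
/*
 * Illustrative sketch, not part of the original file: a plausible shape for
 * the translation above, assuming the kvm_mmu_gva_to_gpa_system() helper and
 * the INVALID_GPA sentinel from this subsystem.  The helper name is
 * hypothetical and ignores the fault-reporting details of the real ioctl.
 */
static void __maybe_unused example_translate(struct kvm_vcpu *vcpu,
					     struct kvm_translation *tr)
{
	gpa_t gpa = kvm_mmu_gva_to_gpa_system(vcpu, tr->linear_address, NULL);

	tr->physical_address = gpa;
	tr->valid = gpa != INVALID_GPA;
	tr->writeable = 1;
	tr->usermode = 0;
}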

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{}

static void store_regs(struct kvm_vcpu *vcpu)
{}

static int sync_regs(struct kvm_vcpu *vcpu)
{}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{}

void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{}
EXPORT_SYMBOL_GPL(kvm_vcpu_reset);

void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{}
EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);

void kvm_arch_enable_virtualization(void)
{}

void kvm_arch_disable_virtualization(void)
{}

int kvm_arch_enable_virtualization_cpu(void)
{}

void kvm_arch_disable_virtualization_cpu(void)
{}

bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{}

bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{}

void kvm_arch_free_vm(struct kvm *kvm)
{}


int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{}

int kvm_arch_post_init_vm(struct kvm *kvm)
{}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{}

static void kvm_unload_vcpu_mmus(struct kvm *kvm)
{}

void kvm_arch_sync_events(struct kvm *kvm)
{}

/**
 * __x86_set_memory_region: Set up a KVM internal memory slot
 *
 * @kvm: the kvm pointer to the VM.
 * @id: the slot ID to setup.
 * @gpa: the GPA to install the slot (unused when @size == 0).
 * @size: the size of the slot. Set to zero to uninstall a slot.
 *
 * This function sets up a KVM internal memory slot.  Specify
 * @size > 0 to install a new slot, or @size == 0 to uninstall an
 * existing one.  The return code can be one of the following:
 *
 *   HVA:           on success (uninstall will return a bogus HVA)
 *   -errno:        on error
 *
 * The caller should always use IS_ERR() to check the return value
 * before use.  Note, the KVM internal memory slots are guaranteed to
 * remain valid and unchanged until the VM is destroyed, i.e., the
 * GPA->HVA translation will not change.  However, the HVA is a user
 * address, i.e. its accessibility is not guaranteed, and must be
 * accessed via __copy_{to,from}_user().
 */
void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
				      u32 size)
{}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);
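
/*
 * Illustrative usage sketch, not part of the original file, following the
 * IS_ERR() convention documented above.  The slot id, GPA and size below are
 * placeholders, not values taken from this file.
 */
static int __maybe_unused example_install_private_slot(struct kvm *kvm,
						       int id, gpa_t gpa)
{
	void __user *hva;

	/* @size > 0 installs the slot ... */
	hva = __x86_set_memory_region(kvm, id, gpa, PAGE_SIZE);
	if (IS_ERR(hva))
		return PTR_ERR(hva);

	/* ... and @size == 0 removes it again (the returned HVA is bogus). */
	__x86_set_memory_region(kvm, id, 0, 0);
	return 0;
}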

void kvm_arch_pre_destroy_vm(struct kvm *kvm)
{}

void kvm_arch_destroy_vm(struct kvm *kvm)
{}

static void memslot_rmap_free(struct kvm_memory_slot *slot)
{}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{}

int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
{}

static int kvm_alloc_memslot_metadata(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{}


static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
{}

static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{}

unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{}

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{}

static inline u32 kvm_async_pf_next_probe(u32 key)
{}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{}

static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
{}

static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
{}

static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
{}

static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
{}

bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{}

void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{}

void kvm_arch_start_assignment(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);

static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
{}

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

bool kvm_arch_has_irq_bypass(void)
{}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				   uint32_t guest_irq, bool set)
{}

bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
				  struct kvm_kernel_irq_routing_entry *new)
{}

bool kvm_vector_hashing_enabled(void)
{}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
{}
#endif

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{}
#endif

int kvm_spec_ctrl_test_value(u64 value)
{}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{}
EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);

/*
 * Handles a kvm_read/write_guest_virt*() result and either injects a #PF or
 * returns KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM.  The
 * return value indicates whether an exit to userspace is needed.
 */
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e)
{}
EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
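
/*
 * Illustrative caller pattern, not part of the original file, for the helper
 * documented above: a failed emulated access to guest-virtual memory is
 * either turned into an injected #PF or into an exit to userspace.  The
 * helper name and the local variables are placeholders; kvm_read_guest_virt()
 * and X86EMUL_CONTINUE are assumed from this file's emulation interfaces.
 */
static int __maybe_unused example_read_guest_operand(struct kvm_vcpu *vcpu,
						     gva_t gva, u64 *operand)
{
	struct x86_exception e;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, operand, sizeof(*operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	return 1;	/* keep running the guest */
}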

int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{}
EXPORT_SYMBOL_GPL(kvm_handle_invpcid);

static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{}

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			  void *data)
{}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);

int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			 void *data)
{}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);

static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
{}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port);

static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port)
{}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port);

static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port)
{}

int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in)
{}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();
EXPORT_TRACEPOINT_SYMBOL_GPL();

static int __init kvm_x86_init(void)
{}
module_init(kvm_x86_init);

static void __exit kvm_x86_exit(void)
{}
module_exit(kvm_x86_exit);