#define pr_fmt(fmt) …
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "xen.h"
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <asm/apicdef.h>
#include <asm/mshyperv.h>
#include <trace/events/kvm.h>
#include "trace.h"
#include "irq.h"
#include "fpu.h"
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS …
#define HV_EXT_CALL_MAX …
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
bool vcpu_kick);
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{ … }
static inline int synic_get_sint_vector(u64 sint_value)
{ … }
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
int vector)
{ … }
static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
int vector)
{ … }
static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
int vector)
{ … }
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
u64 data, bool host)
{ … }
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{ … }
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{ … }
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{ … }
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{ … }
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
u32 msr, u64 data, bool host)
{ … }
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{ … }
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{ … }
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{ … }
static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{ … }
static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{ … }
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
bool host)
{ … }
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{ … }
/*
 * Inject a synthetic interrupt on SINT @sint of the SynIC belonging to the
 * vCPU with Hyper-V VP index @vpidx.  Body elided in this view; presumably
 * resolves the vCPU via the vpidx lookup helpers above — TODO confirm.
 */
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{ … }
/*
 * Propagate an EOI for @vector to the vCPU's SynIC so any SINT using that
 * vector can be re-signaled.  (Body elided in this view.)
 */
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{ … }
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{ … }
/*
 * Called when the VM's IRQ routing table changes; presumably refreshes the
 * SINT->GSI mappings set up by kvm_hv_set_sint_gsi() — body elided, confirm.
 */
void kvm_hv_irq_routing_update(struct kvm *kvm)
{ … }
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{ … }
static u64 get_time_ref_counter(struct kvm *kvm)
{ … }
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
bool vcpu_kick)
{ … }
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{ … }
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{ … }
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{ … }
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
bool host)
{ … }
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
bool host)
{ … }
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{ … }
static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{ … }
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
struct hv_message *src_msg, bool no_retry)
{ … }
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{ … }
static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{ … }
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{ … }
/*
 * Handle pending synthetic-timer work for @vcpu, presumably delivering
 * expiration messages queued by stimer_mark_pending().  (Body elided in
 * this view — confirm against the full source.)
 */
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{ … }
/*
 * Tear down the per-vCPU Hyper-V state (counterpart of kvm_hv_vcpu_init());
 * body elided in this view.
 */
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{ … }
/*
 * Return true when the guest has enabled the Hyper-V VP assist page for
 * @vcpu.  Exported for use by other KVM modules.  (Body elided in this view.)
 */
bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * Fetch the guest's VP assist page contents for @vcpu.  Returns 0 on
 * success, negative on failure (convention inferred from the int return —
 * TODO confirm).  Exported for use by other KVM modules.
 */
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{ … }
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{ … }
/*
 * Allocate/initialize the per-vCPU Hyper-V context (SynIC, stimers, ...).
 * Returns 0 on success, negative errno otherwise — body elided, confirm.
 */
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{ … }
/*
 * Activate the synthetic interrupt controller for @vcpu.
 * @dont_zero_synic_pages: presumably skips clearing the SynIC message/event
 * pages (used when userspace manages them) — body elided, confirm.
 */
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{ … }
static bool kvm_hv_msr_partition_wide(u32 msr)
{ … }
static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{ … }
static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{ … }
static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{ … }
static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{ … }
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
struct ms_hyperv_tsc_page *tsc_ref)
{ … }
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{ … }
/*
 * Publish the Hyper-V reference TSC page derived from @hv_clock, presumably
 * using compute_tsc_page_parameters() above — body elided, confirm.
 */
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{ … }
/*
 * Mark the reference TSC page as needing a refresh (e.g. after a clock
 * change); the actual rewrite happens in kvm_hv_setup_tsc_page().
 * Body elided in this view.
 */
void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{ … }
static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{ … }
#define KVM_HV_WIN2016_GUEST_ID …
#define KVM_HV_WIN2016_GUEST_ID_MASK …
static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{ … }
/*
 * Possibly emit a one-time warning about XSAVES/XSAVEC exposure for Hyper-V
 * guests (wrapper around __kvm_hv_xsaves_xsavec_maybe_warn(), likely checks
 * the KVM_HV_WIN2016_GUEST_ID defines above) — body elided, confirm.
 */
void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{ … }
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{ … }
static u64 current_task_runtime_100ns(void)
{ … }
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{ … }
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
bool host)
{ … }
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
bool host)
{ … }
/*
 * Top-level write handler for Hyper-V synthetic MSRs; presumably dispatches
 * to kvm_hv_set_msr_pw() for partition-wide MSRs (see
 * kvm_hv_msr_partition_wide()) and kvm_hv_set_msr() otherwise.
 * @host: write originates from the host (ioctl) rather than the guest.
 */
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{ … }
/*
 * Top-level read handler for Hyper-V synthetic MSRs; counterpart of
 * kvm_hv_set_msr_common(), returning the value through @pdata.
 * (Body elided in this view.)
 */
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{ … }
static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
u64 valid_bank_mask, unsigned long *vcpu_mask)
{ … }
static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
{ … }
struct kvm_hv_hcall { … };
static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
u16 orig_cnt, u16 cnt_cap, u64 *data)
{ … }
static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
u64 *sparse_banks)
{ … }
static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
{ … }
static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
u64 *entries, int count)
{ … }
/*
 * Drain and perform the TLB flush requests queued for @vcpu by
 * hv_tlb_flush_enqueue() (Hyper-V HvFlushVirtualAddress* hypercalls).
 * Body elided in this view.
 */
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{ … }
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{ … }
static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
u64 *sparse_banks, u64 valid_bank_mask)
{ … }
static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{ … }
/*
 * Update per-vCPU Hyper-V state after the guest CPUID is (re)configured.
 * @hyperv_enabled: whether Hyper-V CPUID leaves are exposed to the guest.
 * (Body elided in this view.)
 */
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{ … }
/*
 * Toggle enforcement of the Hyper-V CPUID-advertised feature set: when
 * @enforce is true, MSR/hypercall access is presumably gated by
 * hv_check_msr_access()/hv_check_hypercall_access() — TODO confirm.
 */
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
{ … }
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{ … }
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{ … }
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{ … }
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{ … }
static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
{ … }
static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
{ … }
static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
{ … }
/*
 * Main Hyper-V hypercall entry point: decodes the call (see struct
 * kvm_hv_hcall) and dispatches to the flush/IPI/event handlers above.
 * Return value follows KVM's exit-handling convention (continue vs. exit
 * to userspace) — body elided, confirm.
 */
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{ … }
/*
 * Initialize the VM-wide Hyper-V state (struct kvm_hv); counterpart of
 * kvm_hv_destroy_vm().  Body elided in this view.
 */
void kvm_hv_init_vm(struct kvm *kvm)
{ … }
/*
 * Release the VM-wide Hyper-V state on VM teardown, presumably including
 * any conn_id eventfds registered via kvm_hv_eventfd_assign() — confirm.
 */
void kvm_hv_destroy_vm(struct kvm *kvm)
{ … }
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{ … }
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{ … }
/*
 * KVM_HYPERV_EVENTFD ioctl handler: (de)assigns an eventfd to a Hyper-V
 * connection id, dispatching to kvm_hv_eventfd_assign()/_deassign().
 * (Body elided in this view.)
 */
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{ … }
/*
 * Fill @entries (userspace buffer, at most @cpuid->nent entries) with the
 * Hyper-V CPUID leaves KVM supports; backs the KVM_GET_SUPPORTED_HV_CPUID
 * ioctl.  Returns 0 on success, negative errno on failure — body elided,
 * confirm.
 */
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{ … }