linux/arch/x86/kvm/hyperv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <[email protected]>
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Yaniv Kamay  <[email protected]>
 *   Amit Shah    <[email protected]>
 *   Ben-Ami Yassour <[email protected]>
 *   Andrey Smetanin <[email protected]>
 */
#define pr_fmt(fmt) "kvm/hyperv: " fmt

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "xen.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <asm/mshyperv.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "irq.h"
#include "fpu.h"

#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)

/*
 * As per Hyper-V TLFS, extended hypercalls start from 0x8001
 * (HvExtCallQueryCapabilities). The response to this hypercall is a 64-bit
 * value where each bit tells whether one extended hypercall, besides
 * HvExtCallQueryCapabilities itself, is available.
 *
 * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
 * assigned.
 *
 * 0x8002 - Bit 0
 * 0x8003 - Bit 1
 * ..
 * 0x8041 - Bit 63
 *
 * Therefore, HV_EXT_CALL_MAX = 0x8001 + 64
 */
#define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)
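
/*
 * An illustrative sketch (hypothetical helper, not part of the upstream
 * file) of the mapping described above from an extended hypercall code to
 * its capability bit.
 */
static inline int example_ext_call_to_cap_bit(u16 code)
{
	/* 0x8001 (HvExtCallQueryCapabilities) itself has no bit assigned. */
	if (code <= HV_EXT_CALL_QUERY_CAPABILITIES || code > HV_EXT_CALL_MAX)
		return -EINVAL;

	return code - HV_EXT_CALL_QUERY_CAPABILITIES - 1; /* 0x8002 -> bit 0 */
}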

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick);

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{}

static inline int synic_get_sint_vector(u64 sint_value)
{}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				     int vector)
{}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{}

static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{}

static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{}

static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{}

static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{}

static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{}

static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{}

static u64 get_time_ref_counter(struct kvm *kvm)
{}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{}
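
/*
 * A minimal sketch of what the assumptions above imply (hypothetical
 * helpers, not the upstream body): stimer->count is expressed in 100ns
 * units of the partition reference counter and is interpreted as a period
 * for periodic timers or as an absolute deadline for one-shot timers.
 */
static inline u64 example_stimer_exp_time(u64 time_now, u64 count, bool periodic)
{
	return periodic ? time_now + count : count;
}

static inline ktime_t example_stimer_deadline(u64 time_now, u64 exp_time)
{
	/* Each reference counter tick is 100ns; arm the hrtimer that far out. */
	return ktime_add_ns(ktime_get(), (exp_time - time_now) * 100);
}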

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{}

static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{}

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{}

int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{}

static bool kvm_hv_msr_partition_wide(u32 msr)
{}

static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{}

static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{}

static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{}

static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{}
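
/*
 * A sketch of the two equivalencies above (an assumption, not the verified
 * upstream body; the 128-bit helpers are from <linux/math64.h>). The scale
 * computation must reject tsc_to_system_mul >= 100 * 2^(32 - tsc_shift),
 * where scale would no longer fit in 64 bits.
 */
static bool example_compute_tsc_page(const struct pvclock_vcpu_time_info *hv_clock,
				     struct ms_hyperv_tsc_page *tsc_ref)
{
	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT) ||
	    hv_clock->tsc_to_system_mul >= 100ull << (32 - hv_clock->tsc_shift))
		return false;

	/* scale = tsc_to_system_mul * 2^(32 + tsc_shift) / 100 */
	tsc_ref->tsc_scale = mul_u64_u64_div_u64(hv_clock->tsc_to_system_mul,
						 1ULL << (32 + hv_clock->tsc_shift),
						 100);

	/* offset = system_time / 100 - tsc_timestamp * scale / 2^64 */
	tsc_ref->tsc_offset = div_u64(hv_clock->system_time, 100) -
			      mul_u64_u64_shr(hv_clock->tsc_timestamp,
					      tsc_ref->tsc_scale, 64);
	return true;
}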

/*
 * Don't touch TSC page values if the guest has opted for TSC emulation after
 * migration. KVM doesn't fully support reenlightenment notifications and TSC
 * access emulation, and Hyper-V is known to expect the values in the TSC page
 * to stay constant before TSC access emulation is disabled from the guest side
 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve the
 * TSC frequency and guest-visible TSC value across migration (and to prevent
 * migration when TSC scaling is unsupported).
 */
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{}

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{}

void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{}

static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{}

#define KVM_HV_WIN2016_GUEST_ID 0x1040a00003839
#define KVM_HV_WIN2016_GUEST_ID_MASK (~GENMASK_ULL(23, 16)) /* mask out the service version */

/*
 * Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in a !XSAVES &&
 * XSAVEC configuration. Such a configuration can result from, for example,
 * the AMD Erratum 1386 workaround.
 *
 * Print a notice so users aren't left wondering what's suddenly gone wrong.
 */
static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{}
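
/*
 * A sketch of the detection described above (an assumption, not the
 * verified upstream body): warn only for SMP guests that identify as
 * Windows Server 2016 via HV_X64_MSR_GUEST_OS_ID and whose CPUID exposes
 * XSAVEC without XSAVES.
 */
static void example_xsaves_xsavec_check(struct kvm_vcpu *vcpu, struct kvm_hv *hv)
{
	if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) !=
	    KVM_HV_WIN2016_GUEST_ID)
		return;

	/* UP configurations aren't affected. */
	if (atomic_read(&vcpu->kvm->online_vcpus) < 2)
		return;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) &&
	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVEC))
		pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC\n");
}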

void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
{}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{}

/* Calculate the CPU time spent by the current task, in 100ns units. */
static u64 current_task_runtime_100ns(void)
{}
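
/*
 * A minimal sketch of the conversion (hypothetical helper name; the
 * upstream body is elided above): task_cputime_adjusted() reports adjusted
 * user and system time in nanoseconds, so their sum scales to 100ns units
 * by dividing by 100.
 */
static u64 example_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}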

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			     bool host)
{}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{}

static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
				    u64 valid_bank_mask, unsigned long *vcpu_mask)
{}

static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
{}
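
/*
 * A sketch of the TLFS sparse VP-set encoding these helpers operate on
 * (hypothetical helper, assuming 64 VPs per bank): valid_bank_mask marks
 * which 64-wide banks are present, and sparse_banks[] packs the present
 * banks contiguously in ascending bank order.
 */
static bool example_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask,
				     const u64 sparse_banks[])
{
	u32 bank = vp_id / 64, bit = vp_id % 64;

	if (bank >= 64 || !(valid_bank_mask & BIT_ULL(bank)))
		return false;

	/* A bank's index equals the number of valid banks preceding it. */
	return sparse_banks[hweight64(valid_bank_mask & (BIT_ULL(bank) - 1))] &
	       BIT_ULL(bit);
}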

struct kvm_hv_hcall {};

static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
			      u16 orig_cnt, u16 cnt_cap, u64 *data)
{}

static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
				 u64 *sparse_banks)
{}

static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
{}

static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
				 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
				 u64 *entries, int count)
{}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{}

static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{}

static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
				    u64 *sparse_banks, u64 valid_bank_mask)
{}

static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{}

void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{}

int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
{}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{}

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{}

static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
{}

static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
{}

static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
{}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{}

void kvm_hv_init_vm(struct kvm *kvm)
{}

void kvm_hv_destroy_vm(struct kvm *kvm)
{}

static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{}

int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{}

int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{}