#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
#include <asm/vmware.h>
#include <asm/svm.h>
#undef pr_fmt
#define pr_fmt(fmt) …
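/*
 * VMware advertises itself through CPUID leaves in the hypervisor
 * range: the info leaf carries the "VMwareVMware" signature, while the
 * features leaf reports, among other things, which hypercall
 * instruction (VMCALL/VMMCALL) the platform supports.
 */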
#define CPUID_VMWARE_INFO_LEAF …
#define CPUID_VMWARE_FEATURES_LEAF …
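/*
 * Bits in the result of the GETVCPU_INFO hypercall; when the reserved
 * bit is set, the remaining bits are not valid.
 */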
#define GETVCPU_INFO_LEGACY_X2APIC …
#define GETVCPU_INFO_VCPU_RESERVED …
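/*
 * States reported by the STEALCLOCK hypercall: unsupported, supported
 * but currently disabled, or enabled.
 */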
#define STEALCLOCK_NOT_AVAILABLE …
#define STEALCLOCK_DISABLED …
#define STEALCLOCK_ENABLED …
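/*
 * Per-vCPU structure shared with the hypervisor, in which the
 * hypervisor accumulates the stolen time counter while the steal clock
 * is enabled.
 */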
struct vmware_steal_time { … };
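/*
 * Both fixed once during early boot: the TSC frequency reported by the
 * hypervisor and the hypercall mode chosen by vmware_select_hypercall().
 */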
static unsigned long vmware_tsc_khz __ro_after_init;
static u8 vmware_hypercall_mode __ro_after_init;
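/*
 * Out-of-line hypercall path for calls that need the full set of output
 * registers. Dispatches on vmware_hypercall_mode: VMCALL on Intel,
 * VMMCALL on AMD, or the legacy backdoor I/O port on hypervisors that
 * predate the instruction based interface.
 */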
unsigned long vmware_hypercall_slow(unsigned long cmd,
				    unsigned long in1, unsigned long in3,
				    unsigned long in4, unsigned long in5,
				    u32 *out1, u32 *out2, u32 *out3,
				    u32 *out4, u32 *out5)
{ … }
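/*
 * Positive platform check: the GETVERSION hypercall must succeed and
 * return the VMware magic value.
 */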
static inline int __vmware_platform(void)
{ … }
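/* CPU/TSC calibration callback: the frequency reported by the hypervisor. */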
static unsigned long vmware_get_tsc_khz(void)
{ … }
#ifdef CONFIG_PARAVIRT
static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
static bool vmw_sched_clock __initdata = …;
static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(…);
static bool has_steal_clock;
static bool steal_acc __initdata = …;
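/*
 * Boot command line knobs, registered via the early_param() calls
 * below: one opts out of the paravirt sched clock, the other disables
 * steal time accounting.
 */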
static __init int setup_vmw_sched_clock(char *s)
{ … }
early_param(…);
static __init int parse_no_stealacc(char *arg)
{ … }
early_param(…);
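/*
 * Paravirt sched_clock: a raw TSC read scaled to nanoseconds with the
 * precomputed mult/shift pair, roughly
 *
 *	ns = mul_u64_u32_shr(rdtsc(), cyc2ns_mul, cyc2ns_shift) - offset
 *
 * It is noinstr since it can be called from almost any context.
 */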
static noinstr u64 vmware_sched_clock(void)
{ … }
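/*
 * Derive the mult/shift pair from vmware_tsc_khz (the usual
 * clocks_calc_mult_shift() exercise) and record an offset so that
 * sched_clock() starts near zero at this point of boot.
 */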
static void __init vmware_cyc2ns_setup(void)
{ … }
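/*
 * The STEALCLOCK hypercall takes the physical address of the per-vCPU
 * steal time structure split into 32-bit halves and returns one of the
 * STEALCLOCK_* codes above. The enable/disable helpers below pass in a
 * valid buffer address or an intentionally invalid one, respectively.
 */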
static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
{ … }
static bool stealclock_enable(phys_addr_t pa)
{ … }
static int __stealclock_disable(void)
{ … }
static void stealclock_disable(void)
{ … }
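/*
 * Availability probe: anything other than STEALCLOCK_NOT_AVAILABLE from
 * a disable attempt means the hypervisor implements the command.
 */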
static bool vmware_is_stealclock_available(void)
{ … }
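/*
 * Steal clock read: fetch the accumulated stolen cycle count for the
 * given CPU (on 32-bit kernels via the two 32-bit halves to avoid torn
 * reads) and scale it to nanoseconds with the same mult/shift pair the
 * sched clock uses.
 */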
static u64 vmware_steal_clock(int cpu)
{ … }
static void vmware_register_steal_time(void)
{ … }
static void vmware_disable_steal_time(void)
{ … }
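/* Per-CPU bring-up: (re)register the steal time area when available. */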
static void vmware_guest_cpu_init(void)
{ … }
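/*
 * Steal time must be disabled on every CPU before reboot or kexec;
 * otherwise the hypervisor would keep updating pages that a subsequent
 * kernel may reuse for something else.
 */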
static void vmware_pv_guest_cpu_reboot(void *unused)
{ … }
static int vmware_pv_reboot_notify(struct notifier_block *nb,
				   unsigned long code, void *unused)
{ … }
static struct notifier_block vmware_pv_reboot_nb = …;
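/*
 * CPU hotplug wiring: steal time is enabled as a CPU comes online and
 * disabled again before it goes down.
 */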
#ifdef CONFIG_SMP
static void __init vmware_smp_prepare_boot_cpu(void)
{ … }
static int vmware_cpu_online(unsigned int cpu)
{ … }
static int vmware_cpu_down_prepare(unsigned int cpu)
{ … }
#endif
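/*
 * The static keys gating steal time accounting cannot be flipped during
 * early boot, hence an arch_initcall once jump label patching is
 * functional.
 */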
static __init int activate_jump_labels(void)
{ … }
arch_initcall(activate_jump_labels);
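/*
 * Install the paravirt hooks: the TSC based sched clock (unless turned
 * off on the command line) and, when the hypervisor supports it, the
 * steal clock together with the reboot notifier and hotplug callbacks
 * above.
 */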
static void __init vmware_paravirt_ops_setup(void)
{ … }
#else
#define vmware_paravirt_ops_setup …
#endif
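/*
 * Force-set the CPU feature bits that are always true under VMware,
 * such as a constant and reliable TSC, plus the bit matching the chosen
 * hypercall instruction.
 */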
static void __init vmware_set_capabilities(void)
{ … }
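/*
 * Main platform hook: query the TSC (and APIC bus) frequency from the
 * hypervisor, wire up the calibration callbacks, and run the paravirt
 * and capability setup above.
 */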
static void __init vmware_platform_setup(void)
{ … }
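/*
 * Read the VMCALL/VMMCALL feature bits from the features CPUID leaf; a
 * result of zero leaves the legacy I/O port backdoor as the only
 * option.
 */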
static u8 __init vmware_select_hypercall(void)
{ … }
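/*
 * Detection callback: scan the hypervisor CPUID range for the VMware
 * signature when the HYPERVISOR feature bit is set, falling back to a
 * DMI match for products that predate the CPUID interface.
 */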
static u32 __init vmware_platform(void)
{ … }
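/*
 * Legacy x2APIC is usable when GETVCPU_INFO reports the reserved bit
 * clear and the x2APIC bit set.
 */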
static bool __init vmware_legacy_x2apic_available(void)
{ … }
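/*
 * A TDX guest cannot reach the hypervisor directly, so VMware
 * hypercalls are tunneled through the TDX module (TDG.VP.VMCALL)
 * instead. Exported so the inline vmware_hypercall helpers keep
 * working from modules.
 */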
#ifdef CONFIG_INTEL_TDX_GUEST
unsigned long vmware_tdx_hypercall(unsigned long cmd,
				   unsigned long in1, unsigned long in3,
				   unsigned long in4, unsigned long in5,
				   u32 *out1, u32 *out2, u32 *out3,
				   u32 *out4, u32 *out5)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif
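/*
 * SEV-ES: before the intercepted VMMCALL is forwarded with VMGEXIT, the
 * prepare callback copies the hypercall arguments from pt_regs into the
 * GHCB; the finish callback moves the results back afterwards.
 */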
#ifdef CONFIG_AMD_MEM_ENCRYPT
static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
					struct pt_regs *regs)
{ … }
static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{ … }
#endif
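/*
 * Hypervisor descriptor tying the detection and setup callbacks above
 * together; the generic hypervisor detection code walks a table of
 * these at boot.
 */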
const __initconst struct hypervisor_x86 x86_hyper_vmware = …;