linux/arch/x86/kernel/cpu/vmware.c

/*
 * VMware Detection code.
 *
 * Copyright (C) 2008, VMware, Inc.
 * Author : Alok N Kataria <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
#include <asm/vmware.h>
#include <asm/svm.h>

#undef pr_fmt
#define pr_fmt(fmt)	"vmware: " fmt

#define CPUID_VMWARE_INFO_LEAF		0x40000000
#define CPUID_VMWARE_FEATURES_LEAF	0x40000010

#define GETVCPU_INFO_LEGACY_X2APIC	BIT(3)
#define GETVCPU_INFO_VCPU_RESERVED	BIT(31)

#define STEALCLOCK_NOT_AVAILABLE	(-1)
#define STEALCLOCK_DISABLED		0
#define STEALCLOCK_ENABLED		1

struct vmware_steal_time {};

static unsigned long vmware_tsc_khz __ro_after_init;
static u8 vmware_hypercall_mode     __ro_after_init;

unsigned long vmware_hypercall_slow(unsigned long cmd,
				    unsigned long in1, unsigned long in3,
				    unsigned long in4, unsigned long in5,
				    u32 *out1, u32 *out2, u32 *out3,
				    u32 *out4, u32 *out5)
{}

static inline int __vmware_platform(void)
{}

static unsigned long vmware_get_tsc_khz(void)
{}

#ifdef CONFIG_PARAVIRT
static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
static bool vmw_sched_clock __initdata = true;
static DEFINE_PER_CPU_DECRYPTED(struct vmware_steal_time, vmw_steal_time) __aligned(64);
static bool has_steal_clock;
static bool steal_acc __initdata = true; /* steal time accounting */

static __init int setup_vmw_sched_clock(char *s)
{}
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);

static __init int parse_no_stealacc(char *arg)
{}
early_param("no-steal-acc", parse_no_stealacc);

static noinstr u64 vmware_sched_clock(void)
{}

static void __init vmware_cyc2ns_setup(void)
{}

static int vmware_cmd_stealclock(u32 addr_hi, u32 addr_lo)
{}

static bool stealclock_enable(phys_addr_t pa)
{}

static int __stealclock_disable(void)
{}

static void stealclock_disable(void)
{}

static bool vmware_is_stealclock_available(void)
{}

/**
 * vmware_steal_clock() - read the per-cpu steal clock
 * @cpu:            the cpu number whose steal clock we want to read
 *
 * On 64-bit systems the function reads the steal clock in a single access;
 * otherwise it reads it in two 32-bit halves, retrying if the high half
 * changed in the meantime.
 *
 * Return:
 *      The steal clock reading in ns.
 */
static u64 vmware_steal_clock(int cpu)
{}
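
/*
 * Illustrative sketch (not the in-tree implementation) of the retry loop
 * described above: on a 32-bit kernel the 64-bit steal-time counter cannot
 * be read atomically, so it is read as two 32-bit halves and the read is
 * retried whenever the high half changed in between. The pointer parameters
 * stand in for the per-cpu vmw_steal_time fields.
 */
static u64 __maybe_unused vmware_read_split_counter(const u32 *low_p,
						    const u32 *high_p)
{
	u32 initial_high, low, high;

	do {
		initial_high = READ_ONCE(*high_p);
		/* Keep the low read strictly between the two high reads. */
		virt_rmb();
		low = READ_ONCE(*low_p);
		virt_rmb();
		high = READ_ONCE(*high_p);
	} while (initial_high != high);

	return ((u64)high << 32) | low;
}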

static void vmware_register_steal_time(void)
{}

static void vmware_disable_steal_time(void)
{}

static void vmware_guest_cpu_init(void)
{}

static void vmware_pv_guest_cpu_reboot(void *unused)
{}

static int vmware_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{}

static struct notifier_block vmware_pv_reboot_nb = {
	.notifier_call = vmware_pv_reboot_notify,
};

#ifdef CONFIG_SMP
static void __init vmware_smp_prepare_boot_cpu(void)
{}

static int vmware_cpu_online(unsigned int cpu)
{}

static int vmware_cpu_down_prepare(unsigned int cpu)
{}
#endif

static __init int activate_jump_labels(void)
{}
arch_initcall(activate_jump_labels);

static void __init vmware_paravirt_ops_setup(void)
{}
#else
#define vmware_paravirt_ops_setup() do {} while (0)
#endif

/*
 * The VMware hypervisor takes care of exporting a reliable TSC to the guest.
 * Still, due to timing differences when running on virtual CPUs, the TSC can
 * be marked as unstable in some cases. For example, the TSC sync check at
 * bootup can fail due to a marginal offset between the vCPUs' TSCs (though
 * the TSCs do not drift from each other).  Also, the ACPI PM timer
 * clocksource is not suitable as a watchdog when running on a hypervisor
 * because the kernel may miss a wrap of the counter if the vCPU is
 * descheduled for a long time. To skip these checks at runtime we set these
 * capability bits, so that the kernel can just trust the hypervisor to
 * provide a reliable virtual TSC that is suitable for timekeeping.
 */
static void __init vmware_set_capabilities(void)
{}
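
/*
 * Illustrative sketch of the idea described above, not necessarily the exact
 * set of bits forced by the in-tree implementation: mark the TSC as constant
 * and reliable so the unstable-TSC checks and the clocksource watchdog are
 * skipped when running under VMware.
 */
static void __init __maybe_unused vmware_set_capabilities_sketch(void)
{
	setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
	if (vmware_tsc_khz)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
}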

static void __init vmware_platform_setup(void)
{}

static u8 __init vmware_select_hypercall(void)
{}

/*
 * When checking the DMI string information, checking just the product
 * serial key should be enough, as it always contains a VMware-specific
 * string when running under the VMware hypervisor.
 * If !boot_cpu_has(X86_FEATURE_HYPERVISOR), vmware_hypercall_mode
 * intentionally defaults to 0.
 */
static u32 __init vmware_platform(void)
{}
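
/*
 * Illustrative sketch of the detection order described above (not the
 * in-tree implementation): when CPUID reports a hypervisor, look for the
 * "VMwareVMware" signature in the VMware CPUID leaf; otherwise fall back to
 * the DMI product serial check combined with the __vmware_platform() probe.
 */
static u32 __init __maybe_unused vmware_platform_sketch(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		unsigned int eax;
		unsigned int hyper_vendor_id[3];

		/* The 12-byte vendor signature is returned in EBX:ECX:EDX. */
		cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
		      &hyper_vendor_id[1], &hyper_vendor_id[2]);
		if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
			return CPUID_VMWARE_INFO_LEAF;
	} else if (dmi_available && dmi_name_in_serial("VMware") &&
		   __vmware_platform()) {
		return 1;
	}

	return 0;
}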

/* Checks if the hypervisor supports x2APIC without VT-d interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{}
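
/*
 * Illustrative sketch of the check named above, assuming the
 * VMWARE_CMD_GETVCPU_INFO command and the GETVCPU_INFO_* bit layout used in
 * this file: the reply is only valid if the VCPU_RESERVED bit is clear, and
 * the LEGACY_X2APIC bit then reports x2APIC support without interrupt
 * remapping.
 */
static bool __init __maybe_unused vmware_legacy_x2apic_sketch(void)
{
	u32 eax;

	eax = vmware_hypercall1(VMWARE_CMD_GETVCPU_INFO, 0);
	return !(eax & GETVCPU_INFO_VCPU_RESERVED) &&
	       (eax & GETVCPU_INFO_LEGACY_X2APIC);
}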

#ifdef CONFIG_INTEL_TDX_GUEST
/*
 * TDCALL[TDG.VP.VMCALL] uses %rax (arg0) and %rcx (arg2). Therefore,
 * we remap those registers to %r12 and %r13, respectively.
 */
unsigned long vmware_tdx_hypercall(unsigned long cmd,
				   unsigned long in1, unsigned long in3,
				   unsigned long in4, unsigned long in5,
				   u32 *out1, u32 *out2, u32 *out3,
				   u32 *out4, u32 *out5)
{}
EXPORT_SYMBOL_GPL(vmware_tdx_hypercall);
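
/*
 * Illustrative sketch of the register remapping described above. The
 * structure below is a stand-in defined only for this example, not the real
 * TDX call interface: the point is that the hypercall magic and command,
 * which the regular VMware hypercall ABI passes in %rax and %rcx, travel in
 * %r12 and %r13 for a TDX guest.
 */
struct vmware_tdx_regs_sketch {
	unsigned long r12;	/* VMware magic, in place of %rax */
	unsigned long r13;	/* hypercall command, in place of %rcx */
};

static void __maybe_unused
vmware_tdx_fill_regs_sketch(struct vmware_tdx_regs_sketch *regs,
			    unsigned long cmd)
{
	regs->r12 = VMWARE_HYPERVISOR_MAGIC;
	regs->r13 = cmd;
}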
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
					struct pt_regs *regs)
{}

static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{}
#endif

const __initconst struct hypervisor_x86 x86_hyper_vmware = {
	.name				= "VMware",
	.detect				= vmware_platform,
	.type				= X86_HYPER_VMWARE,
	.init.init_platform		= vmware_platform_setup,
	.init.x2apic_available		= vmware_legacy_x2apic_available,
#ifdef CONFIG_PARAVIRT
	.init.guest_late_init		= vmware_guest_cpu_init,
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
	.runtime.sev_es_hcall_prepare	= vmware_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= vmware_sev_es_hcall_finish,
#endif
};