linux/arch/x86/kernel/kvmclock.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*  KVM paravirtual clock driver. A clocksource implementation
    Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
*/

#include <linux/clocksource.h>
#include <linux/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/set_memory.h>
#include <linux/cc_platform.h>

#include <asm/hypervisor.h>
#include <asm/x86_init.h>
#include <asm/kvmclock.h>

static int kvmclock __initdata = 1;
static int kvmclock_vsyscall __initdata = 1;
static int msr_kvm_system_time __ro_after_init;
static int msr_kvm_wall_clock __ro_after_init;
static u64 kvm_sched_clock_offset __ro_after_init;

static int __init parse_no_kvmclock(char *arg)
{
	kvmclock = 0;
	return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);

static int __init parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
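/*
 * Booting with "no-kvmclock" disables this clocksource entirely, while
 * "no-kvmclock-vsyscall" only keeps the vDSO fast path from using it.
 */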

/* Aligned to page sizes to match what's mapped via vsyscalls to userspace */
#define HVC_BOOT_ARRAY_SIZE \
	(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))

static struct pvclock_vsyscall_time_info
			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
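/*
 * For illustration (assuming 4K pages and the 64-byte, cacheline-aligned
 * pvclock_vsyscall_time_info): HVC_BOOT_ARRAY_SIZE works out to 64, so the
 * boot array above fills exactly one page, e.g.
 *
 *	static_assert(sizeof(hv_clock_boot) == PAGE_SIZE);
 */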
static struct pvclock_wall_clock wall_clock __bss_decrypted;
static struct pvclock_vsyscall_time_info *hvclock_mem;
DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
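/*
 * The per-CPU pointer set up above is reached through helpers from
 * <asm/kvmclock.h>; roughly (a sketch, see the header for the real
 * definitions):
 *
 *	static __always_inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
 *	{
 *		return &this_cpu_read(hv_clock_per_cpu)->pvti;
 *	}
 */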

/*
 * The wallclock is the time of day when we booted. Some time may have
 * elapsed since the hypervisor wrote that data, so we account for it
 * using system time.
 */
static void kvm_get_wallclock(struct timespec64 *now)
{
	wrmsrl(msr_kvm_wall_clock, slow_virt_to_phys(&wall_clock));
	preempt_disable();
	pvclock_read_wallclock(&wall_clock, this_cpu_pvti(), now);
	preempt_enable();
}

static int kvm_set_wallclock(const struct timespec64 *now)
{
	/* A guest has no way to set the host's RTC. */
	return -ENODEV;
}

static u64 kvm_clock_read(void)
{
	u64 ret;

	preempt_disable_notrace();
	ret = pvclock_clocksource_read_nowd(this_cpu_pvti());
	preempt_enable_notrace();
	return ret;
}

static u64 kvm_clock_get_cycles(struct clocksource *cs)
{
	return kvm_clock_read();
}

static noinstr u64 kvm_sched_clock_read(void)
{
	return pvclock_clocksource_read_nowd(this_cpu_pvti()) - kvm_sched_clock_offset;
}

static inline void kvm_sched_clock_init(bool stable)
{
	if (!stable)
		clear_sched_clock_stable();

	kvm_sched_clock_offset = kvm_clock_read();
	paravirt_set_sched_clock(kvm_sched_clock_read);

	pr_info("kvm-clock: using sched offset of %llu cycles",
		kvm_sched_clock_offset);

	BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
		     sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}

/*
 * Preset lpj (loops per jiffy) from the hypervisor-reported TSC frequency
 * instead of calibrating it. If the guest calibrated under heavy load it
 * would get a lower lpj and then run its delay loops without that load,
 * so no delay loop could finish on time. Any calibration heuristic is
 * bound to fail eventually, because a large pool of guests may be running
 * and disturbing each other, so we preset lpj here.
 */
static unsigned long kvm_get_tsc_khz(void)
{
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
	return pvclock_tsc_khz(this_cpu_pvti());
}

static void __init kvm_get_preset_lpj(void)
{
	unsigned long khz;
	u64 lpj;

	khz = kvm_get_tsc_khz();

	lpj = (u64)khz * 1000;
	do_div(lpj, HZ);
	preset_lpj = lpj;
}
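/*
 * Example: a 3000000 kHz (3 GHz) TSC with HZ=1000 presets
 * lpj = 3000000 * 1000 / 1000 = 3000000 loops per jiffy.
 */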

bool kvm_check_and_clear_guest_paused(void)
{
	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
	bool ret = false;

	if (!src)
		return ret;

	if ((src->pvti.flags & PVCLOCK_GUEST_STOPPED) != 0) {
		src->pvti.flags &= ~PVCLOCK_GUEST_STOPPED;
		pvclock_touch_watchdogs();
		ret = true;
	}
	return ret;
}

static int kvm_cs_enable(struct clocksource *cs)
{
	vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
	return 0;
}

static struct clocksource kvm_clock = {
	.name	= "kvm-clock",
	.read	= kvm_clock_get_cycles,
	.rating	= 400,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.enable	= kvm_cs_enable,
};

static void kvm_register_clock(char *txt)
{
	struct pvclock_vsyscall_time_info *src = this_cpu_hvclock();
	u64 pa;

	if (!src)
		return;

	/* Bit 0 of the address asks the hypervisor to start updating the page. */
	pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
	wrmsrl(msr_kvm_system_time, pa);
	pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}

static void kvm_save_sched_clock_state(void)
{}
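/*
 * Nothing needs saving across suspend; kvm_restore_sched_clock_state()
 * below simply re-registers the boot CPU's pvclock page on resume.
 */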

static void kvm_restore_sched_clock_state(void)
{
	kvm_register_clock("primary cpu clock, resume");
}

#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
	kvm_register_clock("secondary cpu clock");
}
#endif

void kvmclock_disable(void)
{
	/* Tell the hypervisor to stop updating this CPU's pvclock page. */
	if (msr_kvm_system_time)
		native_write_msr(msr_kvm_system_time, 0, 0);
}

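/*
 * Allocate (and, for memory-encrypted guests, map decrypted) extra pvclock
 * pages once there are more possible CPUs than fit in the one-page
 * hv_clock_boot[] array above.
 */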
static void __init kvmclock_init_mem(void)
{}

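/*
 * Early initcall: set up the overflow pvclock memory and, on 64-bit with a
 * stable TSC reported by the hypervisor, switch the clocksource to
 * VDSO_CLOCKMODE_PVCLOCK so the vDSO can read kvmclock without a syscall.
 */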
static int __init kvm_setup_vsyscall_timeinfo(void)
{}
early_initcall(kvm_setup_vsyscall_timeinfo);

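/*
 * CPU hotplug (prepare) callback: point the incoming CPU's hv_clock_per_cpu
 * entry at its slot in hv_clock_boot[] for the first HVC_BOOT_ARRAY_SIZE
 * CPUs, or into the hvclock_mem overflow area otherwise.
 */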
static int kvmclock_setup_percpu(unsigned int cpu)
{}

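/*
 * Main setup: pick the new or legacy kvmclock MSR pair from the KVM CPUID
 * features, register the boot CPU's pvclock page with the hypervisor, wire
 * up sched_clock, wallclock and TSC-calibration hooks in x86_platform,
 * preset lpj, and register "kvm-clock" as a clocksource.
 */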
void __init kvmclock_init(void)
{}