#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
/* Per-CPU clockevent device backing the Hyper-V synthetic timer. */
static struct clock_event_device __percpu *hv_clock_event;
/*
 * Offset applied when deriving sched_clock values from the Hyper-V
 * clock; written once during init (__ro_after_init), read-only after.
 */
static u64 hv_sched_clock_offset __ro_after_init;
/*
 * NOTE(review): inferred from the name - true when stimer0 interrupts
 * are delivered directly rather than via a VMbus message; confirm in
 * the elided hv_stimer_alloc() body.
 */
static bool direct_mode_enabled;
static int stimer0_irq = …;		/* stimer0 IRQ number (initializer elided) */
static int stimer0_message_sint;	/* SINT used for stimer0 VMbus messages */
/* Per-CPU cookie, presumably used as the dev_id for the per-CPU IRQ. */
static __maybe_unused DEFINE_PER_CPU(long, stimer0_evt);
/*
 * Interrupt handler invoked when stimer0 fires (body elided in this
 * view).  Exported so arch/VMbus interrupt code can dispatch to it.
 */
void hv_stimer0_isr(void)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * Per-CPU IRQ handler form of the stimer0 interrupt; presumably
 * forwards to hv_stimer0_isr() - body elided here, confirm in full file.
 */
static irqreturn_t __maybe_unused hv_stimer0_percpu_isr(int irq, void *dev_id)
{ … }
/*
 * clockevents ->set_next_event callback: program the synthetic timer
 * to expire after @delta (in device units).  Body elided in this view.
 */
static int hv_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{ … }
/* clockevents ->set_state_shutdown callback: stop the synthetic timer. */
static int hv_ce_shutdown(struct clock_event_device *evt)
{ … }
/* clockevents ->set_state_oneshot callback: switch timer to one-shot mode. */
static int hv_ce_set_oneshot(struct clock_event_device *evt)
{ … }
/*
 * Per-CPU timer setup for @cpu; signature matches a CPU-hotplug online
 * callback (cpuhotplug.h is included) - NOTE(review): registration
 * point is in the elided hv_stimer_alloc() body, confirm there.
 */
static int hv_stimer_init(unsigned int cpu)
{ … }
/*
 * Per-CPU teardown counterpart to hv_stimer_init(); exported for use
 * by other Hyper-V modules.  Body elided in this view.
 */
int hv_stimer_cleanup(unsigned int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * Weak no-op default: an architecture that delivers stimer0 interrupts
 * directly overrides this to register @handler as the stimer0 callback.
 * (Stray ';' after the function body removed - it was an extra
 * file-scope empty declaration, not valid ISO C before C23.)
 */
void __weak hv_setup_stimer0_handler(void (*handler)(void))
{
}
/*
 * Weak no-op default: overridden by arch code to unregister the
 * stimer0 callback installed via hv_setup_stimer0_handler().
 * (Stray ';' after the function body removed - it was an extra
 * file-scope empty declaration, not valid ISO C before C23.)
 */
void __weak hv_remove_stimer0_handler(void)
{
}
#ifdef CONFIG_ACPI
/*
 * ACPI build: acquire and configure the stimer0 IRQ (presumably filling
 * in stimer0_irq).  Body elided in this view.
 */
static int hv_setup_stimer0_irq(void)
{ … }
/* ACPI build: release the stimer0 IRQ acquired by hv_setup_stimer0_irq(). */
static void hv_remove_stimer0_irq(void)
{ … }
#else
/* Non-ACPI build: no stimer0 IRQ to set up; report success. */
static int hv_setup_stimer0_irq(void)
{
	return 0;
}
/* Non-ACPI build: nothing was set up, so nothing to remove. */
static void hv_remove_stimer0_irq(void)
{
}
#endif
/*
 * Top-level initialization of synthetic-timer support; @have_percpu_irqs
 * presumably selects the per-CPU IRQ delivery path.  Exported for other
 * Hyper-V modules.  Body elided in this view.
 */
int hv_stimer_alloc(bool have_percpu_irqs)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * Legacy (VMbus message based) per-CPU timer init for @cpu using the
 * given @sint - NOTE(review): semantics inferred from the name and from
 * stimer0_message_sint above; body elided.  Exported.
 */
void hv_stimer_legacy_init(unsigned int cpu, int sint)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * Legacy per-CPU teardown counterpart to hv_stimer_legacy_init().
 * Body elided in this view.  Exported.
 */
void hv_stimer_legacy_cleanup(unsigned int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * System-wide teardown of synthetic-timer support (all CPUs).
 * Body elided in this view.  Exported.
 */
void hv_stimer_global_cleanup(void)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * Read the Hyper-V reference counter via its MSR interface - the
 * slower fallback path when no TSC page is available.  Body elided.
 */
static __always_inline u64 read_hv_clock_msr(void)
{ … }
/*
 * Backing storage for the Hyper-V TSC reference page.  Placed in
 * __bss_decrypted so the hypervisor can write it in SEV/encrypted-memory
 * guests; alignment requirement elided along with the union contents.
 */
static union { … } tsc_pg __bss_decrypted __aligned(…);
/* Points at the TSC page above (initializer elided). */
static struct ms_hyperv_tsc_page *tsc_page = …;
/* PFN of the TSC page, presumably filled in during init/remap. */
static unsigned long tsc_pfn;
/* Accessor for tsc_pfn; exported for other Hyper-V code.  Body elided. */
unsigned long hv_get_tsc_pfn(void)
{ … }
EXPORT_SYMBOL_GPL(…);
/* Accessor for the TSC reference page pointer; exported.  Body elided. */
struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{ … }
EXPORT_SYMBOL_GPL(…);
/*
 * Read the Hyper-V clock via the TSC reference page - the fast path.
 * Body elided in this view.
 */
static __always_inline u64 read_hv_clock_tsc(void)
{ … }
/*
 * clocksource ->read wrapper around read_hv_clock_tsc(); notrace
 * because clocksource reads must not recurse into tracing.
 */
static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
{ … }
/*
 * sched_clock read function based on the TSC page; noinstr since
 * sched_clock can be called from non-instrumentable contexts.
 * Presumably applies hv_sched_clock_offset - body elided, confirm.
 */
static u64 noinstr read_hv_sched_clock_tsc(void)
{ … }
/* clocksource ->suspend hook for the TSC-page clocksource.  Body elided. */
static void suspend_hv_clock_tsc(struct clocksource *arg)
{ … }
/* clocksource ->resume hook: re-establish TSC-page state after suspend. */
static void resume_hv_clock_tsc(struct clocksource *arg)
{ … }
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
/*
 * clocksource ->enable hook, compiled only when vDSO clock-mode support
 * exists - presumably prepares user-space (vDSO) access.  Body elided.
 */
static int hv_cs_enable(struct clocksource *cs)
{ … }
#endif
static struct clocksource hyperv_cs_tsc = …;
/* clocksource ->read wrapper around the MSR-based clock read; notrace. */
static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
{ … }
static struct clocksource hyperv_cs_msr = …;
#ifdef CONFIG_GENERIC_SCHED_CLOCK
static __always_inline void hv_setup_sched_clock(void *sched_clock)
{
sched_clock_register(sched_clock, 64, NSEC_PER_SEC);
}
#elif defined CONFIG_PARAVIRT
/*
 * PARAVIRT build: hook sched_clock through the paravirt machinery
 * instead of sched_clock_register().  Body elided in this view.
 */
static __always_inline void hv_setup_sched_clock(void *sched_clock)
{ … }
#else
static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
#endif
/*
 * Boot-time setup of the TSC-page clocksource (hyperv_cs_tsc).
 * Body elided in this view.
 */
static void __init hv_init_tsc_clocksource(void)
{ … }
/*
 * Public boot-time entry point: initialize Hyper-V clocksource support,
 * presumably choosing between the TSC-page and MSR paths.  Body elided.
 */
void __init hv_init_clocksource(void)
{ … }
/*
 * Boot-time remapping of the TSC reference page - NOTE(review):
 * presumably needed for isolated/encrypted-memory guests given the
 * __bss_decrypted placement of tsc_pg; confirm in the elided body.
 */
void __init hv_remap_tsc_clocksource(void)
{ … }