#define pr_fmt(fmt) …
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
#include <asm/e820/api.h>
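/*
 * Guest-side support for KVM paravirtual features: asynchronous page
 * faults, steal-time accounting, PV EOI, PV IPIs and TLB flushing, PV
 * spinlocks, and SEV/SEV-ES guest helpers.
 */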
DEFINE_STATIC_KEY_FALSE_RO(kvm_async_pf_enabled);
static int kvmapf = …;
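/* "no-kvmapf" on the command line disables asynchronous page faults. */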
static int __init parse_no_kvmapf(char *arg)
{ … }
early_param(…);
static int steal_acc = …;
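/* "no-steal-acc" disables steal-time accounting even if the host offers it. */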
static int __init parse_no_stealacc(char *arg)
{ … }
early_param(…);
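/*
 * Per-CPU state for async PF and steal time.  apf_reason and steal_time
 * are written by the host, so under SEV they must be mapped decrypted
 * (see sev_map_percpu_data()).
 */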
static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(…);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(…) __visible;
static int has_steal_clock = …;
static int has_guest_poll = …;
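/*
 * No need for any "IO delay" on KVM: a port 0x80 access would only cause
 * a pointless VM exit, so the paravirt io_delay hook becomes a no-op.
 */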
static void kvm_io_delay(void)
{ … }
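/*
 * Tasks sleeping on a "page not present" async PF are hashed by token
 * into async_pf_sleepers and woken once the host reports the page ready.
 */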
#define KVM_TASK_SLEEP_HASHBITS …
#define KVM_TASK_SLEEP_HASHSIZE …
struct kvm_task_sleep_node { … };
static struct kvm_task_sleep_head { … } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
u32 token)
{ … }
static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{ … }
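/*
 * Wait for the async #PF identified by @token to be resolved: queue a
 * sleep node under the hash bucket lock and schedule until
 * kvm_async_pf_task_wake() delivers the wakeup.
 */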
void kvm_async_pf_task_wait_schedule(u32 token)
{ … }
EXPORT_SYMBOL_GPL(…);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{ … }
static void apf_task_wake_all(void)
{ … }
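/*
 * Wake the task sleeping on @token (~0 wakes all waiters).  If the "page
 * ready" notification arrives before the task managed to queue itself, a
 * dummy node is left behind so the would-be sleeper returns immediately.
 */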
void kvm_async_pf_task_wake(u32 token)
{ … }
EXPORT_SYMBOL_GPL(…);
noinstr u32 kvm_read_and_reset_apf_flags(void)
{ … }
EXPORT_SYMBOL_GPL(…);
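/*
 * Called from the #PF entry path when the async PF static key is on.
 * Returns true if the fault was really a "page not present" notification
 * from the host, in which case the current task has been put to sleep.
 */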
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{ … }
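/* Interrupt entry for the host's "page ready" async PF notifications. */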
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{ … }
static void __init paravirt_ops_setup(void)
{ … }
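/*
 * Register this CPU's steal_time area with the host by writing its
 * physical address, plus the enable bit, to MSR_KVM_STEAL_TIME.
 */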
static void kvm_register_steal_time(void)
{ … }
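/*
 * Paravirtual EOI: if the host has set the PV EOI bit in kvm_apic_eoi,
 * a test-and-clear of that bit acknowledges the interrupt without the
 * costly APIC EOI register write.
 */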
static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = …;
static notrace __maybe_unused void kvm_guest_apic_eoi_write(void)
{ … }
static void kvm_guest_cpu_init(void)
{ … }
static void kvm_pv_disable_apf(void)
{ … }
static void kvm_disable_steal_time(void)
{ … }
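/*
 * Read the vCPU's accumulated steal time.  The host bumps an even/odd
 * version field around updates, so loop until a consistent snapshot is
 * observed.
 */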
static u64 kvm_steal_clock(int cpu)
{ … }
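/*
 * Under SEV the host cannot access encrypted guest memory, so the shared
 * per-CPU areas (apf_reason, steal_time, kvm_apic_eoi) are remapped
 * decrypted early during boot.
 */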
static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{ … }
static void __init sev_map_percpu_data(void)
{ … }
static void kvm_guest_cpu_offline(bool shutdown)
{ … }
static int kvm_cpu_online(unsigned int cpu)
{ … }
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
static bool pv_tlb_flush_supported(void)
{ … }
static bool pv_ipi_supported(void)
{ … }
static bool pv_sched_yield_supported(void)
{ … }
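/*
 * PV IPIs: pack destination APIC IDs into bitmap chunks relative to a
 * minimum APIC ID and send each chunk with one KVM_HC_SEND_IPI hypercall
 * instead of one ICR write per target CPU.
 */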
#define KVM_IPI_CLUSTER_SIZE …
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{ … }
static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{ … }
static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{ … }
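/*
 * Late initcall: query the firmware-provided EFI variable to learn
 * whether SEV live migration is enabled and, if so, tell the host via
 * MSR_KVM_MIGRATION_CONTROL.
 */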
static int __init setup_efi_kvm_sev_migration(void)
{ … }
late_initcall(setup_efi_kvm_sev_migration);
static __init void kvm_setup_pv_ipi(void)
{ … }
static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{ … }
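/*
 * PV TLB flush: rather than IPIing a preempted vCPU, set
 * KVM_VCPU_FLUSH_TLB in its steal_time record and let the host flush the
 * TLB when that vCPU is scheduled back in.
 */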
static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{ … }
static __init int kvm_alloc_cpumask(void)
{ … }
arch_initcall(kvm_alloc_cpumask);
static void __init kvm_smp_prepare_boot_cpu(void)
{ … }
static int kvm_cpu_down_prepare(unsigned int cpu)
{ … }
#endif
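/* Syscore ops: tear down PV features on suspend, re-enable them on resume. */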
static int kvm_suspend(void)
{ … }
static void kvm_resume(void)
{ … }
static struct syscore_ops kvm_syscore_ops = …;
static void kvm_pv_guest_cpu_reboot(void *unused)
{ … }
static int kvm_pv_reboot_notify(struct notifier_block *nb,
unsigned long code, void *unused)
{ … }
static struct notifier_block kvm_pv_reboot_nb = …;
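/*
 * On a crash, disable the PV features on this CPU so the host stops
 * writing into guest memory before the kdump kernel takes over.
 */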
#ifdef CONFIG_CRASH_DUMP
static void kvm_crash_shutdown(struct pt_regs *regs)
{ … }
#endif
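/*
 * vcpu_is_preempted() backend: report whether the vCPU that owns @cpu's
 * steal_time record is currently preempted by the host.
 */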
#if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
bool __kvm_vcpu_is_preempted(long cpu);
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
#else
#include <asm/asm-offsets.h>
extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
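/*
 * Hand-written assembly version for x86-64; it avoids the save/restore of
 * all caller-saved registers that a C callee-save thunk would need.
 */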
#define PV_VCPU_PREEMPTED_ASM …
DEFINE_ASM_FUNC(…);
#endif
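/*
 * Central guest setup: register the reboot notifier and syscore ops, then
 * enable steal-time accounting, async page faults, PV EOI and, on SMP,
 * PV IPIs and TLB flushing, according to the host's advertised features.
 */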
static void __init kvm_guest_init(void)
{ … }
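/*
 * KVM's CPUID signature normally sits at leaf 0x40000000, but it may be
 * offset when other hypervisor leaves (e.g. Hyper-V's) come first, so the
 * hypervisor CPUID range is scanned for it.
 */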
static noinline uint32_t __kvm_cpuid_base(void)
{ … }
static inline uint32_t kvm_cpuid_base(void)
{ … }
bool kvm_para_available(void)
{ … }
EXPORT_SYMBOL_GPL(…);
unsigned int kvm_arch_para_features(void)
{ … }
unsigned int kvm_arch_para_hints(void)
{ … }
EXPORT_SYMBOL_GPL(…);
static uint32_t __init kvm_detect(void)
{ … }
static void __init kvm_apic_init(void)
{ … }
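/*
 * KVM_FEATURE_MSI_EXT_DEST_ID: the host accepts 15-bit MSI destination
 * IDs, allowing more than 255 CPUs without interrupt remapping.
 */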
static bool __init kvm_msi_ext_dest_id(void)
{ … }
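/*
 * Notify the host of a change in a range's encryption status via the
 * KVM_HC_MAP_GPA_RANGE hypercall, keeping shared/private tracking in sync
 * for live migration.
 */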
static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
{ … }
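/*
 * Platform setup: initialize kvmclock and, for memory-encrypted guests
 * with migration control, report the encryption status of RAM regions and
 * declare the guest migration-ready.
 */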
static void __init kvm_init_platform(void)
{ … }
#if defined(CONFIG_AMD_MEM_ENCRYPT)
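/*
 * An SEV-ES host cannot read guest register state, so hypercall arguments
 * and results are passed through the GHCB instead.
 */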
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{ … }
static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{ … }
#endif
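/* Hypervisor detection entry, picked up by init_hypervisor_platform(). */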
const __initconst struct hypervisor_x86 x86_hyper_kvm = …;
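/*
 * The steal-time static keys can only be flipped once the jump label
 * infrastructure is up, hence a separate arch_initcall.
 */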
static __init int activate_jump_labels(void)
{ … }
arch_initcall(activate_jump_labels);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
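/*
 * PV spinlocks: a vCPU spinning on a lock halts instead (kvm_wait) and is
 * woken by the lock holder through the KVM_HC_KICK_CPU hypercall
 * (kvm_kick_cpu).
 */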
static void kvm_kick_cpu(int cpu)
{ … }
#include <asm/qspinlock.h>
static void kvm_wait(u8 *ptr, u8 val)
{ … }
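/*
 * PV spinlocks are only enabled when the host advertises
 * KVM_FEATURE_PV_UNHALT, and are skipped on single-vCPU guests or when
 * the host hints that vCPUs never get preempted (KVM_HINTS_REALTIME).
 */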
void __init kvm_spinlock_init(void)
{ … }
#endif
#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
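/*
 * Guest-side haltpoll: when the guest polls in idle, tell the host via
 * MSR_KVM_POLL_CONTROL to stop its own halt polling so the work is not
 * done twice.
 */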
static void kvm_disable_host_haltpoll(void *i)
{ … }
static void kvm_enable_host_haltpoll(void *i)
{ … }
void arch_haltpoll_enable(unsigned int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void arch_haltpoll_disable(unsigned int cpu)
{ … }
EXPORT_SYMBOL_GPL(…);
#endif