#define pr_fmt(fmt) …
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "pmu.h"
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/reboot.h>
#include <asm/fpu/api.h>
#include <trace/events/ipi.h>
#include "trace.h"
#include "svm.h"
#include "svm_ops.h"
#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"
MODULE_AUTHOR(…);
MODULE_DESCRIPTION(…);
MODULE_LICENSE(…);
#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif
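/* Descriptor type values used by init_sys_seg() to seed the VMCB's LDTR and TR. */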
#define SEG_TYPE_LDT …
#define SEG_TYPE_BUSY_TSS16 …
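/* Set when the host CPU needs the AMD erratum 383 workaround (see is_erratum_383()). */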
static bool erratum_383_found __read_mostly;
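/*
 * Offsets into the MSR permission bitmap for the MSRs KVM may pass through;
 * built by init_msrpm_offsets() and used by the nested code to merge L1's bitmap.
 */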
u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
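/* Host OSVW (OS Visible Workaround) state, mirrored into guests by svm_init_osvw(). */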
static uint64_t osvw_len = …, osvw_status;
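/* Last TSC ratio written on this CPU, cached to avoid redundant WRMSRs. */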
static DEFINE_PER_CPU(u64, current_tsc_ratio);
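/*
 * MSRs that may be passed through to the guest; "always" entries are passed
 * through from vCPU creation, the rest only once the matching feature is
 * enabled for the guest.
 */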
#define X2APIC_MSR(x) …
static const struct svm_direct_access_msrs { … } direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = …;
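/* PAUSE filter (pause-loop-exiting) tuning, analogous to the VMX ple_window knobs. */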
static unsigned short pause_filter_thresh = …;
module_param(pause_filter_thresh, ushort, 0444);
static unsigned short pause_filter_count = …;
module_param(pause_filter_count, ushort, 0444);
static unsigned short pause_filter_count_grow = …;
module_param(pause_filter_count_grow, ushort, 0444);
static unsigned short pause_filter_count_shrink = …;
module_param(pause_filter_count_shrink, ushort, 0444);
static unsigned short pause_filter_count_max = …;
module_param(pause_filter_count_max, ushort, 0444);
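/* use nested page tables (NPT) when the CPU supports them */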
bool npt_enabled = …;
module_param_named(npt, npt_enabled, bool, 0444);
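/* allow nested virtualization in KVM/SVM */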
static int nested = …;
module_param(nested, int, 0444);
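/* enable/disable Next RIP Save */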
int nrips = …;
module_param(nrips, int, 0444);
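/* enable/disable Virtual VMLOAD VMSAVE */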
static int vls = …;
module_param(vls, int, 0444);
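/* enable/disable Virtual GIF */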
int vgif = …;
module_param(vgif, int, 0444);
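/* enable/disable LBR virtualization */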
int lbrv = …;
module_param(lbrv, int, 0444);
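/* enable/disable TSC ratio MSR (TSC scaling) support for guests */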
static int tsc_scaling = …;
module_param(tsc_scaling, int, 0444);
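/* enable/disable AVIC (Advanced Virtual Interrupt Controller) */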
static bool avic;
module_param(avic, bool, 0444);
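/* dump VMCB state to the kernel log when VMRUN fails with invalid state (default off) */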
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
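/* intercept SMIs delivered while the guest is running */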
bool intercept_smi = …;
module_param(intercept_smi, bool, 0444);
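/* enable/disable virtual NMI (vNMI) support */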
bool vnmi = …;
module_param(vnmi, bool, 0444);
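/*
 * Work around CPUs that raise #GP instead of the expected intercept/#UD for
 * SVM instructions; gp_interception() decodes and emulates them when set.
 */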
static bool svm_gp_erratum_intercept = …;
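/* opcode bytes of RSM, handed to the emulator by rsm_interception() */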
static u8 rsm_ins_bytes[] = …;
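/* physical address of the I/O permission bitmap shared by all VMCBs */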
static unsigned long iopm_base;
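/* per-CPU state: ASID allocation and the host save area programmed into MSR_VM_HSAVE_PA */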
DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
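/* user-return MSR slot used to restore the host's MSR_TSC_AUX */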
static int tsc_aux_uret_slot __read_mostly = …;
static const u32 msrpm_ranges[] = …;
#define NUM_MSR_MAPS …
#define MSRS_RANGE_SIZE …
#define MSRS_IN_RANGE …
u32 svm_msrpm_offset(u32 msr)
{ … }
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);
static int get_npt_level(void)
{ … }
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{ … }
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{ … }
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{ … }
static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
bool commit_side_effects)
{ … }
static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{ … }
static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
{ … }
static void svm_inject_exception(struct kvm_vcpu *vcpu)
{ … }
static void svm_init_erratum_383(void)
{ … }
static void svm_init_osvw(struct kvm_vcpu *vcpu)
{ … }
static bool __kvm_is_svm_supported(void)
{ … }
static bool kvm_is_svm_supported(void)
{ … }
static int svm_check_processor_compat(void)
{ … }
static void __svm_write_tsc_multiplier(u64 multiplier)
{ … }
static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{ … }
static inline void kvm_cpu_svm_disable(void)
{ … }
static void svm_emergency_disable_virtualization_cpu(void)
{ … }
static void svm_disable_virtualization_cpu(void)
{ … }
static int svm_enable_virtualization_cpu(void)
{ … }
static void svm_cpu_uninit(int cpu)
{ … }
static int svm_cpu_init(int cpu)
{ … }
static void set_dr_intercepts(struct vcpu_svm *svm)
{ … }
static void clr_dr_intercepts(struct vcpu_svm *svm)
{ … }
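/*
 * MSR permission bitmap management: a read/write argument of 1 means "do not
 * intercept" (pass the MSR through), 0 means intercept.  The shadow state lets
 * userspace MSR filter changes be re-applied later.
 */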
static int direct_access_msr_slot(u32 msr)
{ … }
static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
int write)
{ … }
static bool valid_msr_intercept(u32 index)
{ … }
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{ … }
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
u32 msr, int read, int write)
{ … }
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write)
{ … }
u32 *svm_vcpu_alloc_msrpm(void)
{ … }
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{ … }
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
{ … }
void svm_vcpu_free_msrpm(u32 *msrpm)
{ … }
static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{ … }
static void add_msr_offset(u32 offset)
{ … }
static void init_msrpm_offsets(void)
{ … }
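/* LBR virtualization: keep LBRV and the LBR MSR pass-through in sync with the guest's DEBUGCTL */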
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{ … }
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{ … }
static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{ … }
static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
{ … }
void svm_update_lbrv(struct kvm_vcpu *vcpu)
{ … }
void disable_nmi_singlestep(struct vcpu_svm *svm)
{ … }
static void grow_ple_window(struct kvm_vcpu *vcpu)
{ … }
static void shrink_ple_window(struct kvm_vcpu *vcpu)
{ … }
static void svm_hardware_unsetup(void)
{ … }
static void init_seg(struct vmcb_seg *seg)
{ … }
static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{ … }
static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{ … }
static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
{ … }
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
{ … }
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
{ … }
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
struct vcpu_svm *svm)
{ … }
static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
{ … }
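/* Establish the initial intercepts and save-area state of a vCPU's VMCB. */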
static void init_vmcb(struct kvm_vcpu *vcpu)
{ … }
static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
{ … }
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{ … }
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
{ … }
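/* Allocate the vCPU's VMCB, MSR permission bitmap and, for SEV-ES guests, its VMSA page. */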
static int svm_vcpu_create(struct kvm_vcpu *vcpu)
{ … }
static void svm_clear_current_vmcb(struct vmcb *vmcb)
{ … }
static void svm_vcpu_free(struct kvm_vcpu *vcpu)
{ … }
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{ … }
static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
{ … }
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{ … }
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{ … }
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{ … }
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{ … }
static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
{ … }
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{ … }
static void svm_set_vintr(struct vcpu_svm *svm)
{ … }
static void svm_clear_vintr(struct vcpu_svm *svm)
{ … }
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{ … }
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{ … }
static void svm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{ … }
static int svm_get_cpl(struct kvm_vcpu *vcpu)
{ … }
static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{ … }
static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{ … }
static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{ … }
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{ … }
static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{ … }
static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{ … }
static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{ … }
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{ … }
static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{ … }
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{ … }
static void svm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{ … }
static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
{ … }
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{ … }
static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
{ … }
static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{ … }
static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{ … }
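/*
 * Exit handlers for intercepted exceptions, interrupts and instructions:
 * return 1 to resume the guest, 0 to bounce the exit out to userspace.
 */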
static int pf_interception(struct kvm_vcpu *vcpu)
{ … }
static int npf_interception(struct kvm_vcpu *vcpu)
{ … }
static int db_interception(struct kvm_vcpu *vcpu)
{ … }
static int bp_interception(struct kvm_vcpu *vcpu)
{ … }
static int ud_interception(struct kvm_vcpu *vcpu)
{ … }
static int ac_interception(struct kvm_vcpu *vcpu)
{ … }
static bool is_erratum_383(void)
{ … }
static void svm_handle_mce(struct kvm_vcpu *vcpu)
{ … }
static int mc_interception(struct kvm_vcpu *vcpu)
{ … }
static int shutdown_interception(struct kvm_vcpu *vcpu)
{ … }
static int io_interception(struct kvm_vcpu *vcpu)
{ … }
static int nmi_interception(struct kvm_vcpu *vcpu)
{ … }
static int smi_interception(struct kvm_vcpu *vcpu)
{ … }
static int intr_interception(struct kvm_vcpu *vcpu)
{ … }
static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
{ … }
static int vmload_interception(struct kvm_vcpu *vcpu)
{ … }
static int vmsave_interception(struct kvm_vcpu *vcpu)
{ … }
static int vmrun_interception(struct kvm_vcpu *vcpu)
{ … }
enum { … };
static int svm_instr_opcode(struct kvm_vcpu *vcpu)
{ … }
static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
{ … }
static int gp_interception(struct kvm_vcpu *vcpu)
{ … }
void svm_set_gif(struct vcpu_svm *svm, bool value)
{ … }
static int stgi_interception(struct kvm_vcpu *vcpu)
{ … }
static int clgi_interception(struct kvm_vcpu *vcpu)
{ … }
static int invlpga_interception(struct kvm_vcpu *vcpu)
{ … }
static int skinit_interception(struct kvm_vcpu *vcpu)
{ … }
static int task_switch_interception(struct kvm_vcpu *vcpu)
{ … }
static void svm_clr_iret_intercept(struct vcpu_svm *svm)
{ … }
static void svm_set_iret_intercept(struct vcpu_svm *svm)
{ … }
static int iret_interception(struct kvm_vcpu *vcpu)
{ … }
static int invlpg_interception(struct kvm_vcpu *vcpu)
{ … }
static int emulate_on_interception(struct kvm_vcpu *vcpu)
{ … }
static int rsm_interception(struct kvm_vcpu *vcpu)
{ … }
static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
unsigned long val)
{ … }
#define CR_VALID …
static int cr_interception(struct kvm_vcpu *vcpu)
{ … }
static int cr_trap(struct kvm_vcpu *vcpu)
{ … }
static int dr_interception(struct kvm_vcpu *vcpu)
{ … }
static int cr8_write_interception(struct kvm_vcpu *vcpu)
{ … }
static int efer_trap(struct kvm_vcpu *vcpu)
{ … }
static int svm_get_feature_msr(u32 msr, u64 *data)
{ … }
static bool
sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{ … }
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{ … }
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{ … }
static int msr_interception(struct kvm_vcpu *vcpu)
{ … }
static int interrupt_window_interception(struct kvm_vcpu *vcpu)
{ … }
static int pause_interception(struct kvm_vcpu *vcpu)
{ … }
static int invpcid_interception(struct kvm_vcpu *vcpu)
{ … }
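/* exit-code to handler dispatch table, used by svm_invoke_exit_handler() */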
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = …;
static void dump_vmcb(struct kvm_vcpu *vcpu)
{ … }
static bool svm_check_exit_valid(u64 exit_code)
{ … }
static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
{ … }
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
{ … }
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
u64 *info1, u64 *info2,
u32 *intr_info, u32 *error_code)
{ … }
static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{ … }
static void pre_svm_run(struct kvm_vcpu *vcpu)
{ … }
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{ … }
static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
{ … }
static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
{ … }
static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
{ … }
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
int trig_mode, int vector)
{ … }
static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector)
{ … }
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{ … }
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{ … }
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{ … }
bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
{ … }
static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{ … }
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
{ … }
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{ … }
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{ … }
static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{ … }
static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
{ … }
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
{ … }
static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
{ … }
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{ … }
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{ … }
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{ … }
static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
int type)
{ … }
static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
{ … }
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{ … }
static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
{ … }
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{ … }
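/* The world switch itself: noinstr code wrapping the VMRUN/#VMEXIT assembly in vmenter.S. */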
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{ … }
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
bool force_immediate_exit)
{ … }
static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
int root_level)
{ … }
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{ … }
static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
{ … }
static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{ … }
static bool svm_has_wbinvd_exit(void)
{ … }
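/* map of x86 emulator intercepts to SVM exit codes, used by svm_check_intercept() */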
#define PRE_EX …
#define POST_EX …
#define POST_MEM …
static const struct __x86_intercept { … } x86_intercept_map[] = …;
#undef PRE_EX
#undef POST_EX
#undef POST_MEM
static int svm_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage,
struct x86_exception *exception)
{ … }
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{ … }
static void svm_setup_mce(struct kvm_vcpu *vcpu)
{ … }
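/* SMM support: SMI blocking checks and state save/restore across SMM entry and exit. */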
#ifdef CONFIG_KVM_SMM
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
{ … }
static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{ … }
static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
{ … }
static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{ … }
static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{ … }
#endif
static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len)
{ … }
static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{ … }
static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{ … }
static void svm_vm_destroy(struct kvm *kvm)
{ … }
static int svm_vm_init(struct kvm *kvm)
{ … }
static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
{ … }
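/* The kvm_x86_ops table that plugs the SVM implementation into the generic x86 KVM code. */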
static struct kvm_x86_ops svm_x86_ops __initdata = …;
static __init void svm_adjust_mmio_mask(void)
{ … }
static __init void svm_set_cpu_caps(void)
{ … }
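/*
 * One-time hardware setup: probe optional SVM features, allocate the I/O
 * permission bitmap, and initialize nested, SEV and AVIC support.
 */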
static __init int svm_hardware_setup(void)
{ … }
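/* init-time hooks (hardware setup, runtime ops, PMU ops) registered by svm_init() */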
static struct kvm_x86_init_ops svm_init_ops __initdata = …;
static void __svm_exit(void)
{ … }
static int __init svm_init(void)
{ … }
static void __exit svm_exit(void)
{ … }
module_init(…)
module_exit(…)