#define pr_fmt(fmt) …
#include <linux/objtool.h>
#include <linux/percpu.h>
#include <asm/debugreg.h>
#include <asm/mmu_context.h>
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "posted_intr.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"
#include "smm.h"
static bool __read_mostly enable_shadow_vmcs = …;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
static bool __read_mostly nested_early_check = …;
module_param(nested_early_check, bool, S_IRUGO);
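/*
 * enable_shadow_vmcs (default on, cleared at setup if the CPU lacks shadow
 * VMCS support) lets hardware satisfy most L1 VMREAD/VMWRITE accesses
 * without a VM-exit by shadowing vmcs12 fields.  nested_early_check
 * (default off) additionally runs a hardware consistency check on vmcs12
 * state before each nested VM-entry, catching bad configurations earlier
 * at the cost of an extra early VMLAUNCH/VMRESUME.
 */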
#define CC …
#define VMX_VPID_EXTENT_SUPPORTED_MASK …
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE …
enum { … };
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
#define vmx_vmread_bitmap …
#define vmx_vmwrite_bitmap …
struct shadow_vmcs_field { … };
static struct shadow_vmcs_field shadow_read_only_fields[] = …;
static int max_shadow_read_only_fields = …;
static struct shadow_vmcs_field shadow_read_write_fields[] = …;
static int max_shadow_read_write_fields = …;
static void init_vmcs_shadow_fields(void)
{ … }
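/*
 * init_vmcs_shadow_fields() runs at hardware setup: it drops entries from
 * the two shadow field tables that the current configuration cannot expose
 * and clears the matching bits in vmx_vmread_bitmap/vmx_vmwrite_bitmap so
 * that L1 accesses to the remaining fields are handled entirely by the
 * shadow VMCS, without a VM-exit.
 */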
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{ … }
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{ … }
static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
u32 vm_instruction_error)
{ … }
static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{ … }
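/*
 * The helpers above implement the SDM's VMX instruction status convention:
 * VMsucceed clears all six status flags, VMfailInvalid sets only CF (no
 * current VMCS to report an error in), and VMfailValid sets only ZF and
 * stores an error number in the current VMCS's VM-instruction error field.
 * nested_vmx_fail() picks the right flavor based on whether a current
 * vmcs12 exists.  A minimal sketch of the VMsucceed case (illustrative;
 * example_* names are hypothetical, not the in-tree bodies):
 */
static int example_vmx_succeed(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = kvm_get_rflags(vcpu);

	/* Clearing CF, PF, AF, ZF, SF and OF signals VMsucceed to L1. */
	rflags &= ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF);
	kvm_set_rflags(vcpu, rflags);

	/* Emulated VMX instructions complete by skipping past the insn. */
	return kvm_skip_emulated_instruction(vcpu);
}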
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{ … }
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{ … }
static inline u64 vmx_control_msr(u32 low, u32 high)
{ … }
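/*
 * VMX capability MSRs describe each control word as an allowed-0/allowed-1
 * pair: "low" bits must be 1, and only "high" bits may be 1.  The pair is
 * packed into a single 64-bit MSR value.  Sketches of the two helpers
 * above (illustrative; example_* names are hypothetical):
 */
static inline bool example_control_verify(u32 control, u32 low, u32 high)
{
	/* All must-be-1 bits set, and nothing outside the may-be-1 mask. */
	return !(~control & low) && !(control & ~high);
}

static inline u64 example_control_msr(u32 low, u32 high)
{
	/* Allowed-0 settings in bits 31:0, allowed-1 settings in 63:32. */
	return low | ((u64)high << 32);
}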
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{ … }
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{ … }
static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
{ … }
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
struct loaded_vmcs *prev)
{ … }
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{ … }
static void free_nested(struct kvm_vcpu *vcpu)
{ … }
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{ … }
#define EPTP_PA_MASK …
static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{ … }
static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
gpa_t addr)
{ … }
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{ … }
static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{ … }
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{ … }
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{ … }
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
u16 error_code)
{ … }
static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
u32 error_code)
{ … }
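/*
 * Whether a #PF in L2 is reflected to L1 depends on bit 14 of L1's
 * exception bitmap, refined by the page-fault error-code mask/match pair:
 * per the SDM, a mismatch *inverts* the bitmap's verdict.  A sketch of
 * that rule (illustrative; the in-tree helper also factors in enable_ept
 * and the queued exception's payload):
 */
static bool example_pf_wants_vmexit(struct vmcs12 *vmcs12, u16 error_code)
{
	bool bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	bool inequality = (error_code & vmcs12->page_fault_error_code_mask) !=
			  vmcs12->page_fault_error_code_match;

	return inequality ^ bit;
}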
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
unsigned long *msr_bitmap_l0,
u32 msr, int type)
{ … }
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{ … }
#define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) …
BUILD_NVMX_MSR_INTERCEPT_HELPER(…)
BUILD_NVMX_MSR_INTERCEPT_HELPER(…)
static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
unsigned long *msr_bitmap_l1,
unsigned long *msr_bitmap_l0,
u32 msr, int types)
{ … }
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
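/*
 * Rather than intercepting every MSR while L2 runs, KVM merges L1's MSR
 * bitmap with its own into the bitmap actually used by vmcs02: an access
 * is intercepted if either L0 or L1 wants the intercept, so L2 touches an
 * MSR directly only when both hypervisors allow it.  The x2APIC MSR range
 * is special-cased so APIC virtualization keeps working for L2.
 */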
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{ … }
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
u32 count, u64 addr)
{ … }
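/*
 * The atomic MSR-switch areas are arrays of 16-byte vmx_msr_entry records.
 * Architecturally the base must be 16-byte aligned and the whole array
 * must be addressable; a count of zero disables the area.  A sketch close
 * to the in-tree check (example_* is hypothetical):
 */
static int example_check_msr_switch(struct kvm_vcpu *vcpu, u32 count, u64 addr)
{
	if (count == 0)
		return 0;

	/* Base must be 16-byte aligned; the whole array must be legal. */
	if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
	    !kvm_vcpu_is_legal_gpa(vcpu,
				   addr + count * sizeof(struct vmx_msr_entry) - 1))
		return -EINVAL;

	return 0;
}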
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{ … }
static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{ … }
static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{ … }
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{ … }
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{ … }
static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
u32 msr_index,
u64 *data)
{ … }
static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
struct vmx_msr_entry *e)
{ … }
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{ … }
static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{ … }
static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
u32 msr_index)
{ … }
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
bool nested_ept, bool reload_pdptrs,
enum vm_entry_failure_code *entry_failure_code)
{ … }
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{ … }
static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
bool is_vmenter)
{ … }
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{ … }
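/*
 * Userspace may restore VMX capability MSRs (e.g. after migration), but
 * only to a more restrictive configuration: it must never grant L1 a
 * feature KVM did not offer.  The subset test used for that is essentially
 * the following (illustrative; example_* is hypothetical):
 */
static bool example_is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	/* Every bit of subset (under mask) is already set in superset. */
	return (superset | subset) == superset;
}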
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{ … }
static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
u32 **low, u32 **high)
{ … }
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{ … }
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{ … }
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{ … }
static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
{ … }
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{ … }
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{ … }
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{ … }
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{ … }
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{ … }
static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
{ … }
static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{ … }
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
struct kvm_vcpu *vcpu, bool from_launch)
{ … }
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{ … }
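/*
 * With shadow VMCS enabled, L1's VMREAD/VMWRITE hit the shadow copy without
 * exiting, so vmcs12 and the shadow must be kept coherent in both
 * directions: copy_shadow_to_vmcs12() pulls L1's writes out after running
 * L1, and nested_sync_vmcs12_to_shadow() pushes dirty vmcs12 state back
 * before resuming L1.  The copy_{enlightened_to_vmcs12,
 * vmcs12_to_enlightened}() pair plays the same role for Hyper-V's
 * enlightened VMCS, with hv_clean_fields limiting what must be copied.
 */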
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{ … }
static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
{ … }
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
u64 preemption_timeout)
{ … }
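/*
 * The VMX-preemption timer counts down at the TSC rate divided by 2^N,
 * with N taken from IA32_VMX_MISC[4:0]; KVM advertises the shift in
 * VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE and emulates the countdown with
 * an hrtimer.  A sketch of the units conversion (illustrative; example_*
 * is hypothetical and assumes virtual_tsc_khz is non-zero):
 */
static u64 example_preemption_timeout_ns(struct kvm_vcpu *vcpu, u64 value)
{
	/* Timer ticks -> TSC cycles. */
	u64 ns = value << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;

	/* TSC cycles -> nanoseconds, given the TSC frequency in kHz. */
	ns *= 1000000;
	do_div(ns, vcpu->arch.virtual_tsc_khz);

	return ns;
}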
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{ … }
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
{ … }
static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
struct vmcs12 *vmcs12)
{ … }
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
struct vmcs12 *vmcs12)
{ … }
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{ … }
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
bool from_vmentry,
enum vm_entry_failure_code *entry_failure_code)
{ … }
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{ … }
static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{ … }
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
{ … }
static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
enum vm_entry_failure_code *entry_failure_code)
{ … }
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{ … }
#ifdef CONFIG_KVM_HYPERV
static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{ … }
#endif
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{ … }
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{ … }
static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{ … }
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{ … }
static bool vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)

{ … }
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
bool from_vmentry)
{ … }
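/*
 * nested_vmx_enter_non_root_mode() is the core of nested VM-entry, shared
 * by emulated VMLAUNCH/VMRESUME (from_vmentry == true) and by paths that
 * re-enter L2 with pre-validated state, such as RSM and nested-state
 * restore.  It checks vmcs12, switches to vmcs02, builds the vmcs02 state
 * via prepare_vmcs02() and processes the VM-entry MSR-load list; failures
 * past the consistency checks must surface as a VM-exit to L1, not as a
 * VMfail, mirroring hardware behavior.
 */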
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{ … }
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{ … }
static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{ … }
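/*
 * While L2 runs, its view of CR0 is scattered across three places: bits
 * the guest owns live in the hardware VMCS, bits L1 intercepts
 * (cr0_guest_host_mask) still hold L1's last value in vmcs12, and bits
 * only L0 intercepts are reflected in the CR0 read shadow.  A sketch of
 * the three-way merge, close to the in-tree helper (example_* is
 * hypothetical; CR4 is handled identically):
 */
static unsigned long example_vmcs12_guest_cr0(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	return	/* bits L2 could write directly while it ran: */
		(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
		/* bits L1 trapped, so L1's stored value still stands: */
		(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
		/* bits only L0 trapped; L2's writes landed in the shadow: */
		(vmcs_readl(CR0_READ_SHADOW) &
		 ~(vmcs12->cr0_guest_host_mask |
		   vcpu->arch.cr0_guest_owned_bits));
}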
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
u32 vm_exit_reason, u32 exit_intr_info)
{ … }
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
{ … }
static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{ … }
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
{ … }
static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex)
{ … }
static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex)
{ … }
static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
{ … }
static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
{ … }
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
{ … }
static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{ … }
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{ … }
static bool is_vmcs12_ext_field(unsigned long field)
{ … }
static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{ … }
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
u32 vm_exit_reason, u32 exit_intr_info,
unsigned long exit_qualification)
{ … }
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
{ … }
static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
{ … }
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
u32 exit_intr_info, unsigned long exit_qualification)
{ … }
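/*
 * nested_vmx_vmexit() emulates a VM-exit from L2 to L1: it snapshots L2
 * state into vmcs12 via prepare_vmcs12(), switches back to vmcs01, loads
 * vmcs12's host state into the vCPU and processes the VM-exit MSR-load
 * list.  Callers pass the EXIT_REASON_* value, interruption info and exit
 * qualification that L1 should observe; a vm_exit_reason of -1 is the
 * special "leave nested mode without a VM-exit" case used by
 * vmx_leave_nested(), which skips populating vmcs12's exit fields.
 */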
static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
{ … }
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
{ … }
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
int *ret)
{ … }
static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
{ … }
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{ … }
static int handle_vmxon(struct kvm_vcpu *vcpu)
{ … }
static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
{ … }
static int handle_vmxoff(struct kvm_vcpu *vcpu)
{ … }
static int handle_vmclear(struct kvm_vcpu *vcpu)
{ … }
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
{ … }
static int handle_vmresume(struct kvm_vcpu *vcpu)
{ … }
static int handle_vmread(struct kvm_vcpu *vcpu)
{ … }
static bool is_shadow_field_rw(unsigned long field)
{ … }
static bool is_shadow_field_ro(unsigned long field)
{ … }
static int handle_vmwrite(struct kvm_vcpu *vcpu)
{ … }
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
{ … }
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{ … }
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{ … }
static int handle_invept(struct kvm_vcpu *vcpu)
{ … }
static int handle_invvpid(struct kvm_vcpu *vcpu)
{ … }
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static int handle_vmfunc(struct kvm_vcpu *vcpu)
{ … }
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
int size)
{ … }
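/*
 * The two 4 KiB I/O bitmaps hold one bit per port: bitmap A covers ports
 * 0x0000-0x7fff, bitmap B covers 0x8000-0xffff, and a multi-byte access
 * exits if any covered port's bit is set.  A sketch of a single-port
 * lookup (illustrative; example_* is hypothetical, and the in-tree code
 * reads the bitmaps from guest memory page by page):
 */
static bool example_io_bit_set(unsigned int port,
			       const u8 *bitmap_a, const u8 *bitmap_b)
{
	const u8 *bitmap = (port < 0x8000) ? bitmap_a : bitmap_b;
	unsigned int bit = port & 0x7fff;

	return bitmap[bit / 8] & (1 << (bit % 8));
}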
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
union vmx_exit_reason exit_reason)
{ … }
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{ … }
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, gpa_t bitmap)
{ … }
static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
{ … }
static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
union vmx_exit_reason exit_reason)
{ … }
static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
union vmx_exit_reason exit_reason)
{ … }
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{ … }
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
u32 user_data_size)
{ … }
void vmx_leave_nested(struct kvm_vcpu *vcpu)
{ … }
static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
struct kvm_nested_state *kvm_state)
{ … }
void nested_vmx_set_vmcs_shadowing_bitmap(void)
{ … }
#define VMCS12_IDX_TO_ENC(idx) …
static u64 nested_vmx_calc_vmcs_enum_msr(void)
{ … }
static void nested_vmx_setup_pinbased_ctls(struct vmcs_config *vmcs_conf,
struct nested_vmx_msrs *msrs)
{ … }
static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf,
struct nested_vmx_msrs *msrs)
{ … }
static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf,
struct nested_vmx_msrs *msrs)
{ … }
static void nested_vmx_setup_cpubased_ctls(struct vmcs_config *vmcs_conf,
struct nested_vmx_msrs *msrs)
{ … }
static void nested_vmx_setup_secondary_ctls(u32 ept_caps,
struct vmcs_config *vmcs_conf,
struct nested_vmx_msrs *msrs)
{ … }
static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf,
struct nested_vmx_msrs *msrs)
{ … }
static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
{ … }
static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs)
{ … }
void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
{ … }
void nested_vmx_hardware_unsetup(void)
{ … }
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
{ … }
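/*
 * vmx_nested_ops is the callback table through which generic x86 code
 * drives nested support: event and exception checks, triple-fault
 * handling, nested-state save/restore for migration, and re-pinning guest
 * pages after a restore.  It is the VMX counterpart of svm_nested_ops.
 */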
struct kvm_x86_nested_ops vmx_nested_ops = …;