#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H
#include <linux/kvm_host.h>
#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>
#include <asm/posted_intr.h>
#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
#include "../mmu.h"
#define MSR_TYPE_R …
#define MSR_TYPE_W …
#define MSR_TYPE_RW …
#define X2APIC_MSR(r) …
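/*
 * Illustrative sketch only, not the elided definition above: x2APIC MSRs
 * are architecturally derived from the xAPIC MMIO register offset, so a
 * helper like X2APIC_MSR() is conventionally along these lines:
 *
 *	#define X2APIC_MSR(r)	(APIC_BASE_MSR + ((r) >> 4))
 */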
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS …
#else
#define MAX_NR_USER_RETURN_MSRS …
#endif
#define MAX_NR_LOADSTORE_MSRS …
struct vmx_msrs { … };
struct vmx_uret_msr { … };
enum segment_cache_field { … };
#define RTIT_ADDR_RANGE …
struct pt_ctx { … };
struct pt_desc { … };
union vmx_exit_reason { … };
struct lbr_desc { … };
extern struct x86_pmu_lbr vmx_lbr_caps;
struct nested_vmx { … };
struct vcpu_vmx { … };
struct kvm_vmx { … };
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
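/*
 * Hedged usage sketch (MSR_EFER is just an example index): the lookup can
 * fail, so callers are expected to tolerate a NULL slot:
 *
 *	struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
 *	if (!msr)
 *		return 1;
 */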
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
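/*
 * Hedged usage sketch: the return value is assumed to be a valid slot
 * index on success and negative when @msr is absent from the list:
 *
 *	int i = vmx_find_loadstore_msr_slot(&m, msr);
 *	if (i < 0)
 *		return;
 */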
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{ … }
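/*
 * Sketch of the likely body elided above, assuming it simply dispatches on
 * @value to the intercept helpers declared earlier:
 *
 *	if (value)
 *		vmx_enable_intercept_for_msr(vcpu, msr, type);
 *	else
 *		vmx_disable_intercept_for_msr(vcpu, msr, type);
 */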
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base) …
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop) …
BUILD_VMX_MSR_BITMAP_HELPERS(…)
BUILD_VMX_MSR_BITMAP_HELPERS(…)
BUILD_VMX_MSR_BITMAP_HELPERS(…)
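/*
 * Hedged sketch: each invocation above is assumed to stamp out a pair of
 * read/write helpers via __BUILD_VMX_MSR_BITMAP_HELPER(), yielding names
 * of roughly this shape:
 *
 *	if (vmx_test_msr_bitmap_read(msr_bitmap, msr))
 *		vmx_clear_msr_bitmap_read(msr_bitmap, msr);
 *	vmx_set_msr_bitmap_write(msr_bitmap, msr);
 */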
static inline u8 vmx_get_rvi(void)
{ … }
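/*
 * Hedged sketch of the likely body: RVI (Requesting Virtual Interrupt) is
 * architecturally the low byte of the guest interrupt status VMCS field:
 *
 *	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
 */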
#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS …
#ifdef CONFIG_X86_64
#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS …
#else
#define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS …
#endif
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS …
#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS …
#ifdef CONFIG_X86_64
#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS …
#else
#define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS …
#endif
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS …
#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL …
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL …
#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL …
#ifdef CONFIG_X86_64
#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL …
#else
#define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL …
#endif
#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL …
#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL …
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL …
#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL …
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL …
#define BUILD_CONTROLS_SHADOW(lname, uname, bits) …
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
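/*
 * Hedged usage sketch: BUILD_CONTROLS_SHADOW() is assumed to generate
 * shadowed accessors named after its first argument, avoiding redundant
 * VMWRITEs and used along these lines:
 *
 *	vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
 *	if (secondary_exec_controls_get(vmx) & SECONDARY_EXEC_ENABLE_VPID)
 *		vpid_sync_context(vmx->vpid);
 */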
#define VMX_REGS_LAZY_LOAD_SET …
static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{ … }
static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{ … }
static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{ … }
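/*
 * Hedged sketch: to_vmx() and to_kvm_vmx() are assumed to be the usual
 * container_of() conversions, relying on the generic vcpu/kvm structs
 * being embedded in their VMX counterparts:
 *
 *	return container_of(vcpu, struct vcpu_vmx, vcpu);
 */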
static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
{ … }
static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
{ … }
static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{ … }
void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{ … }
static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{ … }
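/*
 * Hedged sketch: both accessors above are assumed to lazily VMREAD their
 * field and cache it via the register-availability tracking from
 * kvm_cache_regs.h, e.g. for the exit qualification:
 *
 *	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
 *		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 *	return vmx->exit_qualification;
 */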
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
static inline struct vmcs *alloc_vmcs(bool shadow)
{ … }
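/*
 * Hedged sketch: the inline wrapper above presumably allocates for the
 * current CPU with an accounted allocation:
 *
 *	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(), GFP_KERNEL_ACCOUNT);
 */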
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{ … }
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{ … }
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{ … }
bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{ … }
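/*
 * Hedged sketch: guest state is assumed to be trivially "valid" for
 * unrestricted guests, with the expensive segment checks done only when
 * emulating real mode on hardware that lacks unrestricted guest support:
 *
 *	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
 */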
void dump_vmcs(struct kvm_vcpu *vcpu);
static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{ … }
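/*
 * Hedged sketch: per the SDM's VM-exit instruction-information layout,
 * "reg2" is assumed to occupy bits 31:28:
 *
 *	return (vmx_instr_info >> 28) & 0xf;
 */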
static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{ … }
#endif /* __KVM_X86_VMX_H */