#define pr_fmt(fmt) …
#include <linux/kvm_host.h>
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "smm.h"
#include "cpuid.h"
#include "trace.h"
#define CHECK_SMRAM32_OFFSET …
#define CHECK_SMRAM64_OFFSET …
static void check_smram_offsets(void)
{ … }
#undef CHECK_SMRAM64_OFFSET
#undef CHECK_SMRAM32_OFFSET
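/*
 * Move the vCPU into or out of SMM: update the SMM-related hflags and reset
 * the MMU context, since SMM has its own memory view (SMRAM may only be
 * visible while in SMM).  On SMM exit, events latched while in SMM are
 * re-evaluated for injection.
 */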
void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{ … }
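/*
 * Record a pending SMI for the vCPU and request event processing; the SMI is
 * actually delivered later, via enter_smm(), once it can be injected.
 */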
void process_smi(struct kvm_vcpu *vcpu)
{ … }
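/*
 * Pack a struct kvm_segment's attributes (type, S, DPL, P, AVL, L, D/B, G)
 * into the descriptor-style flags word used by the SMRAM state-save format.
 */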
static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
{ … }
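/*
 * Save segment register 'n' (VCPU_SREG_*) into the 32-bit SMRAM segment
 * state: selector, base, limit and packed flags.  The 64-bit counterpart
 * below stores the same information in the wider layout.
 */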
static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
struct kvm_smm_seg_state_32 *state,
u32 *selector, int n)
{ … }
#ifdef CONFIG_X86_64
static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu,
struct kvm_smm_seg_state_64 *state,
int n)
{ … }
#endif
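/*
 * Fill the 32-bit (legacy) SMRAM state-save map from the current vCPU state:
 * general-purpose registers, RIP/RFLAGS, control and debug registers,
 * segment and descriptor-table registers, SMBASE and the SMM revision ID.
 * The 64-bit variant below does the same for the layout used when the guest
 * CPU supports long mode.
 */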
static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
struct kvm_smram_state_32 *smram)
{ … }
#ifdef CONFIG_X86_64
static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
struct kvm_smram_state_64 *smram)
{ … }
#endif
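/*
 * Emulate SMM entry: build the state-save image and write it to guest SMRAM
 * at SMBASE + 0xFE00, give vendor code a chance to save its own state, then
 * switch the vCPU to the SMM start-up environment (CS base = SMBASE,
 * RIP = 0x8000, real-mode style flat segments, interrupts and paging off).
 */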
void enter_smm(struct kvm_vcpu *vcpu)
{ … }
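/*
 * Inverse of enter_smm_get_segment_flags(): unpack the flags word read from
 * SMRAM back into the attribute fields of a struct kvm_segment.
 */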
static void rsm_set_desc_flags(struct kvm_segment *desc, u32 flags)
{ … }
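/*
 * On RSM, reload segment register 'n' from the saved 32-bit SMRAM segment
 * state; the 64-bit counterpart below handles the long-mode layout.
 */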
static int rsm_load_seg_32(struct kvm_vcpu *vcpu,
const struct kvm_smm_seg_state_32 *state,
u16 selector, int n)
{ … }
#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct kvm_vcpu *vcpu,
const struct kvm_smm_seg_state_64 *state,
int n)
{ … }
#endif
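/*
 * Restore CR0/CR3/CR4 from the SMRAM image in an order that keeps every
 * intermediate combination architecturally valid (e.g. PAE before paging,
 * CR4.PCIDE only once CR3[11:0] is clear and long mode is active).
 */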
static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
u64 cr0, u64 cr3, u64 cr4)
{ … }
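/*
 * Restore the full vCPU register state from the 32-bit SMRAM state-save map
 * on RSM, returning an X86EMUL_* status.  The 64-bit variant below does the
 * same for the long-mode layout.
 */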
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
const struct kvm_smram_state_32 *smstate)
{ … }
#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
const struct kvm_smram_state_64 *smstate)
{ … }
#endif
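/*
 * Emulator callback for the RSM instruction: read the state-save area back
 * from guest SMRAM, let vendor code restore its own state, leave SMM via
 * kvm_smm_changed(), and reload the saved registers.  A malformed SMRAM
 * image is reported with an X86EMUL_* error so the caller can fail the RSM.
 */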
int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{ … }