linux/arch/x86/kvm/svm/nested.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <[email protected]>
 *   Avi Kivity   <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{}

static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{}

void recalc_intercepts(struct vcpu_svm *svm)
{}

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where the KVM MSR permission
 * may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{}
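
/*
 * Illustrative sketch only, not the upstream implementation: conceptually
 * the merge is a bitwise OR of L0's and L1's permission bits, so an MSR
 * access is intercepted if either level wants it intercepted.  The helper
 * name nested_msrpm_merge_word() is hypothetical.
 */
static inline u32 nested_msrpm_merge_word(u32 l0_bits, u32 l1_bits)
{
	/* A set bit means "intercept"; either level requesting it wins. */
	return l0_bits | l1_bits;
}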

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{}
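
/*
 * Sketch of the idea behind the check (hypothetical helper, simplified from
 * the real code): mask off the low 12 bits that hardware ignores, then make
 * sure both the first and the last byte of the bitmap are legal guest
 * physical addresses.
 */
static bool nested_bitmap_pa_ok(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = pa & PAGE_MASK;	/* bits 11:0 are ignored by hardware */

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	       kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}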

static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{}
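
/*
 * Example of the CC() check pattern used for save-area validation (subset
 * only, hypothetical helper): EFER.SVME must be set for a nested guest, and
 * CR0.NW=1 with CR0.CD=0 is an illegal combination.
 */
static bool nested_save_cr_efer_checks(struct vmcb_save_area_cached *save)
{
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC(!(save->cr0 & X86_CR0_CD) && (save->cr0 & X86_CR0_NW)))
		return false;

	return true;
}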

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{}

static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{}
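
/*
 * Sketch of the direction of the copy (field subset chosen for
 * illustration, hypothetical helper): only control fields that the CPU
 * itself may rewrite while L2 runs, such as the pending event injection and
 * the virtual interrupt state, need to flow from vmcb02 back into the
 * cached vmcb12 controls.
 */
static void sync_cpu_written_ctl(struct vmcb_ctrl_area_cached *to,
				 const struct vmcb_control_area *from)
{
	to->event_inj     = from->event_inj;
	to->event_inj_err = from->event_inj_err;
	to->int_ctl       = from->int_ctl;	/* V_IRQ/V_TPR state */
}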

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{}
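
/*
 * Sketch (hypothetical helper): EXIT_INT_INFO uses the same encoding as
 * EVENTINJ, so a pending exception is reported to L1 as
 * vector | type | valid, with the error-code-valid bit set when an error
 * code accompanies it.
 */
static u32 exitintinfo_for_exception(u8 vector, bool has_error_code)
{
	u32 info = vector | SVM_EVTINJ_TYPE_EXEPT | SVM_EVTINJ_VALID;

	if (has_error_code)
		info |= SVM_EVTINJ_VALID_ERR;

	return info;
}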

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{}
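
/*
 * Sketch of the PDPTR decision only (hypothetical helper): PDPTRs are
 * cached by hardware at CR3-load time, so they must be re-read from guest
 * memory exactly when the new CR3 is used for PAE shadow paging, i.e. when
 * NPT is not in use for this nested entry.
 */
static bool nested_cr3_needs_pdptr_reload(struct kvm_vcpu *vcpu,
					  bool nested_npt, bool reload_pdptrs)
{
	return reload_pdptrs && !nested_npt && is_pae_paging(vcpu);
}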

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{}

static inline bool is_evtinj_soft(u32 evtinj)
{}

static bool is_evtinj_nmi(u32 evtinj)
{}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
					  unsigned long vmcb12_rip,
					  unsigned long vmcb12_csbase)
{}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{}

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{}
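
/*
 * Illustrative subset of the copy, not exhaustive (hypothetical helper):
 * these are save-area fields that VMRUN itself loads, so VMLOAD/VMSAVE do
 * not cover them and they must be transferred explicitly.
 */
static void copy_vmrun_loaded_fields(struct vmcb_save_area *to,
				     const struct vmcb_save_area *from)
{
	to->efer   = from->efer;
	to->cr0    = from->cr0;
	to->cr3    = from->cr3;
	to->cr4    = from->cr4;
	to->rflags = from->rflags;
	to->rip    = from->rip;
	to->rsp    = from->rsp;
	to->rax    = from->rax;
}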

void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{}

int nested_svm_vmexit(struct vcpu_svm *svm)
{}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{}

int svm_allocate_nested(struct vcpu_svm *svm)
{}

void svm_free_nested(struct vcpu_svm *svm)
{}

void svm_leave_nested(struct kvm_vcpu *vcpu)
{}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{}

static int nested_svm_intercept(struct vcpu_svm *svm)
{}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{}

int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{}

static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
					   u32 error_code)
{}

static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
{}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{}

int nested_svm_exit_special(struct vcpu_svm *svm)
{}

void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{}

/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
					      struct vmcb_ctrl_area_cached *from)
{}
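
/*
 * Sketch of the reverse copy (field subset, hypothetical helper): the
 * cached controls are the source here, and per the comment above the ASID
 * is included as well, so that the control-area image handed back (e.g. via
 * KVM_GET_NESTED_STATE) is complete.
 */
static void cache_to_control_subset(struct vmcb_control_area *dst,
				    const struct vmcb_ctrl_area_cached *from)
{
	dst->asid          = from->asid;
	dst->int_ctl       = from->int_ctl;
	dst->event_inj     = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
}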

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{}

struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.is_exception_vmexit = nested_svm_is_exception_vmexit,
	.check_events = svm_check_nested_events,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};