linux/arch/x86/kvm/vmx/nested.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "hyperv.h"
#include "vmcs12.h"
#include "vmx.h"

/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};
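
/*
 * Illustrative caller sketch (a hedged example, not part of this header):
 * the VMLAUNCH/VMRESUME emulation in nested.c (nested_vmx_run()) is the
 * main consumer of this status and branches roughly as follows, with
 * anything other than NVMX_VMENTRY_SUCCESS aborting entry to L2:
 *
 *	switch (nested_vmx_enter_non_root_mode(vcpu, true)) {
 *	case NVMX_VMENTRY_SUCCESS:		run L2
 *	case NVMX_VMENTRY_VMFAIL:		VMfail, resume L1
 *	case NVMX_VMENTRY_VMEXIT:		reflect a VM-exit to L1
 *	case NVMX_VMENTRY_KVM_INTERNAL_ERROR:	report the error to userspace
 *	}
 */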

void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
						     bool from_vmentry);
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size);

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

/*
 * Note: the same condition is checked against the state provided by userspace
 * in vmx_set_nested_state; if it is satisfied, the nested state must include
 * the VMCS12.
 */
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING, i.e. it's valid. */
	return vmx->nested.current_vmptr != -1ull ||
		vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID;
}

static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

/*
 * Return the cr0/4 value that a nested guest would read. This is a combination
 * of L1's "real" cr0 used to run the guest (guest_cr0), and the bits shadowed
 * by the L1 hypervisor (cr0_read_shadow).  KVM must emulate CPU behavior as
 * the value+mask loaded into vmcs02 may not match the vmcs12 fields.
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}
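
/*
 * Worked example with made-up values: if L1 intercepts only CR0.CD
 * (cr0_guest_host_mask == X86_CR0_CD), runs L2 with caching physically
 * disabled (CD set in guest_cr0) but advertises CD as clear in
 * cr0_read_shadow, then L2's read of CR0 must yield
 *
 *	(0xc0000031 & ~0x40000000) | (0x80000031 & 0x40000000) == 0x80000031
 *
 * i.e. CD reads back as 0 even though it is 1 in the CR0 actually in use.
 */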

static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
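
/*
 * Note (an expectation, not enforced here): when this returns false, the
 * VMWRITE emulation is expected to make L1's write to a read-only field
 * such as VM_EXIT_REASON fail with VMfailValid, matching the SDM for CPUs
 * that leave the "VMWRITE to any field" bit clear in IA32_VMX_MISC.
 */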

static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
		CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	/* Secondary controls only apply if the primary activate bit is set. */
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
{
	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

static inline bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
}

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed0) == fixed0) && ((~val & ~fixed1) == ~fixed1);
}
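
/*
 * Worked example with made-up values: fixed0 == 0x80000021 (PE, NE and PG
 * must be 1) and fixed1 == ~0ull (no bit forced to 0) give
 *
 *	fixed_bits_valid(0x80000031, 0x80000021, ~0ull) == true
 *	fixed_bits_valid(0x00000031, 0x80000021, ~0ull) == false
 *
 * since the second value is missing the mandatory PG bit.
 */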

static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* An unrestricted guest may run with PE and/or PG cleared. */
	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

extern struct kvm_x86_nested_ops vmx_nested_ops;

#endif /* __KVM_X86_VMX_NESTED_H */