linux/arch/x86/kvm/kvm_cache_regs.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR4_GUEST_BITS & X86_CR4_VMXE));
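
/*
 * Accessors for the general purpose registers.  Unlike RSP, RIP and the
 * control/PDPTR registers handled further down, the GPRs are kept up to date
 * in vcpu->arch.regs[] by the vendor VM-Enter/VM-Exit code, so these helpers
 * read and write the cache directly without any regs_avail/regs_dirty
 * bookkeeping.
 */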

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * avail  dirty
 * 0	  0	  register in VMCS/VMCB
 * 0	  1	  *INVALID*
 * 1	  0	  register in vcpu->arch
 * 1	  1	  register in vcpu->arch, needs to be stored back
 */
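/*
 * For example, RIP on VMX: after VM-Exit the current value lives only in the
 * VMCS (0/0).  The first kvm_rip_read() has the vendor ->cache_reg() hook
 * copy GUEST_RIP into vcpu->arch.regs[] and mark it available (1/0), and a
 * subsequent kvm_rip_write() marks it dirty (1/1) so the new value is written
 * back to the VMCS before the next VM-Enter.
 */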
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
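
/*
 * Illustrative sketch of how a vendor ->cache_reg() implementation fills the
 * cache (simplified from vmx.c; vmcs_readl() and GUEST_RIP are VMX-specific):
 *
 *	kvm_register_mark_available(vcpu, VCPU_REGS_RIP);
 *	vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
 *
 * Writers go the other way: update vcpu->arch and call
 * kvm_register_mark_dirty() so the vendor code flushes the value back to
 * hardware before the next VM-Enter.
 */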

/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops.  This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
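/*
 * Most callers want the mode-aware wrappers in x86.h, kvm_register_read() and
 * kvm_register_write(), which truncate the value to 32 bits when the vCPU is
 * not in 64-bit mode.
 */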
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
}
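
/*
 * CR0/CR4 reads only need to be refreshed from hardware for bits the guest
 * can own, i.e. bits whose intercepts may be disabled (see
 * KVM_POSSIBLE_CR0_GUEST_BITS and KVM_POSSIBLE_CR4_GUEST_BITS above); all
 * other bits are always up to date in vcpu->arch.
 */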

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);

	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);

	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
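
/*
 * Reads the 64-bit value the guest has placed in EDX:EAX, e.g. the source
 * operand of WRMSR.
 */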

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
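
/*
 * "Guest mode" here means the vCPU is running a nested (L2) guest on behalf
 * of the L1 hypervisor.
 */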

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif