/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "vmx_onhyperv.h"
#include "vmcs.h"
#include "../x86.h"

void vmread_error(unsigned long field);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * The VMREAD error trampoline _always_ uses the stack to pass parameters, even
 * for 64-bit targets.  Preserving all registers allows the VMREAD inline asm
 * blob to avoid clobbering GPRs, which in turn allows the compiler to better
 * optimize sequences of VMREADs.
 *
 * Declare the trampoline as an opaque label as it's not safe to call from C
 * code; there is no way to tell the compiler to pass params on the stack for
 * 64-bit targets.
 *
 * void vmread_error_trampoline(unsigned long field, bool fault);
 */
extern unsigned long vmread_error_trampoline;
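
/*
 * A sketch of how the VMREAD asm blob below invokes the trampoline (params
 * are passed on the stack even on 64-bit, matching the prototype above):
 *
 *	push	<fault>
 *	push	<field>
 *	call	vmread_error_trampoline
 */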

/*
 * The second VMREAD error trampoline, called from the assembly trampoline,
 * exists primarily to enable instrumentation for the VM-Fail path.
 */
void vmread_error_trampoline2(unsigned long field, bool fault);

#endif

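/*
 * VMCS field encoding (Intel SDM Vol. 3, Appendix B): bits 14:13 give the
 * field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width) and
 * bit 0 selects the "high" half of a 64-bit field.  The vmcs_check*() helpers
 * below use BUILD_BUG_ON_MSG() to reject, at compile time, any compile-time
 * constant field whose width doesn't match the accessor.
 */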
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

	asm_goto_output("1: vmread %[field], %[output]\n\t"
			"jna %l[do_fail]\n\t"

			_ASM_EXTABLE(1b, %l[do_exception])

			: [output] "=r" (value)
			: [field] "r" (field)
			: "cc"
			: do_fail, do_exception);

	return value;

do_fail:
	instrumentation_begin();
	vmread_error(field);
	instrumentation_end();
	return 0;

do_exception:
	kvm_spurious_fault();
	return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "xorl %k1, %k1\n\t"
		     "2:\n\t"
		     "push %1\n\t"
		     "push %2\n\t"
		     "call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)

		     : ASM_CALL_CONSTRAINT, "=&r"(value), "=&r"(field)
		     : "2"(field)
		     : "cc");
	return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (kvm_is_using_evmcs())
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (kvm_is_using_evmcs())
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (kvm_is_using_evmcs())
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	/* On 32-bit hosts, a 64-bit field is read as two naturally sized accesses. */
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field + 1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (kvm_is_using_evmcs())
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

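/*
 * vmx_asm1()/vmx_asm2() emit a one- or two-operand VMX instruction and route
 * VM-Fail (reported via the CC flags) to the instruction's <insn>_error()
 * handler declared above, and a faulting instruction (e.g. VMX not enabled)
 * to kvm_spurious_fault() via the exception table.
 */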
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm goto("1: " __stringify(insn) " %0\n\t"			\
		 ".byte 0x2e\n\t" /* branch not taken hint */		\
		 "jna %l[error]\n\t"					\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 : : op1 : "cc" : error, fault);			\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm goto("1: " __stringify(insn) " %1, %0\n\t"			\
		 ".byte 0x2e\n\t" /* branch not taken hint */		\
		 "jna %l[error]\n\t"					\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 : : op1, op2 : "cc" : error, fault);			\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (kvm_is_using_evmcs())
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (kvm_is_using_evmcs())
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	/* On 32-bit hosts, write the high half via the "high" field encoding. */
	__vmcs_writel(field + 1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (kvm_is_using_evmcs())
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

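/* VMCLEAR and VMPTRLD operate on the physical address of the target VMCS. */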
static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (kvm_is_using_evmcs())
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = { eptp, gpa };

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

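/*
 * TLB flush helpers.  INVVPID invalidates linear and combined mappings tagged
 * with the given VPID.  A vpid of '0' is reserved for the host, so a
 * single-context flush of vpid 0 is never required and the helpers fall back
 * to a global flush when single-context variants aren't supported.
 */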
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

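/*
 * INVEPT invalidates guest-physical and combined mappings; a single-context
 * flush targets one EPTP, with a global flush as the fallback.
 */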
static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */