linux/arch/x86/kvm/svm/svm_ops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"

/*
 * Wrap SVM instructions in asm goto so that a fault on the instruction,
 * e.g. an unexpected #UD if SVM support is disabled out from under KVM,
 * is routed to kvm_spurious_fault() via the exception table instead of
 * crashing the host.
 */
#define svm_asm(insn, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) "\n\t"			\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 ::: clobber : fault);				\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) " %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 :: op1 : clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)			\
do {								\
	asm goto("1: " __stringify(insn) " %1, %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 :: op1, op2 : clobber : fault);		\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

static inline void clgi(void)
{
	svm_asm(clgi);
}

static inline void stgi(void)
{
	svm_asm(stgi);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}

/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa));
}

#endif /* __KVM_X86_SVM_OPS_H */
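
For context, a minimal usage sketch of these helpers. Everything below is illustrative and assumed for this example only: the function name example_enter_guest() and its host_save_area_pa parameter are hypothetical, not part of KVM. The point is the shape of the sequence: clgi()/stgi() bracket the world switch so physical interrupts and NMIs stay pending while host state is only partially loaded, and vmsave() stashes the host segment and MSR state that VMRUN itself does not preserve.

#include "svm_ops.h"

/* Hypothetical caller, for illustration only; not part of svm_ops.h. */
static void example_enter_guest(unsigned long host_save_area_pa)
{
	/* GIF = 0: physical interrupts, NMIs, etc. are held pending. */
	clgi();

	/*
	 * Save the host segment/MSR state (FS, GS, TR, LDTR, SYSCALL and
	 * SYSENTER MSRs, ...) that VMRUN does not preserve.  Per the comment
	 * above vmsave(), the physical address is passed as 'unsigned long'.
	 */
	vmsave(host_save_area_pa);

	/* ... guest state load, VMRUN and host state restore go here ... */

	/* GIF = 1: pending interrupts can be delivered again. */
	stgi();
}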