/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H

#include <asm/desc_defs.h>
#include "fpu.h"

struct x86_emulate_ctxt;
enum x86_intercept;
enum x86_intercept_stage;

struct x86_exception {
	u8 vector;
	bool error_code_valid;
	u16 error_code;
	bool nested_page_fault;
	u64 address; /* cr2 or nested page fault gpa */
	u8 async_page_fault;
};

/*
 * This struct is used to carry enough information from the instruction
 * decoder to main KVM so that a decision can be made whether the
 * instruction needs to be intercepted or not.
 */
struct x86_instruction_info {
	u8  intercept;		/* which intercept			*/
	u8  rep_prefix;		/* rep prefix?				*/
	u8  modrm_mod;		/* mod part of modrm			*/
	u8  modrm_reg;		/* index of register used		*/
	u8  modrm_rm;		/* rm part of modrm			*/
	u64 src_val;		/* value of source operand		*/
	u64 dst_val;		/* value of destination operand		*/
	u8  src_bytes;		/* size of source operand		*/
	u8  dst_bytes;		/* size of destination operand		*/
	u8  ad_bytes;		/* size of src/dst address		*/
	u64 next_rip;		/* rip following the instruction	*/
};
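
/*
 * Illustrative sketch (simplified; the context fields shown belong to the
 * emulator internals elided in this header): the decoder fills this struct
 * from its decode state and hands it to the ->intercept() callback so the
 * vendor module can decide whether the exit must be forwarded:
 *
 *	struct x86_instruction_info info = {
 *		.intercept  = intercept,
 *		.rep_prefix = ctxt->rep_prefix,
 *		.modrm_mod  = ctxt->modrm_mod,
 *		.modrm_reg  = ctxt->modrm_reg,
 *		.modrm_rm   = ctxt->modrm_rm,
 *		.next_rip   = ctxt->eip,
 *	};
 *
 *	rc = ctxt->ops->intercept(ctxt, &info, stage);
 */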

/*
 * x86_emulate_ops:
 *
 * These operations represent the instruction emulator's interface to memory.
 * There are two categories of operation: those that act on ordinary memory
 * regions (*_std), and those that act on memory regions known to require
 * special treatment or emulation (*_emulated).
 *
 * The emulator assumes that an instruction accesses only one 'emulated memory'
 * location, that this location is the given linear faulting address (cr2), and
 * that this is one of the instruction's data operands. Instruction fetches and
 * stack operations are assumed never to access emulated memory. The emulator
 * automatically deduces which operand of a string-move operation is accessing
 * emulated memory, and assumes that the other operand accesses normal memory.
 *
 * NOTES:
 *  1. The emulator isn't very smart about emulated vs. standard memory.
 *     'Emulated memory' access addresses should be checked for sanity.
 *     'Normal memory' accesses may fault, and the caller must arrange to
 *     detect and handle reentrancy into the emulator via recursive faults.
 *     Accesses may be unaligned and may cross page boundaries.
 *  2. If the access fails (cannot emulate, or a standard access faults) then
 *     it is up to the memop to propagate the fault to the guest VM via
 *     some out-of-band mechanism, unknown to the emulator. The memop signals
 *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
 *     then immediately bail.
 *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
 *     cmpxchg8b_emulated need support 8-byte accesses.
 *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
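
/*
 * Illustrative sketch of NOTE 2 above (example_read_emulated() and
 * example_translate() are hypothetical, not part of this interface): a
 * memop maps failure onto the codes above and records the fault details
 * out-of-band in a struct x86_exception.
 */
#if 0
static int example_read_emulated(struct x86_emulate_ctxt *ctxt,
				 unsigned long addr, void *val,
				 unsigned int bytes,
				 struct x86_exception *exception)
{
	/* hypothetical translation helper; assume 0 means success */
	if (example_translate(ctxt, addr, bytes, val) == 0)
		return X86EMUL_CONTINUE;

	/* the fault travels out-of-band; the emulator just bails */
	exception->vector = PF_VECTOR;	/* 14, #PF */
	exception->error_code_valid = true;
	exception->address = addr;
	return X86EMUL_PROPAGATE_FAULT;
}
#endif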

/* x86-specific emulation flags */
#define X86EMUL_F_WRITE			BIT(0)
#define X86EMUL_F_FETCH			BIT(1)
#define X86EMUL_F_IMPLICIT		BIT(2)
#define X86EMUL_F_INVLPG		BIT(3)
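
/*
 * Illustrative use (example_linearize() is a hypothetical stand-in for the
 * emulator's address-generation helper): the flags qualify an access when
 * a linear address is generated, e.g.
 *
 *	rc = example_linearize(ctxt, addr, size, X86EMUL_F_WRITE, &linear);
 *
 * for a store, X86EMUL_F_FETCH for an instruction fetch, X86EMUL_F_INVLPG
 * when linearizing the INVLPG target, with X86EMUL_F_IMPLICIT marking
 * accesses the guest did not encode explicitly.
 */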

struct x86_emulate_ops {};

/* Type, address-of, and value of an instruction's operand. */
struct operand {};

struct fetch_cache {
	u8 data[15];
	u8 *ptr;
	u8 *end;
};

struct read_cache {
	u8 data[1024];
	unsigned long pos;
	unsigned long end;
};

/* Execution mode, passed to the emulator. */
enum x86emul_mode {
	X86EMUL_MODE_REAL,	/* Real mode.             */
	X86EMUL_MODE_VM86,	/* Virtual 8086 mode.     */
	X86EMUL_MODE_PROT16,	/* 16-bit protected mode. */
	X86EMUL_MODE_PROT32,	/* 32-bit protected mode. */
	X86EMUL_MODE_PROT64,	/* 64-bit (long) mode.    */
};

/*
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
struct fastop;

typedef void (*fastop_t)(struct fastop *);
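
/*
 * Illustrative sketch (heavily simplified; the real thunk also sizes the
 * fastop and uses the retpoline-safe call macro): since struct fastop is
 * never defined, a fastop is only reachable through inline asm that loads
 * the operands into the registers the fastop calling convention expects:
 *
 *	asm("push %[flags]; popf; call *%[fop]; pushf; pop %[flags]"
 *	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
 *	      [fop]"+S"(fop)
 *	    : "c"(ctxt->src2.val));
 *
 * Guest RFLAGS are installed around the call and the result comes back in
 * the destination operand register.
 */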

/*
 * The emulator's _regs array tracks only the GPRs, i.e. excludes RIP.  RIP is
 * tracked/accessed via _eip, and except for RIP relative addressing, which
 * also uses _eip, RIP cannot be a register operand nor can it be an operand in
 * a ModRM or SIB byte.
 */
#ifdef CONFIG_X86_64
#define NR_EMULATOR_GPRS	16
#else
#define NR_EMULATOR_GPRS	8
#endif
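
/*
 * Illustrative sketch: the register accessors at the bottom of this header
 * bound their index by NR_EMULATOR_GPRS; because both possible values are
 * powers of two, a bogus index can be clamped instead of indexing out of
 * bounds:
 *
 *	if (nr >= NR_EMULATOR_GPRS)
 *		nr &= NR_EMULATOR_GPRS - 1;
 */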

struct x86_emulate_ctxt {};

#define KVM_EMULATOR_BUG_ON(cond, ctxt)

/* Repeat String Operation Prefix */
#define REPE_PREFIX	0xf3
#define REPNE_PREFIX	0xf2

/*
 * CPUID vendors: CPUID.0 returns the vendor string in EBX, EDX, ECX (in
 * that order), four little-endian ASCII characters per register.
 */
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65

#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273

#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx 0x6f677948
#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx 0x656e6975
#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx 0x6e65476e

#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69

#define X86EMUL_CPUID_VENDOR_CentaurHauls_ebx 0x746e6543
#define X86EMUL_CPUID_VENDOR_CentaurHauls_ecx 0x736c7561
#define X86EMUL_CPUID_VENDOR_CentaurHauls_edx 0x48727561

static inline bool is_guest_vendor_intel(u32 ebx, u32 ecx, u32 edx)
{
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	       ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	       edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static inline bool is_guest_vendor_amd(u32 ebx, u32 ecx, u32 edx)
{
	return (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
		ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
		edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) ||
	       (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
		ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
		edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx);
}

static inline bool is_guest_vendor_hygon(u32 ebx, u32 ecx, u32 edx)
{
	return ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
	       ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
	       edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx;
}
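
/*
 * Illustrative use (example_get_cpuid() is a hypothetical stand-in for the
 * emulator's CPUID callback): feed these helpers the guest's CPUID.0
 * output,
 *
 *	eax = 0; ecx = 0;
 *	example_get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
 *	if (is_guest_vendor_amd(ebx, ecx, edx))
 *		apply_amd_quirks(ctxt);
 *
 * to gate vendor-specific emulation quirks (apply_amd_quirks() is likewise
 * hypothetical).
 */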

enum x86_intercept_stage {
	X86_ICTP_NONE = 0,	/* Allow zero-init to not match anything */
	X86_ICPT_PRE_EXCEPT,
	X86_ICPT_POST_EXCEPT,
	X86_ICPT_POST_MEMACCESS,
};

enum x86_intercept {};

/* Host execution mode. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len,
		    int emulation_type);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
#define EMULATION_INTERCEPTED 2
void init_decode_cache(struct x86_emulate_ctxt *ctxt);
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
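
/*
 * Illustrative sketch (heavily simplified from the way KVM drives the
 * emulator; handle_emulation_failure() here stands in for the caller's
 * failure path): decode once, then execute, restarting when asked and
 * bailing out on failure or a nested-virt intercept.
 */
#if 0
	r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
	if (r != EMULATION_OK)
		return handle_emulation_failure(vcpu);

restart:
	r = x86_emulate_insn(ctxt);
	if (r == EMULATION_INTERCEPTED)
		return 1;	/* the nested hypervisor owns this exit */
	if (r == EMULATION_RESTART)
		goto restart;	/* e.g. an interrupted string instruction */
	if (r == EMULATION_FAILED)
		return handle_emulation_failure(vcpu);
#endif
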
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code);
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt);

static inline ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{}

static inline ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{}

static inline ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{}
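
/*
 * Illustrative summary (the real bodies depend on x86_emulate_ctxt
 * internals elided above): reg_read() lazily pulls a GPR from the vcpu
 * into the _regs cache, reg_write() marks the entry dirty so that
 * emulator_writeback_register_cache() flushes it back, and reg_rmw()
 * does both, e.g.
 *
 *	*reg_rmw(ctxt, VCPU_REGS_RCX) -= 1;
 *
 * where VCPU_REGS_RCX is the GPR index KVM defines for RCX.
 */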

#endif /* _ASM_X86_KVM_X86_EMULATE_H */