/* linux/arch/x86/kvm/emulate.c */

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <[email protected]>
 *   Yaniv Kamay <[email protected]>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#define pr_fmt(fmt)

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#include <asm/ibt.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
/*
 * NOTE(review): every Op* macro below has an empty expansion in this
 * skeleton.  Upstream, each expands to a small integer operand-type
 * encoding packed into an opcode's flags word (OpBits-wide fields,
 * extracted with OpMask).  With the values stripped, no operand type
 * can actually be encoded — TODO: restore the numeric values before use.
 */
#define OpNone
#define OpImplicit
#define OpReg
#define OpMem
#define OpAcc
#define OpDI
#define OpMem64
#define OpImmUByte
#define OpDX
#define OpCL
#define OpImmByte
#define OpOne
#define OpImm
#define OpMem16
#define OpMem32
#define OpImmU
#define OpSI
#define OpImmFAddr
#define OpMemFAddr
#define OpImmU16
#define OpES
#define OpCS
#define OpSS
#define OpDS
#define OpFS
#define OpGS
#define OpMem8
#define OpImm64
#define OpXLat
#define OpAccLo
#define OpAccHi

/* Field width / extraction mask for one packed operand-type field (stripped). */
#define OpBits
#define OpMask

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/*
 * NOTE(review): the decode-flag macros below (destination/source operand
 * selectors, group dispatch, misc attribute bits, Src2 selectors, and
 * the X2..X16 table-repetition helpers) all have empty expansions here.
 * Upstream each is a bit or shifted field OR-ed into an opcode's flags
 * word; the X*() macros repeat a table entry N times.  As stripped,
 * every one of them expands to nothing, so the opcode tables built from
 * them carry no information.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp
/* Destination operand type. */
#define DstShift
#define ImplicitOps
#define DstReg
#define DstMem
#define DstAcc
#define DstDI
#define DstMem64
#define DstMem16
#define DstImmUByte
#define DstDX
#define DstAccLo
#define DstMask
/* Source operand type. */
#define SrcShift
#define SrcNone
#define SrcReg
#define SrcMem
#define SrcMem16
#define SrcMem32
#define SrcImm
#define SrcImmByte
#define SrcOne
#define SrcImmUByte
#define SrcImmU
#define SrcSI
#define SrcXLat
#define SrcImmFAddr
#define SrcMemFAddr
#define SrcAcc
#define SrcImmU16
#define SrcImm64
#define SrcDX
#define SrcMem8
#define SrcAccHi
#define SrcMask
#define BitOp
#define MemAbs
#define String
#define Stack
#define GroupMask
#define Group
#define GroupDual
#define Prefix
#define RMExt
#define Escape
#define InstrDual
#define ModeDual
#define Sse
/* Generic ModRM decode. */
#define ModRM
/* Destination is only written; never read. */
#define Mov
/* Misc flags */
#define Prot
#define EmulateOnUD
#define NoAccess
#define Op3264
#define Undefined
#define Lock
#define Priv
#define No64
#define PageTable
#define NotImpl
/* Source 2 operand type */
#define Src2Shift
#define Src2None
#define Src2Mem
#define Src2CL
#define Src2ImmByte
#define Src2One
#define Src2Imm
#define Src2ES
#define Src2CS
#define Src2SS
#define Src2DS
#define Src2FS
#define Src2GS
#define Src2Mask
#define Mmx
#define AlignMask
#define Aligned
#define Unaligned
#define Avx
#define Aligned16
#define Fastop
#define NoWrite
#define SrcWrite
#define NoMod
#define Intercept
#define CheckPerm
#define PrivUD
#define NearBranch
#define No16
#define IncSP
#define TwoMemOp
#define IsBranch

/* Combined accumulator-pair destination (AccLo|AccHi) — value stripped. */
#define DstXacc

/* Repeat a decode-table entry 2..16 times — expansions stripped. */
#define X2(x...)
#define X3(x...)
#define X4(x...)
#define X5(x...)
#define X6(x...)
#define X7(x...)
#define X8(x...)
#define X16(x...)

/*
 * NOTE(review): decode-table element types — all members have been
 * stripped.  Upstream these carry the flags word plus a union of
 * execute callback / fastop pointer / nested table pointers.  Also
 * note: empty struct definitions are a GNU extension, not ISO C —
 * confirm this file is only built with GNU C.
 */
struct opcode {};

/* ModRM.mod == 3 vs. memory form — two alternative opcode entries. */
struct group_dual {};

/* Selects an entry by the 66/F2/F3 mandatory prefix. */
struct gprefix {};

/* x87 escape opcodes: selects by ModRM reg field / full second byte. */
struct escape {};

/* Two alternatives chosen by instruction specifics (e.g. register vs memory). */
struct instr_dual {};

/* Two alternatives chosen by 32- vs 64-bit execution mode. */
struct mode_dual {};

/* Upstream: EFLAGS bits architecturally forced to zero — expansion stripped. */
#define EFLG_RESERVED_ZEROS_MASK

/*
 * Kind of control transfer being performed when a segment descriptor is
 * loaded; __load_segment_descriptor() varies its permission and type
 * checks on this.  An empty enumerator list ("enum x { };") violates a
 * C constraint and does not compile, so the upstream enumerators are
 * restored here.
 */
enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

/*
 * Stubs: per their names these presumably flush / discard the emulator's
 * cached GPR copies (see kvm_cache_regs.h) — bodies stripped to {} in
 * this skeleton, so they currently do nothing.
 */
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
/*
 * NOTE(review): expansion stripped.  Per the comment above, upstream this
 * is the set of arithmetic EFLAGS bits (presumably OF|SF|ZF|AF|PF|CF —
 * confirm) restored before and saved after emulation.
 */
#define EFLAGS_MASK

/*
 * ON64(x): emit x only when building for 64-bit (both expansions are
 * stripped in this skeleton).  The !CONFIG_X86_64 fallback must still be
 * a *function-like* macro: the previous object-like "#define ON64"
 * expanded to nothing and left the parenthesised argument "(x)" behind
 * at every ON64(x) call site, i.e. 32-bit builds would have emitted the
 * 64-bit-only code instead of dropping it.
 */
#ifdef CONFIG_X86_64
#define ON64(x)
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
 * and 1 for the straight line speculation INT3, leaves 7 bytes for the
 * body of the function.  Currently none is larger than 4.
 */
/*
 * NOTE(review): everything below is the fastop inline-assembly template
 * machinery described in the comment above.  Every macro expansion has
 * been stripped, so this skeleton generates no fastop code at all.
 */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);

#define FASTOP_SIZE

#define __FOP_FUNC(name)

#define FOP_FUNC(name)

#define __FOP_RET(name)

#define FOP_RET(name)

#define __FOP_START(op, align)

#define FOP_START(op)

#define FOP_END

#define __FOPNOP(name)

#define FOPNOP()

#define FOP1E(op,  dst)

#define FOP1EEX(op,  dst)

#define FASTOP1(op)

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name)

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name)

#define FOP2E(op,  dst, src)

#define FASTOP2(op)

/* 2 operand, word only */
#define FASTOP2W(op)

/* 2 operand, src is CL */
#define FASTOP2CL(op)

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name)

#define FOP3E(op,  dst, src, src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op)

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op)

/*
 * NOTE(review): two consecutive bare FOP_START() invocations, with no
 * opcode argument and no matching FOP_END.  Upstream, FOP_START(op)
 * opens a named assembler section that FOP_END closes (e.g. the setcc
 * and salc groups) — this looks like stripping damage; confirm against
 * the original file.
 */
FOP_START();

FOP_START();

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Using asm goto would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...)

/*
 * NOTE(review): address-generation, register-masking and segment-base
 * helper stubs.  All bodies stripped to {} — several are declared to
 * return a value but return nothing, which is UB if the result is used;
 * this skeleton is not executable as-is.
 */
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{}

static void masked_increment(ulong *reg, ulong mask, int inc)
{}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{}

static u32 desc_limit_scaled(struct desc_struct *desc)
{}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{}

/*
 * NOTE(review): exception-injection helpers (named per the x86 vector
 * they presumably raise: #DB, #GP, #SS, #UD, #TS, #DE, #NM) plus
 * segment-selector accessors and canonical-address helpers.  Bodies
 * stripped to {}.
 */
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{}

/*
 * NOTE(review): linear-address formation, RIP assignment and instruction
 * fetch stubs.  All bodies stripped to {}; the insn_fetch()/
 * insn_fetch_arr() macros expand to nothing, so any call site that uses
 * their value would be broken in this skeleton.
 */
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       enum x86emul_mode mode, ulong *linear,
				       unsigned int flags)
{}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{}

static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)

#define insn_fetch_arr(_arr, _size, _ctxt)

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{}

/*
 * NOTE(review): fastop template instantiations for the ALU / mul-div /
 * shift / bit operations.  With the FASTOP* macros stripped above, each
 * line currently expands to nothing but a stray ';' — no asm bodies are
 * generated.
 */
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

/*
 * NOTE(review): condition-code evaluation, x87 control/status stubs and
 * the ModRM / absolute-address operand decoders.  Bodies stripped to {}.
 */
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{}

static void fetch_register_operand(struct operand *op)
{}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{}

/*
 * NOTE(review): guest memory access (read/write/cmpxchg via segmented
 * addresses), port I/O input, and GDT/LDT descriptor read/write/load
 * stubs, plus operand writeback.  Bodies stripped to {}.
 */
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{}

static void write_register_operand(struct operand *op)
{}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{}

/*
 * NOTE(review): stack operations (push/pop/pusha/popa/enter/leave),
 * software interrupt and IRET handling, far jmp/call/ret, cmpxchg, RSM,
 * and the SYSCALL/SYSENTER/SYSEXIT handlers.  emulate_int_real() is
 * non-static (part of the emulator's public interface).  Bodies stripped
 * to {}.
 */
static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len)
{}

static int em_push(struct x86_emulate_ctxt *ctxt)
{}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{}

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{}

static void
setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
{}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{}

/*
 * NOTE(review): I/O permission checks (IOPL / TSS bitmap / VMware
 * backdoor ports) and the 16- and 32-bit TSS task-switch machinery.
 * emulator_task_switch() is non-static (public interface).  Bodies
 * stripped to {}; the VMWARE_PORT_* defines have lost their port
 * numbers.
 */
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{}

/* VMware backdoor I/O ports — numeric values stripped from these defines. */
#define VMWARE_PORT_VMPORT
#define VMWARE_PORT_VMRPC

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{}

static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
				  u16 port, u16 len)
{}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{}

static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{}

static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{}

/*
 * NOTE(review): per-instruction emulation callbacks.  Each em_*() is
 * named after the mnemonic it presumably handles (DAS, AAM, AAD, CALL,
 * XCHG, IMUL, CWD, RDPID/RDTSC/RDPMC, MOV variants, CR/DR/MSR access,
 * segment-register moves, SLDT/LLDT/STR/LTR, INVLPG, CLTS, hypercalls,
 * SGDT/SIDT/LGDT/LIDT, SMSW/LMSW, LOOP/JCXZ, IN/OUT, CLI/STI, CPUID,
 * SAHF/LAHF, BSWAP, CLFLUSH(OPT), MOVSXD) — confirm against upstream.
 * Every body has been stripped to {}.
 */
static int em_das(struct x86_emulate_ctxt *ctxt)
{}

static int em_aam(struct x86_emulate_ctxt *ctxt)
{}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{}

static int em_call(struct x86_emulate_ctxt *ctxt)
{}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{}

static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{}

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{}

static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
{}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{}

static int em_sldt(struct x86_emulate_ctxt *ctxt)
{}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{}

static int em_str(struct x86_emulate_ctxt *ctxt)
{}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{}

static int em_in(struct x86_emulate_ctxt *ctxt)
{}

static int em_out(struct x86_emulate_ctxt *ctxt)
{}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{}

static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
{}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{}

/*
 * NOTE(review): FXSAVE/FXRSTOR state handling, XSETBV, and the CheckPerm
 * callbacks used by the decode tables (CR/DR access, SVM, RDTSC, RDPMC,
 * IN/OUT permission).  Bodies stripped to {}.
 */
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       save and restore
 *  3) 64-bit mode with REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode without REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{}

/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{}

static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{}

static bool valid_cr(int nr)
{}

static int check_cr_access(struct x86_emulate_ctxt *ctxt)
{}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{}

#define D
#define DI(_y, _i)
#define DIP(_y, _i, _p)
#define N
#define EXT
#define G
#define GD
#define ID
#define MD
#define E(_f, _e)
#define I
#define F(_f, _e)
#define II(_f, _e, _i)
#define IIP(_f, _e, _i, _p)
#define GP

#define D2bv
#define D2bvIP
#define I2bv
#define F2bv(_f, _e)
#define I2bvIP

#define F6ALU(_f, _e)

/*
 * NOTE(review): the opcode decode tables.  Every initializer has been
 * stripped down to "=;", which is not valid C — these declarations will
 * not compile until the table contents are restored from the original
 * file.  Comments are kept; no table data survives in this skeleton.
 */
static const struct opcode group7_rm0[] =;

static const struct opcode group7_rm1[] =;

static const struct opcode group7_rm2[] =;

static const struct opcode group7_rm3[] =;

static const struct opcode group7_rm7[] =;

static const struct opcode group1[] =;

static const struct opcode group1A[] =;

static const struct opcode group2[] =;

static const struct opcode group3[] =;

static const struct opcode group4[] =;

static const struct opcode group5[] =;

static const struct opcode group6[] =;

static const struct group_dual group7 =;

static const struct opcode group8[] =;

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 =;


static const struct group_dual group9 =;

static const struct opcode group11[] =;

static const struct gprefix pfx_0f_ae_7 =;

static const struct group_dual group15 =;

static const struct gprefix pfx_0f_6f_0f_7f =;

static const struct instr_dual instr_dual_0f_2b =;

static const struct gprefix pfx_0f_2b =;

static const struct gprefix pfx_0f_10_0f_11 =;

static const struct gprefix pfx_0f_28_0f_29 =;

static const struct gprefix pfx_0f_e7 =;

static const struct escape escape_d9 =;

static const struct escape escape_db =;

static const struct escape escape_dd =;

static const struct instr_dual instr_dual_0f_c3 =;

static const struct mode_dual mode_dual_63 =;

static const struct instr_dual instr_dual_8d =;

static const struct opcode opcode_table[256] =;

static const struct opcode twobyte_table[256] =;

static const struct instr_dual instr_dual_0f_38_f0 =;

static const struct instr_dual instr_dual_0f_38_f1 =;

static const struct gprefix three_byte_0f_38_f0 =;

static const struct gprefix three_byte_0f_38_f1 =;

/*
 * Insns below are selected by the prefix which indexed by the third opcode
 * byte.
 */
static const struct opcode opcode_map_0f_38[256] =;

/*
 * Tear down the table-builder shorthands now that the decode tables are
 * built, so the single-letter names can't leak into later code.  Fixed
 * here: the old list had a stale "#undef I6ALU" (never defined in this
 * file — the macro defined above is F6ALU) and omitted several helpers
 * that were defined (DI, DIP, E, F, II, IIP, F2bv).
 */
#undef D
#undef DI
#undef DIP
#undef N
#undef EXT
#undef G
#undef GD
#undef ID
#undef MD
#undef E
#undef I
#undef F
#undef II
#undef IIP
#undef GP

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef F2bv
#undef I2bvIP
#undef F6ALU

/*
 * NOTE(review): operand decoding plus the emulator's public entry points
 * (x86_decode_insn, x86_emulate_insn, register-cache management, the
 * page-table-writing and GPA-usable queries).  The non-static functions
 * are the interface the rest of KVM calls; all bodies stripped to {}.
 */
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{}

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{}

static void fetch_possible_mmx_operand(struct operand *op)
{}

static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
{}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{}

bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{}