linux/arch/x86/include/asm/paravirt_types.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__
#include <linux/types.h>

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK().
 */
struct paravirt_callee_save {};
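
/*
 * Hedged sketch of the idea, with hypothetical names (the kernel's
 * actual definition may differ in detail): making the wrapper a
 * distinct struct keeps pointers to callee-save-convention code from
 * being mixed up with plain C function pointers, forcing call sites
 * through the PVOP_CALLEE and PVOP_VCALLEE macros below.
 */
#if 0	/* illustrative example only, not built */
struct example_callee_save {
	void *func;	/* typically points at an asm thunk */
};

#define EXAMPLE_IS_CALLEE_SAVE(f)				\
	((struct example_callee_save) { .func = (f) })
#endif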

/* general info */
struct pv_info {};

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {} __no_randomize_layout;
#endif

struct pv_cpu_ops {} __no_randomize_layout;

struct pv_irq_ops {} __no_randomize_layout;

struct pv_mmu_ops {} __no_randomize_layout;

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {} __no_randomize_layout;

/*
 * This contains all the paravirt structures: we get a convenient
 * number for each function via its offset within the struct, which
 * we then use to indicate what to patch.
 */
struct paravirt_patch_template {} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;
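
/*
 * Hedged sketch, with hypothetical names, of how a struct offset can
 * serve as an operation number: because every op lives inside the one
 * template struct, the byte offset of its member uniquely identifies
 * the operation, and that offset is all a patch site has to record.
 */
#if 0	/* illustrative example only, not built */
struct example_ops {
	void (*first)(void);
	void (*second)(void);
};

struct example_template {
	struct example_ops ops;
};

/* The "operation number" of .second is simply its byte offset: */
#define EXAMPLE_PATCH_SECOND	offsetof(struct example_template, ops.second)
#endif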

#define paravirt_ptr(op)

int paravirt_disable_iospace(void);

/* This generates an indirect call based on the operation type number. */
#define PARAVIRT_CALL

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily predict the destination address
 * in advance.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers,
 * which, unfortunately, are quite a few (%r8-%r11).
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * two variants: VCALL for void functions and CALL for non-void ones.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
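/*
 * Hedged sketch (hypothetical names, simplified) of what the x86_64
 * macros below boil down to: pin the argument to %rdi and the return
 * value to %rax, emit the indirect call from inline asm, and list the
 * remaining caller-saved registers as clobbers so the compiler models
 * a full C function call.  The real macros additionally hook into the
 * alternatives patching machinery.
 */
#if 0	/* illustrative example only, not built */
extern long (*example_op)(long);

static inline long example_pvop_call1(long arg1)
{
	long ret;

	asm volatile("call *%[op]"
		     : "=a" (ret), "+D" (arg1)
		     : [op] "m" (example_op)
		     : "rcx", "rdx", "rsi", "r8", "r9", "r10", "r11",
		       "memory", "cc");
	return ret;
}
#endif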
#ifdef CONFIG_X86_32
#define PVOP_CALL_ARGS

#define PVOP_CALL_ARG1(x)
#define PVOP_CALL_ARG2(x)
#define PVOP_CALL_ARG3(x)

#define PVOP_VCALL_CLOBBERS
#define PVOP_CALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS
#define PVOP_CALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS

#define PVOP_CALL_ARG1(x)
#define PVOP_CALL_ARG2(x)
#define PVOP_CALL_ARG3(x)
#define PVOP_CALL_ARG4(x)

#define PVOP_VCALL_CLOBBERS
#define PVOP_CALL_CLOBBERS

/*
 * void functions are still allowed [re]ax for scratch.
 *
 * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
 * registers. Make sure we model this with the appropriate clobbers.
 */
#ifdef CONFIG_ZERO_CALL_USED_REGS
#define PVOP_VCALLEE_CLOBBERS
#else
#define PVOP_VCALLEE_CLOBBERS
#endif
#define PVOP_CALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)
#else
#define PVOP_TEST_NULL(op)
#endif
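
/*
 * Hedged sketch of the debug variant (hypothetical body, close in
 * spirit to the real one): catch a pv_ops slot that was never filled
 * in before it gets called.
 */
#if 0	/* illustrative example only, not built */
#define EXAMPLE_PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#endif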

#define PVOP_RETVAL(rettype)

/*
 * Use alternative patching for paravirt calls:
 * - For replacing an indirect call with a direct one, use the "normal"
 *   ALTERNATIVE() macro with the indirect call as the initial code sequence,
 *   which will be replaced with the related direct call by using the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 * - When the replacement is either a direct call or a short code sequence
 *   depending on a feature bit, the ALTERNATIVE_2() macro is used.
 *   The indirect call is again the initial code sequence, while the special
 *   code sequence is selected with the specified feature bit. If the
 *   feature is not active, the direct call is used as above via the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 */
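/*
 * Hedged, schematic illustration of the two cases above (hypothetical
 * call targets and feature bit; the real macro plumbing lives in
 * <asm/alternative.h>):
 */
#if 0	/* illustrative example only, not built */
static inline void example_alt_patching(void)
{
	/* Indirect call, patched to a direct call at boot: */
	asm volatile(ALTERNATIVE("call *example_pv_slot(%rip)",
				 "call example_direct_target",
				 ALT_DIRECT_CALL(X86_FEATURE_ALWAYS)));

	/* Feature-dependent short sequence, direct call otherwise: */
	asm volatile(ALTERNATIVE_2("call *example_pv_slot(%rip)",
				   "sti",	/* hypothetical sequence */
				   X86_FEATURE_EXAMPLE,
				   "call example_direct_target",
				   ALT_DIRECT_CALL(X86_FEATURE_ALWAYS)));
}
#endif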
#define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...)

#define ____PVOP_ALT_CALL(ret, op, alt, cond, call_clbr,		\
			  extra_clbr, ...)

#define __PVOP_CALL(rettype, op, ...)

#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)

#define __PVOP_CALLEESAVE(rettype, op, ...)

#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)


#define __PVOP_VCALL(op, ...)

#define __PVOP_ALT_VCALL(op, alt, cond, ...)

#define __PVOP_VCALLEESAVE(op, ...)

#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)


#define PVOP_CALL0(rettype, op)
#define PVOP_VCALL0(op)
#define PVOP_ALT_CALL0(rettype, op, alt, cond)
#define PVOP_ALT_VCALL0(op, alt, cond)

#define PVOP_CALLEE0(rettype, op)
#define PVOP_VCALLEE0(op)
#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)
#define PVOP_ALT_VCALLEE0(op, alt, cond)


#define PVOP_CALL1(rettype, op, arg1)
#define PVOP_VCALL1(op, arg1)
#define PVOP_ALT_VCALL1(op, arg1, alt, cond)

#define PVOP_CALLEE1(rettype, op, arg1)
#define PVOP_VCALLEE1(op, arg1)
#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)
#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)


#define PVOP_CALL2(rettype, op, arg1, arg2)
#define PVOP_VCALL2(op, arg1, arg2)

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)
#define PVOP_VCALL3(op, arg1, arg2, arg3)

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)

unsigned long paravirt_ret0(void);
#ifdef CONFIG_PARAVIRT_XXL
u64 _paravirt_ident_64(u64);
unsigned long pv_native_save_fl(void);
void pv_native_irq_disable(void);
void pv_native_irq_enable(void);
unsigned long pv_native_read_cr2(void);
#endif
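
/*
 * Hedged sketch of what the helpers declared above amount to (the
 * kernel may implement some of them in asm to honor the callee-save
 * convention): paravirt_ret0() returns 0, and _paravirt_ident_64()
 * passes its argument through unchanged, for hooks that should be
 * no-ops when running natively.
 */
#if 0	/* illustrative example only, not built */
static inline unsigned long example_ret0(void)
{
	return 0;
}

static inline u64 example_ident_64(u64 x)
{
	return x;
}
#endif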

#define paravirt_nop

#endif	/* __ASSEMBLY__ */

#define ALT_NOT_XEN

#endif  /* CONFIG_PARAVIRT */
#endif	/* _ASM_X86_PARAVIRT_TYPES_H */