/* SPDX-License-Identifier: GPL-2.0 */
/* linux/arch/x86/include/asm/cpu_entry_area.h */

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>
#include <asm/pgtable_areas.h>

#ifdef CONFIG_X86_64

#ifdef CONFIG_AMD_MEM_ENCRYPT
#define VC_EXCEPTION_STKSZ	EXCEPTION_STKSZ
#else
#define VC_EXCEPTION_STKSZ	0
#endif
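/*
 * VC_EXCEPTION_STKSZ sizes the #VC (VMM Communication exception) stack,
 * which is only needed when the kernel can run as an SEV-ES guest
 * (CONFIG_AMD_MEM_ENCRYPT); otherwise it is 0 and consumes no storage.
 */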

/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, optional_stack_size)			\
	char	DF_stack_guard[guardsize];				\
	char	DF_stack[EXCEPTION_STKSZ];				\
	char	NMI_stack_guard[guardsize];				\
	char	NMI_stack[EXCEPTION_STKSZ];				\
	char	DB_stack_guard[guardsize];				\
	char	DB_stack[EXCEPTION_STKSZ];				\
	char	MCE_stack_guard[guardsize];				\
	char	MCE_stack[EXCEPTION_STKSZ];				\
	char	VC_stack_guard[guardsize];				\
	char	VC_stack[optional_stack_size];				\
	char	VC2_stack_guard[guardsize];				\
	char	VC2_stack[optional_stack_size];				\
	char	IST_top_guard[guardsize];

/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
	ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};
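
/*
 * Note: the physical storage above uses guardsize 0, while the virtual
 * cpu entry area mapping places a guard page below each stack.  An
 * overflowing IST stack then hits an unmapped page and faults cleanly
 * instead of silently corrupting the neighbouring stack.
 */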

/*
 * The exception stack ordering in [cea_]exception_stacks
 */
enum exception_stack_ordering {
	ESTACK_DF,
	ESTACK_NMI,
	ESTACK_DB,
	ESTACK_MCE,
	ESTACK_VC,
	ESTACK_VC2,
	N_EXCEPTION_STACKS
};
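
/*
 * The CEA_ESTACK_* helpers below paste the stack name onto "_stack":
 * e.g. CEA_ESTACK_SIZE(DF) is the size of DF_stack and
 * CEA_ESTACK_OFFS(DF) its offset within struct cea_exception_stacks.
 */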

#define CEA_ESTACK_SIZE(st)					\
	sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)				\
	((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)				\
	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)					\
	offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES					\
	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)

#endif

#ifdef CONFIG_X86_32
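/*
 * On 32-bit, #DF is handled via a task gate.  The hardware TSS at the
 * end of this page holds the handler's execution context (including its
 * stack pointer), and stack[] fills the rest of the page below it.
 */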
struct doublefault_stack {
	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
	struct x86_hw_tss tss;
} __aligned(PAGE_SIZE);
#endif

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page. On 32-bit the GDT must be writeable, so
	 * it needs an extra guard page.
	 */
#ifdef CONFIG_X86_32
	char guard_entry_stack[PAGE_SIZE];
#endif
	struct entry_stack_page entry_stack_page;

#ifdef CONFIG_X86_32
	char guard_doublefault_stack[PAGE_SIZE];
	struct doublefault_stack doublefault_stack;
#endif

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries with guard pages.
	 */
	struct cea_exception_stacks estacks;
#endif
	/*
	 * Per CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
};

#define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
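/*
 * cea_exception_stacks caches a pointer to the estacks member of the
 * CPU's entry area so that the __this_cpu_ist_*_va() macros below can
 * compute IST stack addresses with a single per CPU read.
 */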

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
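/*
 * get_cpu_entry_area() returns the given CPU's entry area mapping;
 * cpu_entry_stack() below resolves the entry (trampoline) stack within
 * it.  It is __always_inline so early entry code can use it without an
 * out-of-line call.
 */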

static __always_inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}

#define __this_cpu_ist_top_va(name)					\
	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)

#define __this_cpu_ist_bottom_va(name)					\
	CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
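
/*
 * Example: __this_cpu_ist_top_va(DF) and __this_cpu_ist_bottom_va(DF)
 * give the top and bottom of the current CPU's #DF IST stack as mapped
 * in the cpu entry area.
 */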

#endif