linux/arch/x86/mm/cpu_entry_area.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>
#include <asm/setup.h>

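/*
 * Backing store for each CPU's entry (trampoline) stack: one page per CPU,
 * mapped into that CPU's cpu_entry_area below.
 */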
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);

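/*
 * Offset of each CPU's entry area inside the CPU_ENTRY_AREA_PER_CPU region,
 * in units of CPU_ENTRY_AREA_SIZE.  init_cea_offsets() assigns every
 * possible CPU a distinct slot (randomized when KASLR is enabled) and
 * cea_offset() reads it back on the fast path.
 */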
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return per_cpu(_cea_offset, cpu);
}

static __init void init_cea_offsets(void)
{}
#else /* !X86_64 */
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return cpu;
}
static inline void init_cea_offsets(void) { }
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

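/*
 * Install a PTE mapping @pa with protections @flags at the entry-area
 * address @cea_vaddr.  The cpu_entry_area is shared between the kernel
 * and user page tables, so present PTEs may be made _PAGE_GLOBAL when
 * the CPU supports PGE.
 */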
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{}

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	phys_addr_t pa = per_cpu_ptr_to_phys(ptr);

	for (; pages; pages--, cea_vaddr += PAGE_SIZE, pa += PAGE_SIZE)
		cea_set_pte(cea_vaddr, pa, prot);
}

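/*
 * On Intel CPUs, map this CPU's debug store and its BTS/PEBS buffers into
 * the entry area so the hardware can reach them while PTI page tables are
 * active; the not-yet-allocated buffer range is pre-populated with
 * non-present PTEs.
 */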
static void __init percpu_setup_debug_store(unsigned int cpu)
{}

#ifdef CONFIG_X86_64

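/*
 * Map one named exception stack from the per-CPU backing store into the
 * entry area.  Relies on the cea, estacks and npages locals of
 * percpu_setup_exception_stacks() below.
 */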
#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

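/*
 * Record this CPU's entry-area exception stacks in cea_exception_stacks and
 * map the #DF, NMI, #DB and #MC IST stacks page by page; the guard pages
 * between them stay unmapped.  SEV-ES guests additionally map the #VC
 * stacks.
 */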
static void __init percpu_setup_exception_stacks(unsigned int cpu)
{}
#else
static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

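/*
 * A CPU's entry area contains its GDT (read-only where possible), its entry
 * stack, its TSS and, on 64-bit, its exception stacks and debug store.
 */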
/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{}

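/*
 * On 32-bit, pre-populate the kernel page tables for the whole
 * CPU_ENTRY_AREA_MAP_SIZE range starting at CPU_ENTRY_AREA_BASE; on 64-bit
 * this is a no-op because the entry area lives in its own top-level range.
 */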
static __init void setup_cpu_entry_area_ptes(void)
{}

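/*
 * Boot-time entry point: pick each CPU's entry-area offset, populate the
 * entry-area page tables and map the entry area of every possible CPU,
 * then sync the changes into the initial page tables.
 */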
void __init setup_cpu_entry_areas(void)
{}