linux/arch/x86/xen/enlighten_pv.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/edd.h>
#include <linux/reboot.h>
#include <linux/virtio_anchor.h>
#include <linux/stackprotector.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/cpuid.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_IOPL_IOPERM
#include <asm/io_bitmap.h>
#endif

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/proc_cap_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"

#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */

void *xen_initial_gdt;

static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);

struct tls_descs {
	struct desc_struct desc[3];
};

DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);

enum xen_lazy_mode xen_get_lazy_mode(void)
{}

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the one
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE);

static int __init parse_xen_msr_safe(char *str)
{}
early_param("xen_msr_safe", parse_xen_msr_safe);
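
/*
 * A minimal sketch of what such a handler can look like (illustrative
 * assumption, not necessarily the body of parse_xen_msr_safe() above;
 * the _sketch name is hypothetical): the argument is parsed with
 * kstrtobool() from <linux/kstrtox.h>, which is already included.
 */
static int __init parse_xen_msr_safe_sketch(char *str)
{
	if (str)
		return kstrtobool(str, &xen_msr_safe);

	return -EINVAL;
}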

/* Get MTRR settings from Xen and put them into mtrr_state. */
static void __init xen_set_mtrr_data(void)
{}

static void __init xen_pv_init_platform(void)
{}

static void __init xen_pv_guest_late_init(void)
{}

static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{}

static bool __init xen_check_mwait(void)
{}

static bool __init xen_check_xsave(void)
{}

static void __init xen_init_capabilities(void)
{}

static noinstr void xen_set_debugreg(int reg, unsigned long val)
{}

static noinstr unsigned long xen_get_debugreg(int reg)
{}

static void xen_start_context_switch(struct task_struct *prev)
{}

static void xen_end_context_switch(struct task_struct *next)
{}

static unsigned long xen_store_tr(void)
{}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{}
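
/*
 * Illustrative sketch of the aliasing scheme described above (assumptions
 * only, not the upstream body; the _sketch name is hypothetical, and the
 * helpers used are the generic kernel/Xen APIs): look up the PTE backing
 * the address, ask Xen to install the new protections there, then repeat
 * the update on the linear-map alias when the two addresses differ.
 */
static void set_aliased_prot_sketch(void *v, pgprot_t prot)
{
	unsigned int level;
	pte_t *ptep = lookup_address((unsigned long)v, &level);
	unsigned long pfn;
	pte_t pte;
	void *linear;

	if (WARN_ON(!ptep))
		return;

	pfn = pte_pfn(*ptep);
	pte = pfn_pte(pfn, prot);

	/* Update the mapping the caller actually passed in. */
	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	/* If v is a vmalloc-style alias, fix up the linear mapping too. */
	linear = __va(PFN_PHYS(pfn));
	if (linear != v &&
	    HYPERVISOR_update_va_mapping((unsigned long)linear, pte, 0))
		BUG();
}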

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{}

static void xen_set_ldt(const void *addr, unsigned entries)
{}

static void xen_load_gdt(const struct desc_ptr *dtr)
{}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{}
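
/*
 * Rough sketch of the early-boot variant above (details are assumptions,
 * and the _sketch name is hypothetical): the GDT still fits in a single
 * page at this point, so that page is made read-only and its machine
 * frame is handed to Xen with HYPERVISOR_set_gdt().
 */
static void __init xen_load_gdt_boot_sketch(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned long pfn = PFN_DOWN(__pa(va));
	unsigned long mfn = pfn_to_mfn(pfn);

	BUG_ON(size > PAGE_SIZE);
	BUG_ON(va & ~PAGE_MASK);

	/* Xen requires GDT frames to be mapped read-only in the guest. */
	if (HYPERVISOR_update_va_mapping(va, pfn_pte(pfn, PAGE_KERNEL_RO), 0))
		BUG();

	if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
		BUG();
}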

static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{}
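
/*
 * Sketch of the shadow-compare optimisation explained in the comment above
 * shadow_tls_desc (illustrative only, hypothetical _sketch name): skip the
 * update_descriptor hypercall whenever the shadow copy already matches the
 * descriptor being loaded, otherwise refresh the shadow and tell Xen.
 */
static void load_tls_descriptor_sketch(struct thread_struct *t,
					unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];

	if (desc_equal(shadow, &t->tls_array[i]))
		return;		/* unchanged: no hypercall needed */

	*shadow = t->tls_array[i];
	/* ... issue the update_descriptor hypercall for GDT_ENTRY_TLS_MIN + i ... */
}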

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{}

static void xen_load_gs_index(unsigned int idx)
{}

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{}

void noist_exc_debug(struct pt_regs *regs);

DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
{}

DEFINE_IDTENTRY_RAW_ERRORCODE(xenpv_exc_double_fault)
{}

DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
{}

DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{}

#ifdef CONFIG_X86_MCE
DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
{}
#endif

struct trap_array_entry {
	void (*orig)(void);
	void (*xen)(void);
	bool ist_okay;
};

#define TRAP_ENTRY(func, ist_ok) {			\
	.orig		= asm_##func,			\
	.xen		= xen_asm_##func,		\
	.ist_okay	= ist_ok }

/* The native handler is replaced by a xenpv_-prefixed Xen-specific one. */
#define TRAP_ENTRY_REDIR(func, ist_ok) {		\
	.orig		= asm_##func,			\
	.xen		= xen_asm_xenpv_##func,		\
	.ist_okay	= ist_ok }

static struct trap_array_entry trap_array[] = {
	TRAP_ENTRY_REDIR(exc_debug,		true  ),
	TRAP_ENTRY_REDIR(exc_double_fault,	true  ),
#ifdef CONFIG_X86_MCE
	TRAP_ENTRY_REDIR(exc_machine_check,	true  ),
#endif
	TRAP_ENTRY_REDIR(exc_nmi,		true  ),
	/* ... entries for the remaining exception vectors ... */
};

static bool __ref get_trap_addr(void **addr, unsigned int ist)
{}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{}
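
/*
 * Sketch of the "also update Xen" check described above (assumptions only,
 * hypothetical _sketch name): write the entry natively, and if it lies
 * inside the IDT that is currently loaded, convert it and hand it to Xen
 * as a one-entry, zero-terminated trap table.
 */
static void xen_write_idt_entry_sketch(gate_desc *dt, int entrynum,
					const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start = this_cpu_read(idt_desc.address);
	unsigned long end = start + this_cpu_read(idt_desc.size) + 1;

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && p + sizeof(*g) <= end) {
		struct trap_info info[2];

		info[1].address = 0;	/* terminator */

		if (cvt_gate_to_trap(entrynum, g, &info[0]) &&
		    HYPERVISOR_set_trap_table(info))
			BUG();
	}
}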

static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
				      struct trap_info *traps, bool full)
{}

void xen_copy_trap_info(struct trap_info *traps)
{}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{}
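
/*
 * Sketch of the locking scheme described above (illustrative, hypothetical
 * _sketch name): one static traps[] buffer shared by every CPU, guarded by
 * a spinlock, filled from the native IDT and passed to Xen in a single
 * set_trap_table hypercall.
 */
static void xen_load_idt_sketch(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];
	unsigned int out;

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	out = xen_convert_trap_info(desc, traps, false);
	traps[out].address = 0;		/* zero-terminate the table for Xen */

	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}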

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{}
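
/*
 * Sketch of the behaviour described above (assumptions only, hypothetical
 * _sketch name): LDT and TSS descriptors are ignored because Xen manages
 * them through dedicated hypercalls; everything else is pushed to Xen via
 * update_descriptor on the machine address of the GDT slot.
 */
static void xen_write_gdt_entry_sketch(struct desc_struct *dt, int entry,
					const void *desc, int type)
{
	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* Handled by the LDT/TSS specific paths, not here. */
		break;
	default:
		if (HYPERVISOR_update_descriptor(
				arbitrary_virt_to_machine(&dt[entry]).maddr,
				*(const u64 *)desc))
			BUG();
		break;
	}
}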

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{}

static void xen_load_sp0(unsigned long sp0)
{}

#ifdef CONFIG_X86_IOPL_IOPERM
static void xen_invalidate_io_bitmap(void)
{}

static void xen_update_io_bitmap(void)
{}
#endif

static void xen_io_delay(void)
{}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{}
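
/*
 * Reading %cr0 directly is a privileged operation that Xen would have to
 * emulate for a PV guest, hence the per-CPU cache above. A minimal sketch
 * of the read side (assumption, hypothetical _sketch name): return the
 * cached value, refilling it lazily from the real register.
 */
static unsigned long xen_read_cr0_sketch(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}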

static void xen_write_cr0(unsigned long cr0)
{}

static void xen_write_cr4(unsigned long cr4)
{}

static u64 xen_do_read_msr(unsigned int msr, int *err)
{}

static void set_seg(unsigned int which, unsigned int low, unsigned int high,
		    int *err)
{}

/*
 * Support write_msr_safe() and write_msr() semantics.
 * With err == NULL write_msr() semantics are selected.
 * Supplying an err pointer requires err to be pre-initialized with 0.
 */
static void xen_do_write_msr(unsigned int msr, unsigned int low,
			     unsigned int high, int *err)
{}
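
/*
 * Sketch of the convention described above (illustrative only, hypothetical
 * _sketch name): the segment-base MSRs are routed to Xen via set_seg(),
 * everything else falls through to the native accessors, where a NULL err
 * selects the non-checking variant.
 */
static void xen_do_write_msr_sketch(unsigned int msr, unsigned int low,
				    unsigned int high, int *err)
{
	switch (msr) {
	case MSR_FS_BASE:
		set_seg(SEGBASE_FS, low, high, err);
		break;
	case MSR_GS_BASE:
		set_seg(SEGBASE_GS_KERNEL, low, high, err);
		break;
	case MSR_KERNEL_GS_BASE:
		set_seg(SEGBASE_GS_USER, low, high, err);
		break;
	default:
		if (!err)
			native_write_msr(msr, low, high);
		else
			*err = native_write_msr_safe(msr, low, high);
		break;
	}
}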

static u64 xen_read_msr_safe(unsigned int msr, int *err)
{}

static int xen_write_msr_safe(unsigned int msr, unsigned int low,
			      unsigned int high)
{}

static u64 xen_read_msr(unsigned int msr)
{}

static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
{}

/* This is called once we have the cpu_possible_mask */
void __init xen_setup_vcpu_info_placement(void)
{}

static const struct pv_info xen_info __initconst = {
	.extra_user_64bit_cs = FLAT_USER_CS64,
	.name = "Xen",
};

static const typeof(pv_ops) xen_cpu_ops __initconst = {
	.cpu = {
		/* ... .cpu callbacks wired to the xen_* implementations above ... */
	},
};

static void xen_restart(char *msg)
{}

static void xen_machine_halt(void)
{}

static void xen_machine_power_off(void)
{}

static void xen_crash_shutdown(struct pt_regs *regs)
{}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart	= xen_restart,
	.halt		= xen_machine_halt,
	.power_off	= xen_machine_power_off,
	.shutdown	= xen_machine_halt,
	.crash_shutdown	= xen_crash_shutdown,
};

static unsigned char xen_get_nmi_reason(void)
{}

static void __init xen_boot_params_init_edd(void)
{}

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_gdt(int cpu)
{}
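
/*
 * Sketch of the boot-time dance the comment above describes (assumptions
 * only: the _sketch name is hypothetical and switch_gdt_and_percpu_base()
 * is assumed to be the current helper for loading the real GDT): point the
 * GDT pv_ops at the boot-time variants, switch over, then restore the
 * normal entry points.
 */
static void __init xen_setup_gdt_sketch(int cpu)
{
	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_ops.cpu.load_gdt = xen_load_gdt_boot;

	switch_gdt_and_percpu_base(cpu);

	pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
	pv_ops.cpu.load_gdt = xen_load_gdt;
}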

static void __init xen_dom0_set_legacy_features(void)
{}

static void __init xen_domu_set_legacy_features(void)
{}

extern void early_xen_iret_patch(void);

/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
{}

static int xen_cpu_up_prepare_pv(unsigned int cpu)
{}

static int xen_cpu_dead_pv(unsigned int cpu)
{}

static uint32_t __init xen_platform_pv(void)
{}

const __initconst struct hypervisor_x86 x86_hyper_xen_pv = {
	.name		= "Xen PV",
	.detect		= xen_platform_pv,
	.type		= X86_HYPER_XEN_PV,
	.ignore_nopv	= true,
};