linux/arch/x86/coco/sev/core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <[email protected]>
 */

#define pr_fmt(fmt)	"SEV: " fmt

#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
#include <linux/dmi.h>
#include <uapi/linux/sev-guest.h>

#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid.h>
#include <asm/cmdline.h>

#define DR7_RESET_VALUE        0x400

/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
#define AP_INIT_CS_LIMIT		0xffff
#define AP_INIT_DS_LIMIT		0xffff
#define AP_INIT_LDTR_LIMIT		0xffff
#define AP_INIT_GDTR_LIMIT		0xffff
#define AP_INIT_IDTR_LIMIT		0xffff
#define AP_INIT_TR_LIMIT		0xffff
#define AP_INIT_RFLAGS_DEFAULT		0x2
#define AP_INIT_DR6_DEFAULT		0xffff0ff0
#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
#define AP_INIT_XCR0_DEFAULT		0x1
#define AP_INIT_X87_FTW_DEFAULT		0x5555
#define AP_INIT_X87_FCW_DEFAULT		0x0040
#define AP_INIT_CR0_DEFAULT		0x60000010
#define AP_INIT_MXCSR_DEFAULT		0x1f80

static const char * const sev_status_feat_names[] =;

/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
static struct ghcb *boot_ghcb __section(".data");

/* Bitmap of SEV features supported by the hypervisor */
static u64 sev_hv_features __ro_after_init;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {};

struct ghcb_state {};

/* For early boot SVSM communication */
static struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE);

static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
static DEFINE_PER_CPU(struct svsm_ca *, svsm_caa);
static DEFINE_PER_CPU(u64, svsm_caa_pa);

struct sev_config {};

static struct sev_config sev_cfg __read_mostly;

static __always_inline bool on_vc_stack(struct pt_regs *regs)
{}

/*
 * This function handles the case when an NMI is raised in the #VC
 * exception handler entry code, before the #VC handler has switched off
 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 * so that any nested #VC exception will not overwrite the stack
 * contents of the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 * nested sev_es_ist_exit() call may adjust back the IST entry too
 * early.
 *
 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 * on the NMI IST stack, as they are only called from NMI handling code
 * right now.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{}

void noinstr __sev_es_ist_exit(void)
{}
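
/*
 * A minimal sketch of the enter/exit pair described above (the real bodies
 * are elided from this listing), kept under "#if 0" so it is not mistaken
 * for the implementation. It assumes the #VC IST slot lives at
 * cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], as in mainline.
 */
#if 0
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read the current #VC IST entry */
	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/*
	 * If the NMI interrupted code that was already running on the #VC
	 * stack, start the adjusted stack below the interrupted frame so
	 * that frame is preserved.
	 */
	if (on_vc_stack(regs))
		new_ist = ALIGN_DOWN(regs->sp, 8);

	/* Save the old IST entry on the adjusted stack ... */
	new_ist -= sizeof(old_ist);
	*(unsigned long *)new_ist = old_ist;

	/* ... and install the adjusted entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read the adjusted IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Pop the saved IST entry back into the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
#endif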

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{}
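
/*
 * A sketch of the GHCB/backup-GHCB logic described above; the real body is
 * elided from this listing. The sev_es_runtime_data field names used here
 * (ghcb_page, backup_ghcb, ghcb_active, backup_ghcb_active) are assumptions,
 * since the struct definition is elided as well.
 */
#if 0
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - an NMI interrupted the #VC path */
		if (unlikely(data->backup_ghcb_active)) {
			/* Nesting deeper than one backup GHCB is fatal */
			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Save the GHCB content so the NMI path can use the page */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}
#endif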

static inline u64 sev_es_rd_ghcb_msr(void)
{}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{}
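
/*
 * A sketch of the accessors above (bodies elided from this listing): the
 * GHCB MSR (MSR_AMD64_SEV_ES_GHCB) carries the GHCB GPA and the values of
 * the MSR-based GHCB protocol, so these are plain rdmsr/wrmsr wrappers.
 */
#if 0
static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low  = (u32)val;
	u32 high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}
#endif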

static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{}

static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{}

static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
{}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{}

static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{}

static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{}

static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
{}

static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{}

/* Include code shared with pre-decompression boot stage */
#include "shared.c"

static inline struct svsm_ca *svsm_get_caa(void)
{}

static u64 svsm_get_caa_pa(void)
{}

static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{}

static int svsm_perform_call_protocol(struct svsm_call *call)
{}

void noinstr __sev_es_nmi_complete(void)
{}

static u64 __init get_secrets_page(void)
{}

static u64 __init get_snp_jump_table_addr(void)
{}

static u64 __init get_jump_table_addr(void)
{}

static void __head
early_set_pages_state(unsigned long vaddr, unsigned long paddr,
		      unsigned long npages, enum psc_op op)
{}

void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned long npages)
{}

void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned long npages)
{}

static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
				       unsigned long vaddr_end, int op)
{}

static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
{}

void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
{}

void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
{}

void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{}

static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
{}

#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)

#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)

static void *snp_alloc_vmsa_page(int cpu)
{}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
{}

static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
{}

void __init snp_set_wakeup_secondary_cpu(void)
{}

int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{}

/*
 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{}
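
/*
 * A sketch of the mapping loop described above; the real body is elided
 * from this listing. It assumes the ghcb_page member of the (elided)
 * per-CPU runtime data and uses kernel_map_pages_in_pgd() to install each
 * GHCB into the EFI page table.
 */
#if 0
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	struct sev_es_runtime_data *data;
	unsigned long address, pflags;
	int cpu;
	u64 pfn;

	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
		return 0;

	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		data = per_cpu(runtime_data, cpu);

		address = __pa(&data->ghcb_page);
		pfn = address >> PAGE_SHIFT;

		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
			return 1;
	}

	return 0;
}
#endif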

static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{}

static void snp_register_per_cpu_ghcb(void)
{}

void setup_ghcb(void)
{}

#ifdef CONFIG_HOTPLUG_CPU
static void sev_es_ap_hlt_loop(void)
{}

/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver an SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{}
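
/*
 * A sketch of the play_dead flow described above; the real body is elided
 * from this listing. soft_restart_cpu() is the name recent kernels use for
 * the CPU bring-up re-entry point; older trees used a different symbol.
 */
#if 0
static void sev_es_play_dead(void)
{
	play_dead_common();

	/* IRQs now disabled */

	sev_es_ap_hlt_loop();

	/*
	 * If the HLT loop returns, the hypervisor has woken this vCPU up
	 * again - jump back into the CPU bring-up path.
	 */
	soft_restart_cpu();
}
#endif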
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead	native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
static void __init sev_es_setup_play_dead(void)
{}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif

static void __init alloc_runtime_data(int cpu)
{}

static void __init init_ghcb(int cpu)
{}

void __init sev_es_init_vc_handling(void)
{}

static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{}

static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{}
static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 unsigned int bytes, bool read)
{}

/*
 * The MOVS instruction has two memory operands, which raises the
 * problem that it is not known whether the access to the source or the
 * destination caused the #VC exception (and hence whether an MMIO read
 * or write operation needs to be emulated).
 *
 * Instead of playing games with walking page-tables and trying to guess
 * whether the source or destination is an MMIO range, split the move
 * into two operations, a read and a write with only one memory operand.
 * This will cause a nested #VC exception on the MMIO address which can
 * then be handled.
 *
 * This implementation has the benefit that it also supports MOVS where
 * source _and_ destination are MMIO regions.
 *
 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
 * rare operation. If it turns out to be a performance problem the split
 * operations can be moved to memcpy_fromio() and memcpy_toio().
 */
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{}
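
/*
 * A sketch of the split-MOVS emulation described above; the real body is
 * elided from this listing. It relies on vc_read_mem()/vc_write_mem() from
 * this file and the insn-eval helpers included above; the exact ES_RETRY
 * bookkeeping for REP is a sketch of the approach, not the elided code.
 */
#if 0
static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
					  unsigned int bytes)
{
	unsigned long ds_base, es_base;
	unsigned char *src, *dst;
	unsigned char buffer[8];
	enum es_result ret;
	bool rep;
	int off;

	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

	if (ds_base == -1L || es_base == -1L)
		return ES_EXCEPTION;

	src = ds_base + (unsigned char *)ctxt->regs->si;
	dst = es_base + (unsigned char *)ctxt->regs->di;

	/*
	 * Split the move into a read and a write with one memory operand
	 * each, so a nested #VC pinpoints which side is MMIO.
	 */
	ret = vc_read_mem(ctxt, src, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	ret = vc_write_mem(ctxt, dst, buffer, bytes);
	if (ret != ES_OK)
		return ret;

	/* Advance (or, with EFLAGS.DF set, rewind) SI and DI */
	off = (ctxt->regs->flags & X86_EFLAGS_DF) ? -bytes : bytes;
	ctxt->regs->si += off;
	ctxt->regs->di += off;

	rep = insn_has_rep_prefix(&ctxt->insn);
	if (rep)
		ctxt->regs->cx -= 1;

	/* Retry the instruction until a REP MOVS exhausts its count */
	if (!rep || ctxt->regs->cx == 0)
		return ES_OK;
	else
		return ES_RETRY;
}
#endif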

static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
					 struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
				       struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_monitor(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
					struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
					 struct ghcb *ghcb,
					 unsigned long exit_code)
{}

static __always_inline bool is_vc2_stack(unsigned long sp)
{}

static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
{}

static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{}

static __always_inline bool vc_is_db(unsigned long error_code)
{}

/*
 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
 * and will panic when an error happens.
 */
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
{}
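
/*
 * A sketch of the kernel-mode handler described above; the real body is
 * elided from this listing. sev_es_terminate() and its arguments come from
 * the shared code included earlier; the context and #DB checks use the
 * helpers declared above.
 */
#if 0
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
{
	irqentry_state_t irq_state;

	if (unlikely(vc_from_invalid_context(regs))) {
		instrumentation_begin();
		panic("Can't handle #VC exception from unsupported context\n");
		instrumentation_end();
	}

	/* A #DB intercepted by the hypervisor arrives as a #VC */
	if (vc_is_db(error_code)) {
		exc_debug(regs);
		return;
	}

	irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/* Show some debug info and ask the hypervisor to terminate */
		show_regs(regs);
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

		/* If that fails and we get here - just panic */
		panic("Returned from Terminate-Request to Hypervisor\n");
	}

	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);
}
#endif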

/*
 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
 * and will kill the current task with SIGBUS when an error happens.
 */
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{}
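
/*
 * A sketch of the user-mode handler described above; the real body is
 * elided from this listing. noist_exc_debug() is the non-IST #DB entry
 * generated by the idtentry machinery.
 */
#if 0
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
	irqentry_state_t irq_state;

	if (vc_is_db(error_code)) {
		noist_exc_debug(regs);
		return;
	}

	irq_state = irqentry_enter(regs);

	instrumentation_begin();

	if (!vc_raw_handle_exception(regs, error_code)) {
		/*
		 * Do not kill the machine if user-space triggered the
		 * exception. Send SIGBUS instead and let user-space deal
		 * with it.
		 */
		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
	}

	instrumentation_end();
	irqentry_exit(regs, irq_state);
}
#endif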

bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{}

/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{}
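
/*
 * A sketch of the scan order described above; the real body is elided from
 * this listing. find_cc_blob_setup_data() lives in the shared code included
 * earlier in this file.
 */
#if 0
static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	/* The boot/decompress kernel passes the CC blob via boot_params. */
	if (bp->cc_blob_address) {
		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
		goto found_cc_info;
	}

	/*
	 * If the kernel was booted directly by firmware/bootloader, the CC
	 * blob may have been passed via a setup_data entry instead.
	 */
	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		snp_abort();

	return cc_info;
}
#endif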

static __head void svsm_setup(struct cc_blob_sev_info *cc_info)
{}

bool __head snp_init(struct boot_params *bp)
{}

void __head __noreturn snp_abort(void)
{}

/*
 * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
 * enabled, as the alternative (fallback) logic for DMI probing in the legacy
 * ROM region can cause a crash since this region is not pre-validated.
 */
void __init snp_dmi_setup(void)
{}
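
/*
 * A sketch of the guard described in the comment above; the real body is
 * elided from this listing.
 */
#if 0
void __init snp_dmi_setup(void)
{
	/* Skip DMI probing unless the EFI config tables are available */
	if (efi_enabled(EFI_CONFIG_TABLES))
		dmi_setup();
}
#endif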

static void dump_cpuid_table(void)
{}

/*
 * It is useful from an auditing/testing perspective to provide an easy way
 * for the guest owner to know that the CPUID table has been initialized as
 * expected, but that initialization happens too early in boot to print any
 * sort of indicator, and there's not really any other good place to do it,
 * so do it here.
 *
 * If running as an SNP guest, report the current VM privilege level (VMPL).
 */
static int __init report_snp_info(void)
{}
arch_initcall(report_snp_info);
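
/*
 * A sketch of report_snp_info() as described in the comment above; the real
 * body is elided from this listing. The sev_cfg.debug field and the
 * snp_vmpl variable are assumptions here, since the sev_config definition
 * is elided and the VMPL bookkeeping lives elsewhere.
 */
#if 0
static int __init report_snp_info(void)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (cpuid_table->count) {
		pr_info("Using SNP CPUID table, %d entries present.\n",
			cpuid_table->count);

		if (sev_cfg.debug)
			dump_cpuid_table();
	}

	/* If running as an SNP guest, report the current VMPL */
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		pr_info("SNP running at VMPL%u.\n", snp_vmpl);

	return 0;
}
#endif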

static int __init init_sev_config(char *str)
{}
__setup("sev=", init_sev_config);

static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input)
{}

int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
			      struct svsm_attest_call *input)
{}
EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);

int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{}
EXPORT_SYMBOL_GPL(snp_issue_guest_request);

static struct platform_device sev_guest_device = {
	.name		= "sev-guest",
	.id		= -1,
};

static int __init snp_init_platform_device(void)
{}
device_initcall(snp_init_platform_device);

void sev_show_status(void)
{}

void __init snp_update_svsm_ca(void)
{}

#ifdef CONFIG_SYSFS
static ssize_t vmpl_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{}
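
/*
 * A sketch of the attribute show routine above; the real body is elided
 * from this listing. As in the sketch after report_snp_info(), snp_vmpl is
 * assumed to hold the VMPL the guest runs at.
 */
#if 0
static ssize_t vmpl_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", snp_vmpl);
}
#endif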

static struct kobj_attribute vmpl_attr = __ATTR_RO(vmpl);

static struct attribute *vmpl_attrs[] = {
	&vmpl_attr.attr,
	NULL
};

static struct attribute_group sev_attr_group = {
	.attrs = vmpl_attrs,
};

static int __init sev_sysfs_init(void)
{}
arch_initcall(sev_sysfs_init);
#endif // CONFIG_SYSFS