linux/arch/x86/coco/sev/shared.c

// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <[email protected]>
 *
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */

#include <asm/setup_data.h>

#ifndef __BOOT_COMPRESSED
#define error(v)
#define has_cpuflag(f)
#define sev_printk(fmt, ...)
#define sev_printk_rtl(fmt, ...)
#else
#undef WARN
#define WARN(condition, format...)
#define sev_printk(fmt, ...)
#define sev_printk_rtl(fmt, ...)
#undef vc_forward_exception
#define vc_forward_exception(c)
#endif

/*
 * SVSM related information:
 *   When running under an SVSM, the VMPL that Linux is executing at must be
 *   non-zero. The VMPL is therefore used to indicate the presence of an SVSM.
 *
 *   During boot, the page tables are set up as identity mapped and later
 *   changed to use kernel virtual addresses. Maintain separate virtual and
 *   physical addresses for the CAA to allow SVSM functions to be used during
 *   early boot, both with identity mapped virtual addresses and proper kernel
 *   virtual addresses.
 */
u8 snp_vmpl __ro_after_init;
EXPORT_SYMBOL_GPL(snp_vmpl);
static struct svsm_ca *boot_svsm_caa __ro_after_init;
static u64 boot_svsm_caa_pa __ro_after_init;

static struct svsm_ca *svsm_get_caa(void);
static u64 svsm_get_caa_pa(void);
static int svsm_perform_call_protocol(struct svsm_call *call);
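
/*
 * Illustrative sketch (not the verbatim implementation) of how a CAA
 * accessor can select between the two addresses maintained above: early in
 * boot only the boot CAA is usable, while the fully-booted kernel can switch
 * to a per-CPU CAA. The sev_cfg.use_cas flag and the svsm_caa per-CPU
 * variable shown here are assumptions for illustration.
 */
#if 0
static struct svsm_ca *svsm_get_caa(void)
{
	/* Early boot: only the (identity-mapped) boot CAA is usable. */
	if (!sev_cfg.use_cas)
		return boot_svsm_caa;

	return this_cpu_read(svsm_caa);
}
#endif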

/* I/O parameters for CPUID-related helpers */
struct cpuid_leaf {};

/*
 * Individual entries of the SNP CPUID table, as defined by the SNP
 * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
 */
struct snp_cpuid_fn {} __packed;

/*
 * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
 * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
 * of 64 entries per CPUID table.
 */
#define SNP_CPUID_COUNT_MAX 64

struct snp_cpuid_table {} __packed;
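
/*
 * For reference, the entry and table layouts mandated by the ABI revisions
 * cited above are roughly the following; treat the field names as
 * illustrative rather than authoritative:
 */
#if 0
struct snp_cpuid_fn {
	u32 eax_in;
	u32 ecx_in;
	u64 xcr0_in;
	u64 xss_in;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u64 __reserved;
} __packed;

struct snp_cpuid_table {
	u32 count;
	u32 __reserved1;
	u64 __reserved2;
	struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
} __packed;
#endif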

/*
 * Since feature negotiation related variables are set early in the boot
 * process, they must reside in the .data section so as not to be zeroed
 * out when the .bss section is later cleared.
 *
 * GHCB protocol version negotiated with the hypervisor.
 */
static u16 ghcb_version __ro_after_init;

/* Copy of the SNP firmware's CPUID page. */
static struct snp_cpuid_table cpuid_table_copy __ro_after_init;

/*
 * These will be initialized based on CPUID table so that non-present
 * all-zero leaves (for sparse tables) can be differentiated from
 * invalid/out-of-range leaves. This is needed since all-zero leaves
 * still need to be post-processed.
 */
static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;

static bool __init sev_es_check_cpu_features(void)
{}

static void __head __noreturn
sev_es_terminate(unsigned int set, unsigned int reason)
{}

/*
 * The hypervisor features are available from GHCB version 2 onward.
 */
static u64 get_hv_features(void)
{}
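
/*
 * Minimal sketch of the feature query via the GHCB MSR protocol, assuming
 * the GHCB_MSR_HV_FT_* encoding helpers from the SEV common definitions;
 * illustrative, not the verbatim implementation:
 */
#if 0
static u64 get_hv_features(void)
{
	u64 val;

	/* The hypervisor feature request only exists from version 2 on. */
	if (ghcb_version < 2)
		return 0;

	sev_es_wr_ghcb_msr(GHCB_MSR_HV_FT_REQ);
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_HV_FT_RESP)
		return 0;

	return GHCB_MSR_HV_FT_RESP_VAL(val);
}
#endif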

static void snp_register_ghcb_early(unsigned long paddr)
{}

static bool sev_es_negotiate_protocol(void)
{}

static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{}

static bool vc_decoding_needed(unsigned long exit_code)
{}

static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
				      struct pt_regs *regs,
				      unsigned long exit_code)
{}

static void vc_finish_insn(struct es_em_ctxt *ctxt)
{}

static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{}

static inline int svsm_process_result_codes(struct svsm_call *call)
{}

/*
 * Issue a VMGEXIT to call the SVSM:
 *   - Load the SVSM register state (RAX, RCX, RDX, R8 and R9)
 *   - Set the CA call pending field to 1
 *   - Issue VMGEXIT
 *   - Save the SVSM return register state (RAX, RCX, RDX, R8 and R9)
 *   - Perform atomic exchange of the CA call pending field
 *
 *   - See the "Secure VM Service Module for SEV-SNP Guests" specification for
 *     details on the calling convention.
 *     - The calling convention loosely follows the Microsoft X64 calling
 *       convention by putting arguments in RCX, RDX, R8 and R9.
 *     - RAX specifies the SVSM protocol/callid as input and the return code
 *       as output.
 */
static __always_inline void svsm_issue_call(struct svsm_call *call, u8 *pending)
{}
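
/*
 * Sketch of the sequence described above, pinning the protocol registers
 * with register-asm variables around the VMGEXIT; illustrative, not the
 * verbatim implementation:
 */
#if 0
static __always_inline void svsm_issue_call(struct svsm_call *call, u8 *pending)
{
	register unsigned long rax asm("rax") = call->rax;
	register unsigned long rcx asm("rcx") = call->rcx;
	register unsigned long rdx asm("rdx") = call->rdx;
	register unsigned long r8  asm("r8")  = call->r8;
	register unsigned long r9  asm("r9")  = call->r9;

	/* Mark the call pending before entering the SVSM. */
	call->caa->call_pending = 1;

	asm volatile("rep; vmmcall\n\t"
		     : "+r" (rax), "+r" (rcx), "+r" (rdx), "+r" (r8), "+r" (r9)
		     : : "memory");

	/* Atomically exchange the call pending field and save the results. */
	*pending = xchg(&call->caa->call_pending, *pending);

	call->rax_out = rax;
	call->rcx_out = rcx;
	call->rdx_out = rdx;
	call->r8_out  = r8;
	call->r9_out  = r9;
}
#endif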

static int svsm_perform_msr_protocol(struct svsm_call *call)
{}

static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call)
{}

static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt,
					  u64 exit_code, u64 exit_info_1,
					  u64 exit_info_2)
{}

static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
{}

static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
{}

static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{}

static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{}

/*
 * This may be called early while still running on the initial identity
 * mapping. Use RIP-relative addressing to obtain the correct address
 * both while running on the initial identity mapping and after the
 * switch-over to kernel virtual addresses later in boot.
 */
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{}
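
/*
 * Sketch of the RIP-relative access described above; whether this is done
 * via inline asm as shown or via a rip_rel_ptr()-style helper is an
 * implementation detail, so treat this as illustrative:
 */
#if 0
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
	void *ptr;

	/* LEA with a RIP-relative operand is correct under both mappings. */
	asm ("lea cpuid_table_copy(%%rip), %0"
	     : "=r" (ptr)
	     : "p" (&cpuid_table_copy));

	return ptr;
}
#endif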

/*
 * The SNP Firmware ABI, Revision 0.9, Section 7.1, details the use of
 * XCR0_IN and XSS_IN to encode multiple versions of 0xD subfunctions 0
 * and 1 based on the corresponding features enabled by a particular
 * combination of XCR0 and XSS registers so that a guest can look up the
 * version corresponding to the features currently enabled in its XCR0/XSS
 * registers. The only value that differs between these versions/table
 * entries is the enabled XSAVE area size advertised via EBX.
 *
 * While hypervisors may choose to make use of this support, it is more
 * robust/secure for a guest to simply find the entry corresponding to the
 * base/legacy XSAVE area size (XCR0=1 or XCR0=3), and then calculate the
 * XSAVE area size using subfunctions 2 through 64, as documented in APM
 * Volume 3, Rev 3.31, Appendix E.3.8, which is what is done here.
 *
 * Since base/legacy XSAVE area size is documented as 0x240, use that value
 * directly rather than relying on the base size in the CPUID table.
 *
 * Return: XSAVE area size on success, 0 otherwise.
 */
static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
{}
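
/*
 * Sketch of the calculation described above: walk the 0xD subfunction
 * entries for each enabled xfeature, accumulating sizes for the compacted
 * format or taking the maximum offset+size for the non-compacted one.
 * Illustrative, not verbatim:
 */
#if 0
static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	u64 xfeatures_found = 0;
	u32 xsave_size = 0x240;		/* base/legacy XSAVE area size */
	int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64))
			continue;
		if (!(xfeatures_en & BIT_ULL(e->ecx_in)))
			continue;
		if (xfeatures_found & BIT_ULL(e->ecx_in))
			continue;

		xfeatures_found |= BIT_ULL(e->ecx_in);

		if (compacted)
			xsave_size += e->eax;
		else
			xsave_size = max(xsave_size, e->eax + e->ebx);
	}

	/* A table entry must exist for every enabled extended feature. */
	if (xfeatures_found != (xfeatures_en & GENMASK_ULL(63, 2)))
		return 0;

	return xsave_size;
}
#endif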

static bool __head
snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
{}

static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{}

static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 struct cpuid_leaf *leaf)
{}

/*
 * Returns -EOPNOTSUPP if the feature is not enabled. Any other non-zero
 * return value should be treated as fatal by the caller.
 */
static int __head
snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{}
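
/*
 * Sketch of the -EOPNOTSUPP contract documented above: an empty CPUID table
 * means the feature is not enabled, a validated entry is post-processed, and
 * leaves not found in the table read as zero. Illustrative only; the range
 * checks against cpuid_std_range_max and friends that decide whether an
 * all-zero leaf still needs post-processing are elided here:
 */
#if 0
static int __head
snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	/* SNP CPUID table not set up: let the caller fall back. */
	if (!cpuid_table->count)
		return -EOPNOTSUPP;

	if (!snp_cpuid_get_validated_func(leaf)) {
		/* Not in the table: start from all-zero values. */
		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
		return 0;
	}

	return snp_cpuid_postprocess(ghcb, ctxt, leaf);
}
#endif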

/*
 * Boot VC Handler - This is the first VC handler during boot. There is no GHCB
 * page yet, so it only supports the MSR-based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{}
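
/*
 * Sketch of the MSR-protocol CPUID fetch the boot VC handler relies on
 * before a GHCB page exists: one VMGEXIT round trip per output register,
 * assuming the GHCB_CPUID_REQ()/GHCB_RESP_CODE() encoding helpers.
 * Illustrative, not verbatim:
 */
#if 0
static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
{
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg_idx));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		return -EIO;

	/* The hypervisor returns the register value in the upper 32 bits. */
	*reg = (val >> 32);

	return 0;
}
#endif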

static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
					   unsigned long address,
					   bool write)
{}

static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
					  void *src, char *buf,
					  unsigned int data_size,
					  unsigned int count,
					  bool backwards)
{}

static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
					   void *dst, char *buf,
					   unsigned int data_size,
					   unsigned int count,
					   bool backwards)
{}

#define IOIO_TYPE_STR
#define IOIO_TYPE_IN
#define IOIO_TYPE_INS
#define IOIO_TYPE_OUT
#define IOIO_TYPE_OUTS

#define IOIO_REP

#define IOIO_ADDR_64
#define IOIO_ADDR_32
#define IOIO_ADDR_16

#define IOIO_DATA_32
#define IOIO_DATA_16
#define IOIO_DATA_8

#define IOIO_SEG_ES
#define IOIO_SEG_DS

static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{}

static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{}

static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{}

static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt,
				      unsigned long exit_code)
{}

struct cc_setup_data {};

/*
 * Search for a Confidential Computing blob passed in as a setup_data entry
 * via the Linux Boot Protocol.
 */
static __head
struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
{}
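
/*
 * Sketch of the setup_data walk described above, assuming the SETUP_CC_BLOB
 * entry type and a cc_setup_data layout of a setup_data header followed by
 * a 32-bit physical address of the blob. Illustrative only:
 */
#if 0
static __head
struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
{
	struct setup_data *hdr = (struct setup_data *)bp->hdr.setup_data;

	while (hdr) {
		if (hdr->type == SETUP_CC_BLOB) {
			struct cc_setup_data *ccd = (struct cc_setup_data *)hdr;

			return (struct cc_blob_sev_info *)(unsigned long)ccd->cc_blob_address;
		}

		hdr = (struct setup_data *)hdr->next;
	}

	return NULL;
}
#endif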

/*
 * Initialize the kernel's copy of the SNP CPUID table, and set up the
 * pointer that will be used to access it.
 *
 * Maintaining a direct mapping of the SNP CPUID table used by firmware would
 * be possible as an alternative, but the approach is brittle since the
 * mapping needs to be updated in sync with all the changes to virtual memory
 * layout and related mapping facilities throughout the boot process.
 */
static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
{}
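
/*
 * Sketch of the table setup described above: sanity-check the firmware
 * page, copy it into cpuid_table_copy, and record the per-range maximum
 * leaf values used later to classify out-of-range requests. Illustrative,
 * not verbatim (RIP-relative accesses to the range variables are elided):
 */
#if 0
static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
{
	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
	int i;

	if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys;
	if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table = snp_cpuid_get_table();
	memcpy((void *)cpuid_table, cpuid_table_fw, sizeof(*cpuid_table));

	/* Record range maximums so sparse all-zero leaves can be told apart. */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		if (fn->eax_in == 0x0)
			cpuid_std_range_max = fn->eax;
		else if (fn->eax_in == 0x40000000)
			cpuid_hyp_range_max = fn->eax;
		else if (fn->eax_in == 0x80000000)
			cpuid_ext_range_max = fn->eax;
	}
}
#endif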

static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
				    int ret, u64 svsm_ret)
{}

static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
{}

static void svsm_pval_4k_page(unsigned long paddr, bool validate)
{}

static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
{}

static void pval_pages(struct snp_psc_desc *desc)
{}

static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
					struct svsm_pvalidate_call *pc)
{}

static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry,
				       struct svsm_pvalidate_call *pc)
{}

static void svsm_pval_pages(struct snp_psc_desc *desc)
{}

static void pvalidate_pages(struct snp_psc_desc *desc)
{}

static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
{}

static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
					    unsigned long exit_code)
{}

/*
 * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
 * services needed when not running in VMPL0.
 */
static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
{}
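
/*
 * Heavily hedged sketch of SVSM discovery: RMPADJUST succeeds only when the
 * guest runs at VMPL0, so a failing probe indicates an SVSM is present, in
 * which case the secrets page supplies the VMPL and the CA GPA. The secrets
 * page field names and the probe target (boot_ghcb_page) are assumptions
 * for illustration; sanity checks and termination paths are elided:
 */
#if 0
static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info)
{
	struct snp_secrets_page *secrets_page;
	u64 caa;

	/* RMPADJUST on an arbitrary page succeeds only at VMPL0. */
	if (!rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, 1))
		return false;	/* VMPL0: no SVSM present */

	/* Running under an SVSM: the secrets page supplies VMPL and CAA. */
	secrets_page = (struct snp_secrets_page *)cc_info->secrets_phys;
	snp_vmpl = secrets_page->svsm_guest_vmpl;

	caa = secrets_page->svsm_caa;

	/* The CA is identity-mapped at this point in boot. */
	boot_svsm_caa = (struct svsm_ca *)caa;
	boot_svsm_caa_pa = caa;

	return true;
}
#endif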