/* linux/arch/x86/hyperv/ivm.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/set_memory.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL

hv_ghcb __packed __aligned();

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
/*
 * Issue a Hyper-V hypercall through the GHCB page (SNP VM with paravisor).
 *
 * NOTE(review): the body appears to have been stripped to "{}" during
 * extraction.  A non-void function that falls off the end is undefined
 * behavior; restore the implementation from upstream arch/x86/hyperv/ivm.c
 * before building or using this file.
 */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{}

static inline u64 rd_ghcb_msr(void)
{}

/*
 * Write the per-CPU GHCB MSR (GHCB page GPA or MSR-protocol request).
 * (Body was stripped to "{}"; restored per upstream arch/x86/hyperv/ivm.c.)
 */
static inline void wr_ghcb_msr(u64 val)
{
	native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

/*
 * Perform one GHCB-mediated exit to the hypervisor: presumably fills in
 * the exit code/info fields of @ghcb, executes VMGEXIT, and translates the
 * result into an es_result — TODO confirm against upstream.
 *
 * NOTE(review): body stripped to "{}"; a non-void function with no return
 * is UB.  Restore from upstream arch/x86/hyperv/ivm.c before use.
 */
static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
				   u64 exit_info_1, u64 exit_info_2)
{}

/*
 * Request guest termination from the hypervisor via the GHCB MSR
 * protocol, reporting the reason-set and reason code, then halt forever.
 * (Body was stripped to "{}", which violates __noreturn; restored per
 * upstream arch/x86/hyperv/ivm.c — verify against the tree in use.)
 */
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool hv_ghcb_negotiate_protocol(void)
{}

/*
 * Write an MSR via the GHCB (SNP VM with paravisor) — presumably stages
 * the MSR number/value in the GHCB and performs an SVM_EXIT_MSR call;
 * TODO confirm against upstream.
 * NOTE(review): body stripped to "{}"; restore from upstream
 * arch/x86/hyperv/ivm.c before use.
 */
static void hv_ghcb_msr_write(u64 msr, u64 value)
{}

/*
 * Read an MSR via the GHCB (SNP VM with paravisor), storing the result
 * through @value — presumably the SVM_EXIT_MSR read counterpart of
 * hv_ghcb_msr_write(); TODO confirm against upstream.
 * NOTE(review): body stripped to "{}"; restore from upstream
 * arch/x86/hyperv/ivm.c before use.
 */
static void hv_ghcb_msr_read(u64 msr, u64 *value)
{}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
/*
 * AP bringup state for a fully enlightened SNP VM.
 * NOTE(review): the original "__aligned()" attributes were missing their
 * argument (extraction damage); PAGE_SIZE alignment restored per upstream.
 */
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

/*
 * Populate a VMSA segment register from the corresponding GDT descriptor
 * for AP startup.
 * NOTE(review): the original macro body was stripped during extraction;
 * reconstructed from upstream arch/x86/hyperv/ivm.c — verify before use.
 */
#define hv_populate_vmcb_seg(seg, gdtr_base)					\
do {										\
	if (seg.selector) {							\
		seg.base = 0;							\
		seg.limit = HV_AP_SEGMENT_LIMIT;				\
		seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);		\
		seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00);	\
	}									\
} while (0)
/*
 * Set or clear the VMSA attribute of the page at @va via RMPADJUST.
 * Returns 0 on success, non-zero RMPADJUST failure code otherwise.
 * (Body was stripped to "{}" — non-void with no return is UB; restored
 * per upstream arch/x86/hyperv/ivm.c — verify against the tree in use.)
 */
static int snp_set_vmsa(void *va, bool vmsa)
{
	u64 attrs;

	/*
	 * RMPADJUST must target the permissions of a lesser privileged
	 * (higher numbered) VMPL level, so use VMPL1 (refer to the
	 * RMPADJUST instruction in the AMD64 APM Volume 3).
	 */
	attrs = 1;
	if (vmsa)
		attrs |= RMPADJUST_VMSA_PAGE_BIT;

	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{}

/*
 * Boot an application processor in a fully enlightened SNP VM —
 * presumably builds a VMSA for the AP and asks Hyper-V to start it at
 * @start_ip; TODO confirm against upstream.
 *
 * NOTE(review): body stripped to "{}".  A non-void function falling off
 * the end is UB; restore the (lengthy) implementation from upstream
 * arch/x86/hyperv/ivm.c before use.
 */
int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
{}

#else
/* No-op fallbacks when CONFIG_AMD_MEM_ENCRYPT is disabled. */
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
/*
 * Write an MSR via a TDX hypercall (TDX VM with paravisor) — presumably
 * a TDVMCALL with EXIT_REASON_MSR_WRITE; TODO confirm against upstream.
 * NOTE(review): body stripped to "{}"; the TDX hypercall helper API has
 * changed across kernel versions, so restore from the matching upstream
 * arch/x86/hyperv/ivm.c rather than guessing.
 */
static void hv_tdx_msr_write(u64 msr, u64 val)
{}

/*
 * Read an MSR via a TDX hypercall, storing the result through @val —
 * presumably EXIT_REASON_MSR_READ; TODO confirm against upstream.
 * NOTE(review): body stripped to "{}"; restore from the matching
 * upstream arch/x86/hyperv/ivm.c.
 */
static void hv_tdx_msr_read(u64 msr, u64 *val)
{}

/*
 * Issue a Hyper-V hypercall from a TDX guest without a paravisor —
 * presumably via TDG.VP.VMCALL with @control/@param1/@param2 in the
 * GHCI-defined registers; TODO confirm against upstream.
 * NOTE(review): body stripped to "{}"; non-void with no return is UB.
 * Restore from the matching upstream arch/x86/hyperv/ivm.c.
 */
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{}

#else
/* No-op fallbacks when CONFIG_INTEL_TDX_GUEST is disabled. */
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
/*
 * Write a Hyper-V synthetic MSR via the paravisor, dispatching on the
 * isolation type (TDX vs. SNP).  No-op when no paravisor is present.
 * (Body was stripped to "{}"; restored per upstream arch/x86/hyperv/ivm.c
 * — verify against the tree in use.)
 */
void hv_ivm_msr_write(u64 msr, u64 value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_write(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_write(msr, value);
}

/*
 * Read a Hyper-V synthetic MSR via the paravisor, dispatching on the
 * isolation type (TDX vs. SNP).  No-op when no paravisor is present.
 * (Body was stripped to "{}"; restored per upstream arch/x86/hyperv/ivm.c
 * — verify against the tree in use.)
 */
void hv_ivm_msr_read(u64 msr, u64 *value)
{
	if (!ms_hyperv.paravisor_present)
		return;

	if (hv_isolation_type_tdx())
		hv_tdx_msr_read(msr, value);
	else if (hv_isolation_type_snp())
		hv_ghcb_msr_read(msr, value);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to host via hvcall.
 *
 * In Isolation VM, all guest memory is encrypted from host and guest
 * needs to set memory visible to host via hvcall before sharing memory
 * with host.
 */
/*
 * NOTE(review): body stripped to "{}".  A non-void function falling off
 * the end is UB; restore the HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY
 * hypercall implementation from upstream arch/x86/hyperv/ivm.c —
 * TODO confirm the exact hypercall plumbing against the tree in use.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
			   enum hv_mem_host_visibility visibility)
{}

/*
 * When transitioning memory between encrypted and decrypted, the caller
 * of set_memory_encrypted() or set_memory_decrypted() is responsible for
 * ensuring that the memory isn't in use and isn't referenced while the
 * transition is in progress.  The transition has multiple steps, and the
 * memory is in an inconsistent state until all steps are complete. A
 * reference while the state is inconsistent could result in an exception
 * that can't be cleanly fixed up.
 *
 * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
 * stray reference that can't be prevented by the caller, so Linux has
 * specific code to handle this case. But when the #VC and #VE exceptions
 * routed to a paravisor, the specific code doesn't work. To avoid this
 * problem, mark the pages as "not present" while the transition is in
 * progress. If load_unaligned_zeropad() causes a stray reference, a normal
 * page fault is generated instead of #VC or #VE, and the page-fault-based
 * handlers for load_unaligned_zeropad() resolve the reference.  When the
 * transition is complete, hv_vtom_set_host_visibility() marks the pages
 * as "present" again.
 */
/*
 * NOTE(review): body stripped to "{}".  Per the comment block above, this
 * should mark [kbuffer, kbuffer + pagecount pages) "not present" for the
 * duration of the encrypt/decrypt transition; non-void with no return is
 * UB.  Restore from upstream arch/x86/hyperv/ivm.c before use.
 */
static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{}

/*
 * hv_vtom_set_host_visibility - Set specified memory visible to host.
 *
 * In Isolation VM, all guest memory is encrypted from host and guest
 * needs to set memory visible to host via hvcall before sharing memory
 * with host. This function works as wrap of hv_mark_gpa_visibility()
 * with memory base and size.
 */
/*
 * NOTE(review): body stripped to "{}".  Per the comment block above, this
 * wraps hv_mark_gpa_visibility() over a base/size range and (per
 * hv_vtom_clear_present()'s comment) re-marks the pages "present" when
 * done; non-void with no return is UB.  Restore from upstream
 * arch/x86/hyperv/ivm.c before use.
 */
static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{}

/*
 * Whether a TLB flush is required on a private<->shared transition.
 * (Body was stripped to "{}" — non-void with no return is UB.  Restored
 * per upstream; NOTE(review): some kernel versions changed this to return
 * false once hv_vtom_clear_present() began flushing — verify against the
 * tree in use.)
 */
static bool hv_vtom_tlb_flush_required(bool private)
{
	return true;
}

static bool hv_vtom_cache_flush_required(void)
{}

/*
 * Report whether an MMIO address must be mapped private (encrypted).
 * (Body was stripped to "{}" — non-void with no return is UB; restored
 * per upstream arch/x86/hyperv/ivm.c — verify against the tree in use.)
 */
static bool hv_is_private_mmio(u64 addr)
{
	/*
	 * Hyper-V always provides a single IO-APIC in a guest VM.
	 * When a paravisor is used, it is emulated by the paravisor
	 * in the guest context and must be mapped private.
	 */
	if (addr >= HV_IOAPIC_BASE_ADDRESS &&
	    addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
		return true;

	/* Same with a vTPM */
	if (addr >= VTPM_BASE_ADDRESS &&
	    addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
		return true;

	return false;
}

/*
 * NOTE(review): body stripped to "{}".  Presumably installs the vTOM
 * callbacks defined above (clear_present / set_host_visibility /
 * flush-required / private-mmio) into the coco and x86_platform hooks —
 * TODO confirm.  Restore from upstream arch/x86/hyperv/ivm.c before use.
 */
void __init hv_vtom_init(void)
{}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

enum hv_isolation_type hv_get_isolation_type(void)
{}
EXPORT_SYMBOL_GPL();

/*
 * hv_is_isolation_supported - Check system runs in the Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolated VM.
 */
bool hv_isolation_type_tdx(void)
{}