linux/arch/x86/kvm/svm/sev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <uapi/linux/sev-guest.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/sev.h>

#include "mmu.h"
#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#define GHCB_VERSION_MAX	2ULL
#define GHCB_VERSION_DEFAULT	2ULL
#define GHCB_VERSION_MIN	1ULL

#define GHCB_HV_FT_SUPPORTED	(GHCB_HV_FT_SNP | GHCB_HV_FT_SNP_AP_CREATION)

/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);

/* enable/disable SEV-SNP support */
static bool sev_snp_enabled = true;
module_param_named(sev_snp, sev_snp_enabled, bool, 0444);

/* enable/disable SEV-ES DebugSwap support */
static bool sev_es_debug_swap_enabled = true;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
static u64 sev_supported_vmsa_features;
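
/*
 * The knobs above are 0444 (read-only once the module is loaded), so they
 * must be set at module load time, e.g. on a hypothetical SNP-capable host:
 *
 *	modprobe kvm_amd sev=1 sev_es=1 sev_snp=1
 */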

#define AP_RESET_HOLD_NONE		0
#define AP_RESET_HOLD_NAE_EVENT		1
#define AP_RESET_HOLD_MSR_PROTO		2

/* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
#define SNP_POLICY_MASK_API_MINOR	GENMASK_ULL(7, 0)
#define SNP_POLICY_MASK_API_MAJOR	GENMASK_ULL(15, 8)
#define SNP_POLICY_MASK_SMT		BIT_ULL(16)
#define SNP_POLICY_MASK_RSVD_MBO	BIT_ULL(17)
#define SNP_POLICY_MASK_DEBUG		BIT_ULL(19)
#define SNP_POLICY_MASK_SINGLE_SOCKET	BIT_ULL(20)

#define SNP_POLICY_MASK_VALID		(SNP_POLICY_MASK_API_MINOR	| \
					 SNP_POLICY_MASK_API_MAJOR	| \
					 SNP_POLICY_MASK_SMT		| \
					 SNP_POLICY_MASK_RSVD_MBO	| \
					 SNP_POLICY_MASK_DEBUG		| \
					 SNP_POLICY_MASK_SINGLE_SOCKET)

#define INITIAL_VMSA_GPA 0xFFFFFFFFF000

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

static int snp_decommission_context(struct kvm *kvm);

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{}
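
/*
 * Illustrative sketch (compiled out): flushing ASIDs is assumed to boil
 * down to a WBINVD on all CPUs followed by a firmware DF_FLUSH, serialized
 * against DEACTIVATE via sev_deactivate_lock.  sev_guest_df_flush() and
 * sev_do_cmd() are real psp-sev APIs; the body below is a simplification,
 * not a verbatim copy of the upstream implementation.
 */
#if 0
static int sev_flush_asids_sketch(unsigned int min_asid, unsigned int max_asid)
{
	int ret, error = 0;

	/* DEACTIVATE must not race with DF_FLUSH. */
	down_write(&sev_deactivate_lock);

	/* Purge cache lines that may hold stale data for the old keys. */
	wbinvd_on_all_cpus();

	if (sev_snp_enabled)
		ret = sev_do_cmd(SEV_CMD_SNP_DF_FLUSH, NULL, &error);
	else
		ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}
#endif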

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{}

static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
{}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{}

static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
{}

static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
{}

static int sev_asid_new(struct kvm_sev_info *sev)
{}

static unsigned int sev_get_asid(struct kvm *kvm)
{}

static void sev_asid_free(struct kvm_sev_info *sev)
{}

static void sev_decommission(unsigned int handle)
{}

/*
 * Transition a page to hypervisor-owned/shared state in the RMP table. This
 * should not fail under normal conditions, but if it does, leak the page,
 * since it will no longer be usable by the host due to RMP protections.
 */
static int kvm_rmp_make_shared(struct kvm *kvm, u64 pfn, enum pg_level level)
{}
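
/*
 * Illustrative sketch (compiled out) of the leak-on-failure policy above,
 * built on the real arch helpers rmp_make_shared() and snp_leak_pages();
 * not necessarily the exact upstream body.
 */
#if 0
static int kvm_rmp_make_shared_sketch(struct kvm *kvm, u64 pfn, enum pg_level level)
{
	if (KVM_BUG_ON(rmp_make_shared(pfn, level), kvm)) {
		/* The page is now unusable by the host; leak it. */
		snp_leak_pages(pfn, page_level_size(level) >> PAGE_SHIFT);
		return -EIO;
	}

	return 0;
}
#endif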

/*
 * Certain page-states, such as Pre-Guest and Firmware pages (as documented
 * in Chapter 5 of the SEV-SNP Firmware ABI under "Page States") cannot be
 * directly transitioned back to normal/hypervisor-owned state via RMPUPDATE
 * unless they are reclaimed first.
 *
 * Until they are reclaimed and subsequently transitioned via RMPUPDATE, they
 * might not be usable by the host due to being set as immutable or still
 * being associated with a guest ASID.
 *
 * Bug the VM and leak the page if reclaim fails, or if the RMP entry can't be
 * converted back to shared, as the page is no longer usable due to RMP
 * protections, and it's infeasible for the guest to continue on.
 */
static int snp_page_reclaim(struct kvm *kvm, u64 pfn)
{}
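
/*
 * Illustrative sketch (compiled out): reclaim is assumed to be a
 * SEV_CMD_SNP_PAGE_RECLAIM firmware command followed by an RMPUPDATE back
 * to shared state via kvm_rmp_make_shared().  struct
 * sev_data_snp_page_reclaim and sev_do_cmd() are real psp-sev interfaces;
 * the error handling below is simplified.
 */
#if 0
static int snp_page_reclaim_sketch(struct kvm *kvm, u64 pfn)
{
	struct sev_data_snp_page_reclaim data = {0};
	int fw_err, rc;

	data.paddr = __sme_set(pfn << PAGE_SHIFT);
	rc = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &fw_err);
	if (KVM_BUG(rc, kvm, "Failed to reclaim PFN %llx, rc %d fw_err %d",
		    pfn, rc, fw_err)) {
		snp_leak_pages(pfn, 1);
		return -EIO;
	}

	if (kvm_rmp_make_shared(kvm, pfn, PG_LEVEL_4K))
		return -EIO;

	return 0;
}
#endif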

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{}

/*
 * This sets up bounce buffers/firmware pages to handle SNP Guest Request
 * messages (e.g. attestation requests). See "SNP Guest Request" in the GHCB
 * 2.0 specification for more details.
 *
 * Technically, when an SNP Guest Request is issued, the guest will provide its
 * own request/response pages, which could in theory be passed along directly
 * to firmware rather than using bounce pages. However, these pages would need
 * special care:
 *
 *   - Both pages are from shared guest memory, so they need to be protected
 *     from migration/etc. occurring while firmware reads/writes to them. At a
 *     minimum, this requires elevating the ref counts and potentially needing
 *     an explicit pinning of the memory. This places additional restrictions
 *     on what type of memory backends userspace can use for shared guest
 *     memory since there is some reliance on using refcounted pages.
 *
 *   - The response page needs to be switched to Firmware-owned[1] state
 *     before the firmware can write to it, which can lead to potential
 *     host RMP #PFs if the guest is misbehaved and hands the host a
 *     guest page that KVM might write to for other reasons (e.g. virtio
 *     buffers/etc.).
 *
 * Both of these issues can be avoided completely by using separately-allocated
 * bounce pages for both the request/response pages and passing those to
 * firmware instead. So that's what is being set up here.
 *
 * Guest requests rely on message sequence numbers to ensure requests are
 * issued to firmware in the order the guest issues them, so concurrent guest
 * requests generally shouldn't happen. But a misbehaved guest could issue
 * concurrent guest requests in theory, so a mutex is used to serialize
 * access to the bounce buffers.
 *
 * [1] See the "Page States" section of the SEV-SNP Firmware ABI for more
 *     details on Firmware-owned pages, along with "RMP and VMPL Access Checks"
 *     in the APM for details on the related RMP restrictions.
 */
static int snp_guest_req_init(struct kvm *kvm)
{}
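
/*
 * Illustrative sketch (compiled out) of the bounce-buffer setup described
 * above.  snp_alloc_firmware_page() is a real psp-sev API; the
 * guest_req_buf/guest_resp_buf/guest_req_mutex field names in kvm_sev_info
 * are assumptions for illustration.
 */
#if 0
static int snp_guest_req_init_sketch(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct page *req_page;

	/* Request page: plain hypervisor-owned memory the firmware reads. */
	req_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!req_page)
		return -ENOMEM;

	/*
	 * Response page: must transition to Firmware-owned state before the
	 * PSP writes to it, hence the dedicated firmware-page allocator.
	 */
	sev->guest_resp_buf = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!sev->guest_resp_buf) {
		__free_page(req_page);
		return -EIO;
	}

	sev->guest_req_buf = page_address(req_page);
	mutex_init(&sev->guest_req_mutex);

	return 0;
}
#endif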

static void snp_guest_req_cleanup(struct kvm *kvm)
{}

static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
			    struct kvm_sev_init *data,
			    unsigned long vm_type)
{}

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_guest_init2(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{}

static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{}

static unsigned long get_num_contig_pages(unsigned long idx,
				struct page **inpages, unsigned long npages)
{}

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{}

static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
				    int *error)
{}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *vaddr,
				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)
{}

static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{}

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{}
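
/*
 * Illustrative sketch (compiled out): issuing SEV_CMD_SEND_START with a
 * zeroed session buffer is assumed to make the firmware report the
 * required session length rather than start a send.  Field names follow
 * struct sev_data_send_start in <linux/psp-sev.h>; the flow below is a
 * simplification.
 */
#if 0
static int
__sev_send_start_query_session_length_sketch(struct kvm *kvm,
					     struct kvm_sev_cmd *argp,
					     struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data = {};
	int ret;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	/* The firmware reports how large the session blob needs to be. */
	params->session_len = data.session_len;
	if (copy_to_user(u64_to_user_ptr(argp->data), params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

	return ret;
}
#endif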

static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{}

static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static bool is_cmd_allowed_from_mirror(u32 cmd_id)
{}

static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{}

static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{}

/* vCPU mutex subclasses. */
enum sev_migration_role {
	SEV_MIGRATION_SOURCE = 0,
	SEV_MIGRATION_TARGET,
	SEV_NR_MIGRATION_ROLES,
};

static int sev_lock_vcpus_for_migration(struct kvm *kvm,
					enum sev_migration_role role)
{}

static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{}

static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
{}

static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
{}

int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{}

int sev_dev_get_attr(u32 group, u64 attr, u64 *val)
{}

/*
 * The guest context contains all the information, keys and metadata
 * associated with the guest that the firmware tracks to implement SEV
 * and SNP features. The firmware stores the guest context in a
 * hypervisor-provided page via the SNP_GCTX_CREATE command.
 */
static void *snp_context_create(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}
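
/*
 * Illustrative sketch (compiled out): the context page is assumed to come
 * from the firmware-page allocator and to be registered with the PSP via
 * SEV_CMD_SNP_GCTX_CREATE.  struct sev_data_snp_addr is a real psp-sev
 * structure, though the exact field usage here is a simplification.
 */
#if 0
static void *snp_context_create_sketch(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct sev_data_snp_addr data = {};
	void *context;
	int rc;

	/* Allocate memory used for the guest context page. */
	context = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!context)
		return NULL;

	data.address = __psp_pa(context);
	rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_GCTX_CREATE, &data,
			     &argp->error);
	if (rc) {
		snp_free_firmware_page(context);
		return NULL;
	}

	return context;
}
#endif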

static int snp_bind_asid(struct kvm *kvm, int *error)
{}

static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

struct sev_gmem_populate_args {
	__u8 type;
	int sev_fd;
	int fw_error;
};

static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
				  void __user *src, int order, void *opaque)
{}

static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{}

int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
{}

int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range)
{}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{}

int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range)
{}

int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{}

static int snp_decommission_context(struct kvm *kvm)
{}

void sev_vm_destroy(struct kvm *kvm)
{}

void __init sev_set_cpu_caps(void)
{}

void __init sev_hardware_setup(void)
{}

void sev_hardware_unsetup(void)
{}

int sev_cpu_init(struct svm_cpu_data *sd)
{}

/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{}
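
/*
 * Illustrative sketch (compiled out): X86_FEATURE_SME_COHERENT,
 * X86_FEATURE_VM_PAGE_FLUSH and MSR_AMD64_VM_PAGE_FLUSH are real CPU
 * interfaces, but the flow below is a simplified take on the flush, not
 * necessarily the exact upstream body.
 */
#if 0
static void sev_flush_encrypted_page_sketch(struct kvm_vcpu *vcpu, void *va)
{
	unsigned int asid = sev_get_asid(vcpu->kvm);

	/* Coherent parts keep caches consistent across C-bit aliases. */
	if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
		return;

	/*
	 * VM_PAGE_FLUSH takes a host virtual address and a guest ASID; fall
	 * back to a full WBINVD on all CPUs if it's unsupported or faults.
	 */
	if (!boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH) ||
	    wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, (u64)(unsigned long)va | asid))
		wbinvd_on_all_cpus();
}
#endif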

void sev_guest_memory_reclaimed(struct kvm *kvm)
{}

void sev_free_vcpu(struct kvm_vcpu *vcpu)
{}

static void dump_ghcb(struct vcpu_svm *svm)
{}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{}

static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{}

static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{}

static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{}

void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{}

void pre_sev_run(struct vcpu_svm *svm, int cpu)
{}

#define GHCB_SCRATCH_AREA_LIMIT		(16ULL * PAGE_SIZE)
static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{}

static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
			      unsigned int pos)
{}

static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
{}

static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
{}

static int snp_rmptable_psmash(kvm_pfn_t pfn)
{}

static int snp_complete_psc_msr(struct kvm_vcpu *vcpu)
{}

static int snp_begin_psc_msr(struct vcpu_svm *svm, u64 ghcb_msr)
{}

struct psc_buffer {
	struct psc_hdr hdr;
	struct psc_entry entries[];
} __packed;

static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc);

static void snp_complete_psc(struct vcpu_svm *svm, u64 psc_ret)
{}

static void __snp_complete_one_psc(struct vcpu_svm *svm)
{}

static int snp_complete_one_psc(struct kvm_vcpu *vcpu)
{}

static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
{}

static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
{}

/*
 * Invoked as part of svm_vcpu_reset() processing of an init event.
 */
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
{}

static int sev_snp_ap_creation(struct vcpu_svm *svm)
{}

static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
{}

static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
{}

static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{}

int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
{}

int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
{}

static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
{}

void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
{}

static void sev_es_init_vmcb(struct vcpu_svm *svm)
{}

void sev_init_vmcb(struct vcpu_svm *svm)
{}

void sev_es_vcpu_reset(struct vcpu_svm *svm)
{}

void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
{}

void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{}

struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{}

void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
{}

static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
{}

static u8 max_level_for_order(int order)
{}

static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
{}

int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{}

void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{}

int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
{}