// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD SVM-SEV Host Support.
 *
 * Copyright (C) 2023 Advanced Micro Devices, Inc.
 *
 * Author: Ashish Kalra <ashish.kalra@amd.com>
 *
 */

#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/iommu.h>
#include <linux/amd-iommu.h>

#include <asm/sev.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid.h>
#include <asm/cmdline.h>
#include <asm/iommu.h>

/*
 * The RMP entry format is not architectural. The format is defined in the
 * PPR for Family 19h Model 01h, Rev B1 processors.
 */
struct rmpentry {
	…
} __packed;

/*
 * The first 16KB from RMP_BASE is used by the processor for bookkeeping, so
 * this offset must be added when looking up an RMP entry.
 */
#define RMPTABLE_CPU_BOOKKEEPING_SZ	…

/* Mask to apply to a PFN to get the first PFN of a 2MB page */
#define PFN_PMD_MASK	…

static u64 probed_rmp_base, probed_rmp_size;

static struct rmpentry *rmptable __ro_after_init;
static u64 rmptable_max_pfn __ro_after_init;

static LIST_HEAD(snp_leaked_pages_list);
static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);

static unsigned long snp_nr_leaked_pages;

#undef pr_fmt
#define pr_fmt(fmt)	…

static int __mfd_enable(unsigned int cpu)
{
	…
}

static __init void mfd_enable(void *arg)
{
	…
}

static int __snp_enable(unsigned int cpu)
{
	…
}

static __init void snp_enable(void *arg)
{
	…
}

#define RMP_ADDR_MASK	…

bool snp_probe_rmptable_info(void)
{
	…
}

static void __init __snp_fixup_e820_tables(u64 pa)
{
	…
}

void __init snp_fixup_e820_tables(void)
{
	…
}

/*
 * Do the necessary preparations which are verified by the firmware as
 * described in the SNP_INIT_EX firmware command description in the SNP
 * firmware ABI spec.
 */
static int __init snp_rmptable_init(void)
{
	…
}

/*
 * This must be called after the IOMMU has been initialized.
 */
device_initcall(snp_rmptable_init);

static struct rmpentry *get_rmpentry(u64 pfn)
{
	…
}

static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
{
	…
}

int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/*
 * Dump the raw RMP entry for a particular PFN. These bits are documented in
 * the PPR for a particular CPU model and provide useful information about how
 * a particular PFN is being utilized by the kernel/firmware at the time
 * certain unexpected events occur, such as RMP faults.
 */
static void dump_rmpentry(u64 pfn)
{
	…
}

void snp_dump_hva_rmpentry(unsigned long hva)
{
	…
}

/*
 * PSMASH a 2MB-aligned page into 4K pages in the RMP table while preserving
 * the Validated bit.
 */
int psmash(u64 pfn)
{
	…
}
EXPORT_SYMBOL_GPL(…);
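/*
 * Illustrative sketch only, not part of the original file: one plausible
 * caller-side use of psmash() together with snp_lookup_rmpentry() and
 * rmp_make_shared() (all declared in <asm/sev.h>), reclaiming a single 4K
 * page that may currently sit under a 2MB private RMP entry. The helper
 * name example_reclaim_4k is hypothetical; real callers (e.g. in KVM) may
 * differ in detail.
 */
static __maybe_unused int example_reclaim_4k(u64 pfn)
{
	bool assigned;
	int level, ret;

	ret = snp_lookup_rmpentry(pfn, &assigned, &level);
	if (ret)
		return ret;

	/* Nothing to do if the page is already hypervisor-owned/shared. */
	if (!assigned)
		return 0;

	/*
	 * psmash() operates on 2MB-aligned PFNs, so split a 2MB private
	 * entry first before transitioning a single 4K page within it.
	 */
	if (level > PG_LEVEL_4K) {
		ret = psmash(pfn & PFN_PMD_MASK);
		if (ret)
			return ret;
	}

	return rmp_make_shared(pfn, PG_LEVEL_4K);
}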
/*
 * If the kernel uses a 2MB or larger directmap mapping to write to an address,
 * and that mapping contains any 4KB pages that are set to private in the RMP
 * table, an RMP #PF will trigger and cause a host crash. Hypervisor code that
 * owns the PFNs being transitioned will never attempt such a write, but other
 * kernel tasks writing to other PFNs in the range may trigger these checks
 * inadvertently due to a large directmap mapping that happens to overlap such
 * a PFN.
 *
 * Prevent this by splitting any 2MB+ mappings that might end up containing a
 * mix of private/shared PFNs as a result of a subsequent RMPUPDATE for the
 * PFN/rmp_level passed in.
 *
 * Note that there is no attempt here to scan all the RMP entries for the 2MB
 * physical range, since it would only be worthwhile in determining if a
 * subsequent RMPUPDATE for a 4KB PFN would result in all the entries being of
 * the same shared/private state, thus avoiding the need to split the mapping.
 * But that would mean the entries are currently in a mixed state, and so the
 * mapping would have already been split as a result of prior transitions.
 * And since the 4K split is only done if the mapping is 2MB+, and there isn't
 * currently a mechanism in place to restore 2MB+ mappings, such a check would
 * not provide any usable benefit.
 *
 * More specifics on how these checks are carried out can be found in APM
 * Volume 2, "RMP and VMPL Access Checks".
 */
static int adjust_direct_map(u64 pfn, int rmp_level)
{
	…
}

/*
 * These operations are expected to be infrequent enough that no mutual
 * exclusion of updaters is needed; thus the overlap error condition below
 * should happen very rarely and would get resolved relatively quickly by
 * the firmware.
 *
 * If not, one could consider introducing a mutex here to serialize concurrent
 * RMP updates and thus reduce the number of cases where the firmware needs to
 * lock 2M ranges to protect against concurrent updates.
 *
 * The optimal solution would be range locking to avoid locking disjoint
 * regions unnecessarily, but there's no support for that yet.
 */
static int rmpupdate(u64 pfn, struct rmp_state *state)
{
	…
}

/* Transition a page to guest-owned/private state in the RMP table. */
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/* Transition a page to hypervisor-owned/shared state in the RMP table. */
int rmp_make_shared(u64 pfn, enum pg_level level)
{
	…
}
EXPORT_SYMBOL_GPL(…);

void snp_leak_pages(u64 pfn, unsigned int npages)
{
	…
}
EXPORT_SYMBOL_GPL(…);

void kdump_sev_callback(void)
{
	…
}
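/*
 * Illustrative sketch only, not part of the original file: the kind of
 * directmap split that the adjust_direct_map() comment above describes,
 * expressed with the existing set_memory_4k() interface from
 * <linux/set_memory.h>. The helper name example_split_direct_map_2mb is
 * hypothetical, and the logic of the elided adjust_direct_map() may differ.
 */
static __maybe_unused int example_split_direct_map_2mb(u64 pfn)
{
	/* Align down to the 2MB region that covers this PFN ... */
	unsigned long vaddr = (unsigned long)pfn_to_kaddr(pfn & PFN_PMD_MASK);

	/*
	 * ... and demote any 2MB+ directmap mapping over it to 4K pages, so
	 * a later RMPUPDATE cannot leave a private PFN underneath a large
	 * mapping that other kernel writers might touch.
	 */
	return set_memory_4k(vaddr, PTRS_PER_PMD);
}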