#define pr_fmt(fmt) …
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "mmu_internal.h"
#include "tdp_mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "smm.h"
#include "kvm_emulate.h"
#include "page_track.h"
#include "cpuid.h"
#include "spte.h"
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kstrtox.h>
#include <linux/kthread.h>
#include <linux/wordpart.h>
#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/set_memory.h>
#include <asm/spec-ctrl.h>
#include <asm/vmx.h>
#include "trace.h"
static bool nx_hugepage_mitigation_hard_disabled;
int __read_mostly nx_huge_pages = …;
static uint __read_mostly nx_huge_pages_recovery_period_ms;
#ifdef CONFIG_PREEMPT_RT
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = …;
#endif
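/*
 * Note (not from the original source): nx_huge_pages toggles the iTLB
 * multihit mitigation, under which executable huge pages are shadowed with
 * non-executable 4KiB mappings.  nx_huge_pages_recovery_ratio and
 * nx_huge_pages_recovery_period_ms steer the background worker that
 * periodically reinstates huge pages: roughly 1/ratio of the split pages are
 * recovered each period, and when the period is 0 it is presumably derived
 * from the ratio (see calc_nx_huge_pages_recovery_period() below).  On
 * PREEMPT_RT the ratio defaults to 0, which disables recovery entirely.
 */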
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops nx_huge_pages_ops = …;
static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = …;
module_param_cb(…);
__MODULE_PARM_TYPE(…) …;
module_param_cb(…);
__MODULE_PARM_TYPE(…) …;
module_param_cb(…);
__MODULE_PARM_TYPE(…) …;
static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
bool tdp_enabled = …;
static bool __ro_after_init tdp_mmu_allowed;
#ifdef CONFIG_X86_64
bool __read_mostly tdp_mmu_enabled = …;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
#endif
static int max_huge_page_level __read_mostly;
static int tdp_root_level __read_mostly;
static int max_tdp_level __read_mostly;
#define PTE_PREFETCH_NUM …
#include <trace/events/kvm.h>
#define PTE_LIST_EXT …
struct pte_list_desc { … };
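/*
 * Note (descriptive comment, not from the original source): an rmap head
 * either refers directly to a single SPTE or, once a gfn is mapped by more
 * than one SPTE, to a chain of pte_list_desc structures, each holding up to
 * PTE_LIST_EXT SPTE pointers.  pte_list_add() and pte_list_remove() below
 * handle the promotion/demotion between the two representations.
 */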
struct kvm_shadow_walk_iterator { … };
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) …
#define for_each_shadow_entry(_vcpu, _addr, _walker) …
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) …
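/*
 * Illustrative usage of the shadow-walk iterators above (a sketch, not code
 * taken from this file): walk the current root down to the leaf SPTE for
 * @addr, here under the lockless walker protection:
 *
 *	struct kvm_shadow_walk_iterator it;
 *	u64 spte;
 *
 *	walk_shadow_page_lockless_begin(vcpu);
 *	for_each_shadow_entry_lockless(vcpu, addr, it, spte) {
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	}
 *	walk_shadow_page_lockless_end(vcpu);
 */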
static struct kmem_cache *pte_list_desc_cache;
struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;
static void mmu_spte_set(u64 *sptep, u64 spte);
struct kvm_mmu_role_regs { … };
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) …
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) …
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
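/*
 * Note (not from the original source): the two elided macros above generate
 * small predicate helpers.  BUILD_MMU_ROLE_REGS_ACCESSOR() presumably emits
 * ____is_<reg>_<name>() helpers that test a flag in a captured
 * kvm_mmu_role_regs snapshot, while BUILD_MMU_ROLE_ACCESSOR() emits
 * is_<reg>_<name>() helpers that read the corresponding bit from
 * mmu->cpu_role, so the MMU consults its cached role rather than re-reading
 * the live vCPU registers.
 */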
static inline bool is_cr0_pg(struct kvm_mmu *mmu)
{ … }
static inline bool is_cr4_pae(struct kvm_mmu *mmu)
{ … }
static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{ … }
static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
{ … }
static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu)
{ … }
static inline bool kvm_available_flush_remote_tlbs_range(void)
{ … }
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
{ … }
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
unsigned int access)
{ … }
static gfn_t get_mmio_spte_gfn(u64 spte)
{ … }
static unsigned get_mmio_spte_access(u64 spte)
{ … }
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{ … }
static int is_cpuid_PSE36(void)
{ … }
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{ … }
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{ … }
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{ … }
static u64 __get_spte_lockless(u64 *sptep)
{ … }
#else
/*
 * On 32-bit hosts a 64-bit SPTE cannot be read or written with a single
 * atomic access, so it is handled as two 32-bit halves with careful
 * ordering between them.
 */
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before bumping the count. */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * When mapping the spte from non-present to present, store the high
	 * bits first and only then set the present bit, so a concurrent
	 * walker cannot consume the spte while it is half-written.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * When changing the spte from present to non-present, clear the
	 * present bit first so a concurrent walker cannot fetch the stale
	 * high bits of a half-cleared spte.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits. */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * Lockless readers cannot load both halves atomically, so they use a
 * seqcount-like protocol: sample clear_spte_count, read the low and then the
 * high half, and retry if either the low half or the count changed in the
 * meantime.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
		     count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{ … }
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{ … }
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{ … }
static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
{ … }
static void mmu_spte_clear_no_track(u64 *sptep)
{ … }
static u64 mmu_spte_get_lockless(u64 *sptep)
{ … }
static bool mmu_spte_age(u64 *sptep)
{ … }
static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
{ … }
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{ … }
static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{ … }
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{ … }
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{ … }
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{ … }
static bool sp_has_gptes(struct kvm_mmu_page *sp);
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{ … }
static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
{ … }
static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
gfn_t gfn, unsigned int access)
{ … }
static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
unsigned int access)
{ … }
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
const struct kvm_memory_slot *slot, int level)
{ … }
#define KVM_LPAGE_MIXED_FLAG …
static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
gfn_t gfn, int count)
{ … }
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
{ … }
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
{ … }
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
bool nx_huge_page_possible)
{ … }
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
gfn_t gfn,
bool no_dirty_log)
{ … }
static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
struct kvm_rmap_head *rmap_head)
{ … }
static void pte_list_desc_remove_entry(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
struct pte_list_desc *desc, int i)
{ … }
static void pte_list_remove(struct kvm *kvm, u64 *spte,
struct kvm_rmap_head *rmap_head)
{ … }
static void kvm_zap_one_rmap_spte(struct kvm *kvm,
struct kvm_rmap_head *rmap_head, u64 *sptep)
{ … }
static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
struct kvm_rmap_head *rmap_head)
{ … }
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
{ … }
static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
const struct kvm_memory_slot *slot)
{ … }
static void rmap_remove(struct kvm *kvm, u64 *spte)
{ … }
struct rmap_iterator { … };
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
struct rmap_iterator *iter)
{ … }
static u64 *rmap_get_next(struct rmap_iterator *iter)
{ … }
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) …
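/*
 * Illustrative usage of for_each_rmap_spte() (a sketch, not code taken from
 * this file): visit every SPTE hanging off an rmap head, e.g. to
 * write-protect all mappings of a gfn:
 *
 *	struct rmap_iterator iter;
 *	u64 *sptep;
 *	bool flush = false;
 *
 *	for_each_rmap_spte(rmap_head, &iter, sptep)
 *		flush |= spte_write_protect(sptep, pt_protect);
 */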
static void drop_spte(struct kvm *kvm, u64 *sptep)
{ … }
static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
{ … }
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{ … }
static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
bool pt_protect)
{ … }
static bool spte_clear_dirty(u64 *sptep)
{ … }
static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{ … }
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{ … }
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{ … }
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{ … }
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{ … }
int kvm_cpu_dirty_log_size(void)
{ … }
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn,
int min_level)
{ … }
static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
{ … }
static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{ … }
static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level)
{ … }
struct slot_rmap_walk_iterator { … };
static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
int level)
{ … }
static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
const struct kvm_memory_slot *slot,
int start_level, int end_level,
gfn_t start_gfn, gfn_t end_gfn)
{ … }
static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{ … }
static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{ … }
#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_, \
_start_gfn, _end_gfn, _iter_) …
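/*
 * Illustrative usage of for_each_slot_rmap_range() (a sketch, not code taken
 * from this file): walk the rmaps of a memslot over a range of gfns and
 * page-table levels, with the per-level rmap head exposed via the iterator:
 *
 *	struct slot_rmap_walk_iterator iter;
 *	bool flush = false;
 *
 *	for_each_slot_rmap_range(slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
 *				 slot->base_gfn,
 *				 slot->base_gfn + slot->npages - 1, &iter) {
 *		if (iter.rmap)
 *			flush |= rmap_write_protect(iter.rmap, true);
 *	}
 */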
typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level);
static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
struct kvm_gfn_range *range,
rmap_handler_t handler)
{ … }
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{ … }
static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level)
{ … }
static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level)
{ … }
#define RMAP_RECYCLE_THRESHOLD …
static void __rmap_add(struct kvm *kvm,
struct kvm_mmu_memory_cache *cache,
const struct kvm_memory_slot *slot,
u64 *spte, gfn_t gfn, unsigned int access)
{ … }
static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
u64 *spte, gfn_t gfn, unsigned int access)
{ … }
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ … }
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{ … }
static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
{ … }
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
{ … }
static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
{ … }
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{ … }
static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
struct kvm_mmu_page *sp, u64 *parent_pte)
{ … }
static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *parent_pte)
{ … }
static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *parent_pte)
{ … }
static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{ … }
static void mark_unsync(u64 *spte)
{ … }
#define KVM_PAGE_ARRAY_NR …
struct kvm_mmu_pages { … };
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
int idx)
{ … }
static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{ … }
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec)
{ … }
#define INVALID_INDEX …
static int mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec)
{ … }
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list);
static bool sp_has_gptes(struct kvm_mmu_page *sp)
{ … }
#define for_each_valid_sp(_kvm, _sp, _list) …
#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) …
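/*
 * Illustrative usage of the lookup macro above (a sketch, not code taken
 * from this file): walk all valid shadow pages that map guest PTEs for a
 * given gfn, e.g. to check whether any of them is still unsync:
 *
 *	struct kvm_mmu_page *sp;
 *
 *	for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
 *		if (sp->unsync)
 *			return true;
 *	}
 */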
static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{ … }
static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
{ … }
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{ … }
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{ … }
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
struct list_head *invalid_list,
bool remote_flush)
{ … }
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
struct mmu_page_path { … };
#define for_each_sp(pvec, sp, parents, i) …
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
struct mmu_page_path *parents,
int i)
{ … }
static int mmu_pages_first(struct kvm_mmu_pages *pvec,
struct mmu_page_path *parents)
{ … }
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{ … }
static int mmu_sync_children(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *parent, bool can_yield)
{ … }
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{ … }
static void clear_sp_write_flooding_count(u64 *spte)
{ … }
static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
struct kvm_vcpu *vcpu,
gfn_t gfn,
struct hlist_head *sp_list,
union kvm_mmu_page_role role)
{ … }
struct shadow_page_caches { … };
static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
struct shadow_page_caches *caches,
gfn_t gfn,
struct hlist_head *sp_list,
union kvm_mmu_page_role role)
{ … }
static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
struct kvm_vcpu *vcpu,
struct shadow_page_caches *caches,
gfn_t gfn,
union kvm_mmu_page_role role)
{ … }
static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
gfn_t gfn,
union kvm_mmu_page_role role)
{ … }
static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
unsigned int access)
{ … }
static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
u64 *sptep, gfn_t gfn,
bool direct, unsigned int access)
{ … }
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
struct kvm_vcpu *vcpu, hpa_t root,
u64 addr)
{ … }
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
struct kvm_vcpu *vcpu, u64 addr)
{ … }
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{ … }
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
u64 spte)
{ … }
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{ … }
static void __link_shadow_page(struct kvm *kvm,
struct kvm_mmu_memory_cache *cache, u64 *sptep,
struct kvm_mmu_page *sp, bool flush)
{ … }
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
struct kvm_mmu_page *sp)
{ … }
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned direct_access)
{ … }
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *spte, struct list_head *invalid_list)
{ … }
static int kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{ … }
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
static int mmu_zap_unsync_children(struct kvm *kvm,
struct kvm_mmu_page *parent,
struct list_head *invalid_list)
{ … }
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
struct kvm_mmu_page *sp,
struct list_head *invalid_list,
int *nr_zapped)
{ … }
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{ … }
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{ … }
static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
unsigned long nr_to_zap)
{ … }
static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{ … }
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{ … }
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{ … }
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{ … }
static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{ … }
static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{ … }
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
gfn_t gfn, bool can_unsync, bool prefetch)
{ … }
static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
u64 *sptep, unsigned int pte_access, gfn_t gfn,
kvm_pfn_t pfn, struct kvm_page_fault *fault)
{ … }
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *start, u64 *end)
{ … }
static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *sptep)
{ … }
static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{ … }
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
const struct kvm_memory_slot *slot)
{ … }
static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
const struct kvm_memory_slot *slot,
gfn_t gfn, int max_level, bool is_private)
{ … }
int kvm_mmu_max_mapping_level(struct kvm *kvm,
const struct kvm_memory_slot *slot, gfn_t gfn,
int max_level)
{ … }
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{ … }
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
{ … }
static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{ … }
static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
{ … }
static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{ … }
static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault,
unsigned int access)
{ … }
static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault)
{ … }
static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault,
u64 *sptep, u64 old_spte, u64 new_spte)
{ … }
static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
{ … }
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{ … }
static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{ … }
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
struct list_head *invalid_list)
{ … }
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
ulong roots_to_free)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{ … }
EXPORT_SYMBOL_GPL(…);
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
u8 level)
{ … }
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{ … }
static int mmu_first_shadow_root_alloc(struct kvm *kvm)
{ … }
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{ … }
static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
{ … }
static bool is_unsync_root(hpa_t root)
{ … }
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{ … }
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
{ … }
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t vaddr, u64 access,
struct x86_exception *exception)
{ … }
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{ … }
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{ … }
static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level)
{ … }
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{ … }
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{ … }
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{ … }
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{ … }
static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
{ … }
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{ … }
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{ … }
static inline u8 kvm_max_level_for_order(int order)
{ … }
static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
u8 max_level, int gmem_order)
{ … }
static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{ … }
static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{ … }
static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
unsigned int access)
{ … }
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{ … }
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{ … }
static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{ … }
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len)
{ … }
EXPORT_SYMBOL_GPL(…);
#ifdef CONFIG_X86_64
static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{ … }
#endif
bool kvm_mmu_may_ignore_guest_pat(void)
{ … }
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{ … }
static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
u8 *level)
{ … }
long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range)
{ … }
static void nonpaging_init_context(struct kvm_mmu *context)
{ … }
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
union kvm_mmu_page_role role)
{ … }
static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
gpa_t new_pgd,
union kvm_mmu_page_role new_role)
{ … }
static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
gpa_t new_pgd,
union kvm_mmu_page_role new_role)
{ … }
static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
gpa_t new_pgd, union kvm_mmu_page_role new_role)
{ … }
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{ … }
EXPORT_SYMBOL_GPL(…);
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
unsigned int access)
{ … }
#define PTTYPE_EPT …
#define PTTYPE …
#include "paging_tmpl.h"
#undef PTTYPE
#define PTTYPE …
#include "paging_tmpl.h"
#undef PTTYPE
#define PTTYPE …
#include "paging_tmpl.h"
#undef PTTYPE
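/*
 * Note (not from the original source): paging_tmpl.h is a template that is
 * compiled three times with different PTTYPE values (elided above), once for
 * EPT and once each for 64-bit and 32-bit guest paging.  Each inclusion
 * stamps out a full guest page-table walker and page-fault handler whose
 * symbols carry a matching prefix (e.g. ept_*, paging64_*, paging32_*), so a
 * single implementation serves all three guest PTE formats.
 */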
static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
u64 pa_bits_rsvd, int level, bool nx,
bool gbpages, bool pse, bool amd)
{ … }
static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{ … }
static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
u64 pa_bits_rsvd, bool execonly,
int huge_page_level)
{ … }
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly, int huge_page_level)
{ … }
static inline u64 reserved_hpa_bits(void)
{ … }
static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{ … }
static inline bool boot_cpu_is_amd(void)
{ … }
static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
{ … }
static void
reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
{ … }
#define BYTE_MASK(access) …
static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
{ … }
static void update_pkru_bitmask(struct kvm_mmu *mmu)
{ … }
static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu)
{ … }
static void paging64_init_context(struct kvm_mmu *context)
{ … }
static void paging32_init_context(struct kvm_mmu *context)
{ … }
static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
const struct kvm_mmu_role_regs *regs)
{ … }
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu)
{ … }
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{ … }
u8 kvm_mmu_get_max_tdp_level(void)
{ … }
static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
union kvm_cpu_role cpu_role)
{ … }
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
union kvm_cpu_role cpu_role)
{ … }
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
union kvm_cpu_role cpu_role,
union kvm_mmu_page_role root_role)
{ … }
static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
union kvm_cpu_role cpu_role)
{ … }
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
unsigned long cr4, u64 efer, gpa_t nested_cr3)
{ … }
EXPORT_SYMBOL_GPL(…);
static union kvm_cpu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
bool execonly, u8 level)
{ … }
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
int huge_page_level, bool accessed_dirty,
gpa_t new_eptp)
{ … }
EXPORT_SYMBOL_GPL(…);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
union kvm_cpu_role cpu_role)
{ … }
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
union kvm_cpu_role new_mode)
{ … }
void kvm_init_mmu(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
{ … }
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{ … }
EXPORT_SYMBOL_GPL(…);
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{ … }
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{ … }
static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
{ … }
static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{ … }
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
{ … }
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
{ … }
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{ … }
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
int bytes)
{ … }
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{ … }
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes)
{ … }
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
{ … }
EXPORT_SYMBOL_GPL(…);
static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, hpa_t root_hpa)
{ … }
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, unsigned long roots)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{ … }
EXPORT_SYMBOL_GPL(…);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{ … }
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
int tdp_max_root_level, int tdp_huge_page_level)
{ … }
EXPORT_SYMBOL_GPL(…);
typedef bool (*slot_rmaps_handler)(struct kvm *kvm,
				   struct kvm_rmap_head *rmap_head,
				   const struct kvm_memory_slot *slot);
static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
const struct kvm_memory_slot *slot,
slot_rmaps_handler fn,
int start_level, int end_level,
gfn_t start_gfn, gfn_t end_gfn,
bool flush_on_yield, bool flush)
{ … }
static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
const struct kvm_memory_slot *slot,
slot_rmaps_handler fn,
int start_level, int end_level,
bool flush_on_yield)
{ … }
static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
const struct kvm_memory_slot *slot,
slot_rmaps_handler fn,
bool flush_on_yield)
{ … }
static void free_mmu_pages(struct kvm_mmu *mmu)
{ … }
static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{ … }
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{ … }
#define BATCH_ZAP_PAGES …
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{ … }
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{ … }
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{ … }
void kvm_mmu_init_vm(struct kvm *kvm)
{ … }
static void mmu_free_vm_memory_caches(struct kvm *kvm)
{ … }
void kvm_mmu_uninit_vm(struct kvm *kvm)
{ … }
static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{ … }
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{ … }
static bool slot_rmap_write_protect(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{ … }
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
int start_level)
{ … }
static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
{ … }
static bool need_topup_split_caches_or_resched(struct kvm *kvm)
{ … }
static int topup_split_caches(struct kvm *kvm)
{ … }
static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
{ … }
static void shadow_mmu_split_huge_page(struct kvm *kvm,
const struct kvm_memory_slot *slot,
u64 *huge_sptep)
{ … }
static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
const struct kvm_memory_slot *slot,
u64 *huge_sptep)
{ … }
static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{ … }
static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
const struct kvm_memory_slot *slot,
gfn_t start, gfn_t end,
int target_level)
{ … }
void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
u64 start, u64 end,
int target_level)
{ … }
void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
int target_level)
{ … }
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{ … }
EXPORT_SYMBOL_GPL(…);
static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot)
{ … }
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot)
{ … }
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{ … }
static void kvm_mmu_zap_all(struct kvm *kvm)
{ … }
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{ … }
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{ … }
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{ … }
static unsigned long mmu_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{ … }
static unsigned long mmu_shrink_count(struct shrinker *shrink,
struct shrink_control *sc)
{ … }
static struct shrinker *mmu_shrinker;
static void mmu_destroy_caches(void)
{ … }
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
{ … }
static bool get_nx_auto_mode(void)
{ … }
static void __set_nx_huge_pages(bool val)
{ … }
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{ … }
void __init kvm_mmu_x86_module_init(void)
{ … }
int kvm_mmu_vendor_module_init(void)
{ … }
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{ … }
void kvm_mmu_vendor_module_exit(void)
{ … }
static bool calc_nx_huge_pages_recovery_period(uint *period)
{ … }
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
{ … }
static void kvm_recover_nx_huge_pages(struct kvm *kvm)
{ … }
static long get_nx_huge_page_recovery_timeout(u64 start_time)
{ … }
static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
{ … }
int kvm_mmu_post_init_vm(struct kvm *kvm)
{ … }
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{ … }
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{ … }
static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
int level)
{ … }
static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
int level)
{ … }
static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
int level)
{ … }
static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, int level, unsigned long attrs)
{ … }
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range)
{ … }
void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
struct kvm_memory_slot *slot)
{ … }
#endif