linux/arch/x86/kvm/mmu/mmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <[email protected]>
 *   Avi Kivity   <[email protected]>
 */
#define pr_fmt(fmt)

#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "mmu_internal.h"
#include "tdp_mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "smm.h"
#include "kvm_emulate.h"
#include "page_track.h"
#include "cpuid.h"
#include "spte.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kstrtox.h>
#include <linux/kthread.h>
#include <linux/wordpart.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/set_memory.h>
#include <asm/spec-ctrl.h>
#include <asm/vmx.h>

#include "trace.h"

static bool nx_hugepage_mitigation_hard_disabled;

int __read_mostly nx_huge_pages = -1;
static uint __read_mostly nx_huge_pages_recovery_period_ms;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp);
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = get_nx_huge_pages,
};

static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
	.set = set_nx_huge_pages_recovery_param,
	.get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
		&nx_huge_pages_recovery_period_ms, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");

static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

/*
 * When set to true, this variable enables Two-Dimensional Paging (TDP),
 * where the hardware walks two sets of page tables:
 * 1. the guest-virtual to guest-physical tables
 * 2. while doing 1., the guest-physical to host-physical tables
 * If the hardware supports this, shadow paging is not needed.
 */
bool tdp_enabled = false;
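
/*
 * Illustrative sketch, not upstream code: a vendor module (VMX/SVM) that
 * detects two-dimensional paging support would enable TDP through
 * kvm_configure_mmu(), declared later in this file.  The argument values
 * below are hypothetical.
 */
#if 0
static void example_enable_tdp(void)
{
	/* TDP on, no forced root level, up to 5-level paging, 1GiB huge pages */
	kvm_configure_mmu(true, 0, 5, PG_LEVEL_1G);
}
#endif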

static bool __ro_after_init tdp_mmu_allowed;

#ifdef CONFIG_X86_64
bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
#endif

static int max_huge_page_level __read_mostly;
static int tdp_root_level __read_mostly;
static int max_tdp_level __read_mostly;

#define PTE_PREFETCH_NUM		8

#include <trace/events/kvm.h>

/* make pte_list_desc fit well in cache lines */
#define PTE_LIST_EXT 14

/*
 * struct pte_list_desc is the core data structure used to implement a custom
 * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
 * given GFN when used in the context of rmaps.  Using a custom list allows KVM
 * to optimize for the common case where many GFNs will have at most a handful
 * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
 * memory footprint, which in turn improves runtime performance by exploiting
 * cache locality.
 *
 * A list is comprised of one or more pte_list_desc objects (descriptors).
 * Each individual descriptor stores up to PTE_LIST_EXT SPTEs.  If a descriptor
 * is full and a new SPTE needs to be added, a new descriptor is allocated and
 * becomes the head of the list.  This means that, by definition, all tail
 * descriptors are full.
 *
 * Note, the metadata fields are deliberately placed at the start of the
 * structure to optimize the cacheline layout; accessing the descriptor will
 * touch only a single cacheline so long as @spte_count <= 6 (or if only the
 * descriptor's metadata is accessed).
 */
struct pte_list_desc {};
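
/*
 * Illustrative sketch of a descriptor layout matching the description above
 * (not necessarily the real field names): the metadata comes first, followed
 * by the packed SPTE pointers.
 */
#if 0
struct example_pte_list_desc {
	struct example_pte_list_desc *more;	/* next (older, full) descriptor */
	u32 spte_count;		/* number of SPTEs stored in *this* descriptor */
	u32 tail_count;		/* number of SPTEs stored in all tail descriptors */
	u64 *sptes[PTE_LIST_EXT];	/* the SPTE pointers themselves */
};
#endif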

struct kvm_shadow_walk_iterator {};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)

#define for_each_shadow_entry(_vcpu, _addr, _walker)

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)

static struct kmem_cache *pte_list_desc_cache;
struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static void mmu_spte_set(u64 *sptep, u64 spte);

struct kvm_mmu_role_regs {};

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

/*
 * Yes, lots of underscores.  They're a hint that you probably shouldn't be
 * reading from the role_regs.  Once the root_role is constructed, it becomes
 * the single source of truth for the MMU's state.
 */
#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
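
/*
 * Illustrative sketch, not the actual macro body: each
 * BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) invocation above generates a
 * helper roughly along these lines (the struct kvm_mmu_role_regs field names
 * are assumptions here), e.g. for (cr0, pg, X86_CR0_PG):
 */
#if 0
static inline bool ____is_cr0_pg(const struct kvm_mmu_role_regs *regs)
{
	return !!(regs->cr0 & X86_CR0_PG);
}
#endif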

/*
 * The MMU itself (with a valid role) is the single source of truth for the
 * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
 * and the vCPU may be incorrect/irrelevant.
 */
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
BUILD_MMU_ROLE_ACCESSOR(ext,  efer, lma);

static inline bool is_cr0_pg(struct kvm_mmu *mmu)
{}

static inline bool is_cr4_pae(struct kvm_mmu *mmu)
{}

static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{}

static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
{}

static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
						  struct kvm_mmu *mmu)
{}

static inline bool kvm_available_flush_remote_tlbs_range(void)
{}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);

/* Flush the range of guest memory mapped by the given SPTE. */
static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
{}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{}

static gfn_t get_mmio_spte_gfn(u64 spte)
{}

static unsigned get_mmio_spte_access(u64 spte)
{}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{}

static int is_cpuid_PSE36(void)
{}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{}

static u64 __get_spte_lockless(u64 *sptep)
{}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first, then set the present bit, so the CPU cannot
	 * fetch this spte while we are still setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first to avoid the vCPU fetching the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using this lightweight way to read the spte on x86_32 comes
 * from gup_get_pte (mm/gup.c).
 *
 * An spte TLB flush may be pending, because flushes are coalesced and we
 * are running outside of the MMU lock.  Therefore we need to protect
 * against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{}

/* Rules for using mmu_spte_update:
 * Update the state bits; this means the mapped pfn is not changed.
 *
 * Whenever an MMU-writable SPTE is overwritten with a read-only SPTE, remote
 * TLBs must be flushed. Otherwise rmap_write_protect will find a read-only
 * spte, even though the writable spte might be cached on a CPU's TLB.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{}
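
/*
 * Illustrative sketch of the flush rule described above, not the upstream
 * implementation: once an MMU-writable SPTE loses its writable bit, remote
 * TLBs may still hold the writable translation and must be flushed.
 * is_mmu_writable_spte() and is_writable_pte() are helpers from spte.h.
 */
#if 0
static bool example_spte_update_needs_flush(u64 old_spte, u64 new_spte)
{
	return is_mmu_writable_spte(old_spte) && !is_writable_pte(new_spte);
}
#endif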

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent while tracking the
 * state bits; it is used to clear a last-level sptep.
 * Returns the old PTE.
 */
static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
{}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about the state bits of the sptep;
 * it is used for upper-level sptes.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{}

static u64 mmu_spte_get_lockless(u64 *sptep)
{}

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{}

static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
{}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{}

static bool sp_has_gptes(struct kvm_mmu_page *sp);

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{}

/*
 * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
 * that the SPTE itself may have more constrained access permissions than
 * what the guest enforces. For example, a guest may create an executable
 * huge PTE but KVM may disallow execution to mitigate iTLB multihit.
 */
static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
{}

static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
					 gfn_t gfn, unsigned int access)
{}

static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
				    unsigned int access)
{}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
		const struct kvm_memory_slot *slot, int level)
{}

/*
 * The most significant bit in disallow_lpage tracks whether or not memory
 * attributes are mixed, i.e. not identical for all gfns at the current level.
 * The lower order bits are used to refcount other cases where a hugepage is
 * disallowed, e.g. if KVM is shadowing a page table at the gfn.
 */
#define KVM_LPAGE_MIXED_FLAG	BIT(31)
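
/*
 * Illustrative sketch of the encoding described above (not upstream code):
 * the MSB flags mixed attributes, the remaining bits form a refcount of
 * other reasons a huge page is disallowed.
 */
#if 0
static bool example_lpage_attrs_mixed(const struct kvm_lpage_info *linfo)
{
	return linfo->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
}

static unsigned int example_lpage_disallow_count(const struct kvm_lpage_info *linfo)
{
	return linfo->disallow_lpage & ~KVM_LPAGE_MIXED_FLAG;
}
#endif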

static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{}

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
{}

void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
{}

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				 bool nx_huge_page_possible)
{}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
							   gfn_t gfn,
							   bool no_dirty_log)
{}

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */
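
/*
 * Illustrative sketch of the encoding described above (not upstream code);
 * the pte_list_desc field names are assumptions for the example.
 */
#if 0
static u64 *example_rmap_peek_first(struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;

	if (!rmap_head->val)
		return NULL;				/* empty chain */

	if (!(rmap_head->val & 1))
		return (u64 *)rmap_head->val;		/* exactly one mapping */

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	return desc->sptes[0];				/* head of a longer chain */
}
#endif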

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
			struct kvm_rmap_head *rmap_head)
{}

static void pte_list_desc_remove_entry(struct kvm *kvm,
				       struct kvm_rmap_head *rmap_head,
				       struct pte_list_desc *desc, int i)
{}

static void pte_list_remove(struct kvm *kvm, u64 *spte,
			    struct kvm_rmap_head *rmap_head)
{}

static void kvm_zap_one_rmap_spte(struct kvm *kvm,
				  struct kvm_rmap_head *rmap_head, u64 *sptep)
{}

/* Return true if at least one SPTE was zapped, false otherwise */
static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
				   struct kvm_rmap_head *rmap_head)
{}

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
{}

static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
					 const struct kvm_memory_slot *slot)
{}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
{}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{}

#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)

static void drop_spte(struct kvm *kvm, u64 *sptep)
{}

static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
{}

/*
 * Write-protect the specified @sptep; @pt_protect indicates whether the
 * write protection is caused by protecting a shadow page table.
 *
 * Note: write protection differs between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be made writable at any time if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be made writable only after
 *   unsync-ing the shadow page.
 *
 * Return true if the TLB needs to be flushed.
 */
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{}

static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
			       bool pt_protect)
{}

static bool spte_clear_dirty(u64 *sptep)
{}

static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       const struct kvm_memory_slot *slot)
{}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{}

/**
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear the D-bit for
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn_offset, unsigned long mask)
{}

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * We need to care about huge page mappings: e.g. during dirty logging we may
 * have such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{}

int kvm_cpu_dirty_log_size(void)
{}

bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level)
{}

static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
{}

static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			   const struct kvm_memory_slot *slot)
{}

static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			 struct kvm_memory_slot *slot, gfn_t gfn, int level)
{}

struct slot_rmap_walk_iterator {};

static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
				 int level)
{}

static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
				const struct kvm_memory_slot *slot,
				int start_level, int end_level,
				gfn_t start_gfn, gfn_t end_gfn)
{}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)

typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level);

static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
						 struct kvm_gfn_range *range,
						 rmap_handler_t handler)
{}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{}

static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			 struct kvm_memory_slot *slot, gfn_t gfn, int level)
{}

static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn, int level)
{}

#define RMAP_RECYCLE_THRESHOLD 1000

static void __rmap_add(struct kvm *kvm,
		       struct kvm_mmu_memory_cache *cache,
		       const struct kvm_memory_slot *slot,
		       u64 *spte, gfn_t gfn, unsigned int access)
{}

static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
		     u64 *spte, gfn_t gfn, unsigned int access)
{}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{}

static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
{}

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
{}

static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
{}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{}

static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{}

static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{}

static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{}

static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{}

static void mark_unsync(u64 *spte)
{}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{}

static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

static bool sp_has_gptes(struct kvm_mmu_page *sp)
{}

#define for_each_valid_sp(_kvm, _sp, _list)

#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)

static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{}

static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
{}

static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{}

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{}

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{}

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

struct mmu_page_path {};

#define for_each_sp(pvec, sp, parents, i)

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{}

static int mmu_sync_children(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *parent, bool can_yield)
{}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{}

static void clear_sp_write_flooding_count(u64 *spte)
{}

/*
 * The vCPU is required when finding indirect shadow pages; the shadow
 * page may already exist and syncing it needs the vCPU pointer in
 * order to read guest page tables.  Direct shadow pages are never
 * unsync, thus @vcpu can be NULL if @role.direct is true.
 */
static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
						     struct kvm_vcpu *vcpu,
						     gfn_t gfn,
						     struct hlist_head *sp_list,
						     union kvm_mmu_page_role role)
{}

/* Caches used when allocating a new shadow page. */
struct shadow_page_caches {};

static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
						      struct shadow_page_caches *caches,
						      gfn_t gfn,
						      struct hlist_head *sp_list,
						      union kvm_mmu_page_role role)
{}

/* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
						      struct kvm_vcpu *vcpu,
						      struct shadow_page_caches *caches,
						      gfn_t gfn,
						      union kvm_mmu_page_role role)
{}

static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
						    gfn_t gfn,
						    union kvm_mmu_page_role role)
{}

static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
						  unsigned int access)
{}

static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
						 u64 *sptep, gfn_t gfn,
						 bool direct, unsigned int access)
{}

static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{}

static void __link_shadow_page(struct kvm *kvm,
			       struct kvm_mmu_memory_cache *cache, u64 *sptep,
			       struct kvm_mmu_page *sp, bool flush)
{}

static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{}

static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{}

/* Returns the number of zapped non-leaf child shadow pages. */
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *spte, struct list_head *invalid_list)
{}

static int kvm_mmu_page_unlink_children(struct kvm *kvm,
					struct kvm_mmu_page *sp,
					struct list_head *invalid_list)
{}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{}

static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
{}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{}

static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
						  unsigned long nr_to_zap)
{}

static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{}

static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{}

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{}

static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{}

static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{}

/*
 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
 * as KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
 * be write-protected.
 */
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch)
{}

static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
			u64 *sptep, unsigned int pte_access, gfn_t gfn,
			kvm_pfn_t pfn, struct kvm_page_fault *fault)
{}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{}

/*
 * Lookup the mapping level for @gfn in the current mm.
 *
 * WARNING!  Use of host_pfn_mapping_level() requires the caller and the end
 * consumer to be tied into KVM's handlers for MMU notifier events!
 *
 * There are several ways to safely use this helper:
 *
 * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
 *   consuming it.  In this case, mmu_lock doesn't need to be held during the
 *   lookup, but it does need to be held while checking the MMU notifier.
 *
 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
 *   event for the hva.  This can be done by explicitly checking the MMU notifier
 *   or by ensuring that KVM already has a valid mapping that covers the hva.
 *
 * - Do not use the result to install new mappings, e.g. use the host mapping
 *   level only to decide whether or not to zap an entry.  In this case, it's
 *   not required to hold mmu_lock (though it's highly likely the caller will
 *   want to hold mmu_lock anyways, e.g. to modify SPTEs).
 *
 * Note!  The lookup can still race with modifications to host page tables, but
 * the above "rules" ensure KVM will not _consume_ the result of the walk if a
 * race with the primary MMU occurs.
 */
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
				  const struct kvm_memory_slot *slot)
{}
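
/*
 * Illustrative sketch of usage pattern #1 above, not upstream code: do the
 * lockless lookup, then re-check the MMU notifier sequence under mmu_lock
 * before consuming the result.  The surrounding variables are assumptions.
 */
#if 0
	unsigned long mmu_seq = kvm->mmu_invalidate_seq;
	int level;

	smp_rmb();	/* pairs with the notifier bumping mmu_invalidate_seq */

	level = host_pfn_mapping_level(kvm, gfn, slot);

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		/* an invalidation raced with the lookup, don't consume 'level' */
		write_unlock(&kvm->mmu_lock);
		return RET_PF_RETRY;
	}
	/* ... 'level' may be consumed here ... */
	write_unlock(&kvm->mmu_lock);
#endif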

static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       gfn_t gfn, int max_level, bool is_private)
{}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level)
{}

void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}

void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
{}

static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}

static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
{}

static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}

static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
				   struct kvm_page_fault *fault,
				   unsigned int access)
{}

static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault)
{}

/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
				    struct kvm_page_fault *fault,
				    u64 *sptep, u64 old_spte, u64 new_spte)
{}
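
/*
 * Illustrative sketch of the "fix" step described above, not the upstream
 * implementation: the new SPTE is only installed if nothing else changed the
 * SPTE in the meantime, which an atomic compare-and-exchange expresses
 * directly.
 */
#if 0
static bool example_fix_spte(u64 *sptep, u64 old_spte, u64 new_spte)
{
	/* fails, and returns false, if *sptep no longer equals old_spte */
	return try_cmpxchg64(sptep, &old_spte, new_spte);
}
#endif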

static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
{}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between walk_shadow_page_lockless_{begin,end}.
 *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
 */
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{}

/*
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
 */
static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}

static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{}

/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
			ulong roots_to_free)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);

void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);

static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
			    u8 level)
{}

static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{}

static int mmu_first_shadow_root_alloc(struct kvm *kvm)
{}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{}

static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
{}

static bool is_unsync_root(hpa_t root)
{}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{}

void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
{}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  gpa_t vaddr, u64 access,
				  struct x86_exception *exception)
{}

static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between walk_shadow_page_lockless_{begin,end}.
 */
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{}

static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			      int *root_level)
{}

/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{}

static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{}

static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 struct kvm_page_fault *fault)
{}

static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{}

static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
{}

static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu,
				    struct kvm_page_fault *fault)
{}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{}

static inline u8 kvm_max_level_for_order(int order)
{}

static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
					u8 max_level, int gmem_order)
{}

static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
				   struct kvm_page_fault *fault)
{}

static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}

static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			   unsigned int access)
{}

/*
 * Returns true if the page fault is stale and needs to be retried, i.e. if the
 * root was invalidated by a memslot update or a relevant mmu_notifier fired.
 */
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
				struct kvm_page_fault *fault)
{}

static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
				struct kvm_page_fault *fault)
{}

int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len)
{}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

#ifdef CONFIG_X86_64
static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
				  struct kvm_page_fault *fault)
{}
#endif

bool kvm_mmu_may_ignore_guest_pat(void)
{}

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}

static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
			    u8 *level)
{}

long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
				    struct kvm_pre_fault_memory *range)
{}

static void nonpaging_init_context(struct kvm_mmu *context)
{}

static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
				  union kvm_mmu_page_role role)
{}

/*
 * Find out if a previously cached root matching the new pgd/role is available,
 * and insert the current root as the MRU in the cache.
 * If a matching root is found, it is assigned to kvm_mmu->root and
 * true is returned.
 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
 * evicted to make room for the current root, and false is returned.
 */
static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
					      gpa_t new_pgd,
					      union kvm_mmu_page_role new_role)
{}

/*
 * Find out if a previously cached root matching the new pgd/role is available.
 * On entry, mmu->root is invalid.
 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
 * of the cache becomes invalid, and true is returned.
 * If no match is found, kvm_mmu->root is left invalid and false is returned.
 */
static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
					     gpa_t new_pgd,
					     union kvm_mmu_page_role new_role)
{}

static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
{}

void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);

static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			   unsigned int access)
{}

#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
				    u64 pa_bits_rsvd, int level, bool nx,
				    bool gbpages, bool pse, bool amd)
{}

static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context)
{}

static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
					u64 pa_bits_rsvd, bool execonly,
					int huge_page_level)
{}

static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly, int huge_page_level)
{}

static inline u64 reserved_hpa_bits(void)
{}

/*
 * The page table on the host is the shadow page table for the page
 * table in the guest or an AMD nested guest; its MMU features completely
 * follow the features of the guest.
 */
static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context)
{}

static inline bool boot_cpu_is_amd(void)
{}

/*
 * The direct page table on the host uses as many MMU features as
 * possible; however, KVM currently does not do execution-protection.
 */
static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
{}

/*
 * Same as the comments in reset_shadow_zero_bits_mask(), except this is
 * the shadow page table for an Intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
{}

#define BYTE_MASK(access)


static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
{}

/*
* PKU is an additional mechanism by which paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
static void update_pkru_bitmask(struct kvm_mmu *mmu)
{}
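
/*
 * Worked example of the lookup described above (illustrative only, not the
 * upstream permission_fault() code).  'pkey' is the protection key taken
 * from the leaf PTE and 'shift' stands in for the index formed from the
 * error code and the page tables' U bit; both are assumptions here.
 */
#if 0
	/* PKRU holds an AD bit and a WD bit per key, at bit position 2*pkey. */
	u32 pkru_ad_wd = (vcpu->arch.pkru >> (pkey * 2)) & 3;

	/* The cached bitmask selects which of those two bits matter here. */
	u32 fault_bits = pkru_ad_wd & (mmu->pkru_mask >> shift);

	/* A non-zero result means the access is a protection-key violation. */
#endif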

static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu)
{}

static void paging64_init_context(struct kvm_mmu *context)
{}

static void paging32_init_context(struct kvm_mmu *context)
{}

static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
					    const struct kvm_mmu_role_regs *regs)
{}

void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu)
{}

static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{}

u8 kvm_mmu_get_max_tdp_level(void)
{}

static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
				union kvm_cpu_role cpu_role)
{}

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
			     union kvm_cpu_role cpu_role)
{}

static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
				    union kvm_cpu_role cpu_role,
				    union kvm_mmu_page_role root_role)
{}

static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
				union kvm_cpu_role cpu_role)
{}

void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
{}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);

static union kvm_cpu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
				   bool execonly, u8 level)
{}

void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp)
{}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
			     union kvm_cpu_role cpu_role)
{}

static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
				union kvm_cpu_role new_mode)
{}

void kvm_init_mmu(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_init_mmu);

void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
{}

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{}

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{}

static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
{}

static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{}

void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
{}

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    int *bytes)
{}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{}

static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{}

void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			 int bytes)
{}

int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);

static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				      u64 addr, hpa_t root_hpa)
{}

void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			     u64 addr, unsigned long roots)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);


void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{}

void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level)
{}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);

/* The return value indicates if a TLB flush on all vCPUs is needed. */
typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot);

static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
					      const struct kvm_memory_slot *slot,
					      slot_rmaps_handler fn,
					      int start_level, int end_level,
					      gfn_t start_gfn, gfn_t end_gfn,
					      bool flush_on_yield, bool flush)
{}

static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
					    const struct kvm_memory_slot *slot,
					    slot_rmaps_handler fn,
					    int start_level, int end_level,
					    bool flush_on_yield)
{}

static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
					       const struct kvm_memory_slot *slot,
					       slot_rmaps_handler fn,
					       bool flush_on_yield)
{}

static void free_mmu_pages(struct kvm_mmu *mmu)
{}

static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{}

#define BATCH_ZAP_PAGES	10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{}

/*
 * Fast-invalidate all shadow pages, using a lock-break technique to zap
 * obsolete pages.
 *
 * This is required when a memslot is being deleted or the VM is being
 * destroyed; in these cases we must ensure that the KVM MMU does not use
 * any resource of the slot being deleted, or of any slot, after this
 * function has been called.
 */
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{}

void kvm_mmu_init_vm(struct kvm *kvm)
{}

static void mmu_free_vm_memory_caches(struct kvm *kvm)
{}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{}

static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{}

/*
 * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
 * (not including it)
 */
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{}

static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot)
{}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level)
{}

static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
{}

static bool need_topup_split_caches_or_resched(struct kvm *kvm)
{}

static int topup_split_caches(struct kvm *kvm)
{}

static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
{}

static void shadow_mmu_split_huge_page(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       u64 *huge_sptep)

{}

static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
					  const struct kvm_memory_slot *slot,
					  u64 *huge_sptep)
{}

static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm,
					    struct kvm_rmap_head *rmap_head,
					    const struct kvm_memory_slot *slot)
{}

static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
						const struct kvm_memory_slot *slot,
						gfn_t start, gfn_t end,
						int target_level)
{}

/* Must be called with the mmu_lock held in write-mode. */
void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot,
				   u64 start, u64 end,
				   int target_level)
{}

void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
					const struct kvm_memory_slot *memslot,
					int target_level)
{}

static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 const struct kvm_memory_slot *slot)
{}

static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
					   const struct kvm_memory_slot *slot)
{}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *slot)
{}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{}

static void kvm_mmu_zap_all(struct kvm *kvm)
{}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{}

static unsigned long mmu_shrink_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
{}

static unsigned long mmu_shrink_count(struct shrinker *shrink,
				      struct shrink_control *sc)
{}

static struct shrinker *mmu_shrinker;

static void mmu_destroy_caches(void)
{}

static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
{}

static bool get_nx_auto_mode(void)
{}

static void __set_nx_huge_pages(bool val)
{}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{}

/*
 * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
 * its default value of -1 is technically undefined behavior for a boolean.
 * Forward the module init call to SPTE code so that it too can handle module
 * params that need to be resolved/snapshot.
 */
void __init kvm_mmu_x86_module_init(void)
{}
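
/*
 * Illustrative sketch of how the "-1 means auto" default described above is
 * resolved at module init (simplified): get_nx_auto_mode() reports whether
 * the host CPU is affected by the iTLB multihit erratum.
 */
#if 0
	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());
#endif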

/*
 * The bulk of the MMU initialization is deferred until the vendor module is
 * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
 * to be reset when a potentially different vendor module is loaded.
 */
int kvm_mmu_vendor_module_init(void)
{}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{}

void kvm_mmu_vendor_module_exit(void)
{}

/*
 * Calculate the effective recovery period, accounting for '0' meaning "let KVM
 * select a halving time of 1 hour".  Returns true if recovery is enabled.
 */
static bool calc_nx_huge_pages_recovery_period(uint *period)
{}
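
/*
 * Illustrative sketch of the computation described above, not the upstream
 * implementation: a period of 0 means "derive the period from the ratio so
 * that a full recovery cycle takes roughly an hour".
 */
#if 0
	if (!period_ms) {
		ratio = min(ratio, 3600u);	/* keep the period at >= 1 second */
		period_ms = 60 * 60 * 1000 / ratio;
	}
#endif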

static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
{}

static void kvm_recover_nx_huge_pages(struct kvm *kvm)
{}

static long get_nx_huge_page_recovery_timeout(u64 start_time)
{}

static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
{}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{}

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
					struct kvm_gfn_range *range)
{}

static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
				int level)
{}

static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
				 int level)
{}

static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
			       int level)
{}

static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
			       gfn_t gfn, int level, unsigned long attrs)
{}

bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
					 struct kvm_gfn_range *range)
{}

void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
					    struct kvm_memory_slot *slot)
{}
#endif