#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
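/*
 * KVM_MMU_WARN_ON() asserts at runtime when CONFIG_KVM_PROVE_MMU=y; otherwise
 * the condition is only type-checked at compile time and generates no code,
 * so it must not rely on side effects.
 */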
#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) …
#else
#define KVM_MMU_WARN_ON(x) …
#endif
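/*
 * Page table builder macros common to shadow (host) PTEs and guest PTEs,
 * parameterized by the paging level and the number of index bits consumed per
 * level.  E.g. with the usual 9 bits per level, __PT_ENT_PER_PAGE(9) is 512
 * entries per table and a level-2 entry covers a 2MiB region.
 */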
#define __PT_BASE_ADDR_MASK …
#define __PT_LEVEL_SHIFT(level, bits_per_level) …
#define __PT_INDEX(address, level, bits_per_level) …
#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) …
#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) …
#define __PT_ENT_PER_PAGE(bits_per_level) …
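/*
 * PAE "roots" (PDPTEs) carry a PRESENT bit, so the value '0' can never be a
 * valid PAE root and is used as the "invalid" sentinel.
 */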
#define INVALID_PAE_ROOT …
#define IS_VALID_PAE_ROOT(x) …
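/*
 * A "dummy" root is backed by the shared zero page and has no struct
 * kvm_mmu_page; it stands in when KVM must install a root but has no usable
 * shadow page to back it.
 */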
static inline hpa_t kvm_mmu_get_dummy_root(void)
{ … }
static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{ … }
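/* RCU-protected pointer to a TDP MMU SPTE; TDP MMU page table walks run under RCU. */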
typedef u64 __rcu *tdp_ptep_t;
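/*
 * Tracking metadata for one shadow page, i.e. one page's worth of KVM-managed
 * PTEs: the role that uniquely identifies it, the gfn it translates, a pointer
 * to the page table itself, and unsync / NX huge page bookkeeping.
 */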
struct kvm_mmu_page { … };
extern struct kmem_cache *mmu_page_header_cache;
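/*
 * The memslot address space id a shadow page belongs to; on x86 this
 * distinguishes SMM from the normal address space.
 */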
static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{ … }
static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{ … }
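/*
 * With PML-based dirty logging, hardware logs L2 GPAs for pages mapped while
 * in guest mode, which the L1 dirty log cannot use; such shadow pages must be
 * write-protected so that dirty pages are recorded via write faults instead.
 */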
static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{ … }
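/* Round @gfn down to the base of the huge page that contains it at @level. */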
static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{ … }
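/*
 * Try to allow writes to @gfn by marking its shadow pages unsync instead of
 * write-protecting them; returns non-zero if the gfn must stay write-protected,
 * e.g. because it is write-tracked.
 */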
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
gfn_t gfn, bool can_unsync, bool prefetch);
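/* Disallow/re-allow huge page mappings covering @gfn in @slot. */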
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
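/*
 * Write-protect existing mappings of @gfn in @slot at @min_level and above;
 * returns true if any SPTE was changed, i.e. a TLB flush is needed.
 */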
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn,
int min_level);
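/*
 * Flush remote TLBs for the range a mapping of @gfn at @level would cover,
 * i.e. @gfn rounded down to @level and the pages spanned by that level.
 */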
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{ … }
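/* Number of SPTEs tracked in the rmap list hanging off @rmap_head. */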
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
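/*
 * NX huge page mitigation for the iTLB multihit erratum: in effect when the
 * module parameter is set and the VM has not opted out of it.
 */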
extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{ … }
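/*
 * Per-fault state threaded through the page fault handlers: the faulting
 * address, decoded error-code flags (write/exec/user, etc.), the memslot and
 * resolved pfn, and the maximum/target mapping level.
 */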
struct kvm_page_fault { … };
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
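/*
 * Return codes of the page fault handlers, e.g. retry the access, the fault
 * was fixed, emulation is needed, or the fault was spurious.
 */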
enum { … };
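/* Set up a KVM_EXIT_MEMORY_FAULT exit to userspace describing @fault. */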
static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{ … }
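/*
 * Common entry point for guest page faults: builds a struct kvm_page_fault
 * from the error code and hands it to the TDP or shadow MMU fault handler,
 * returning one of the return codes from the enum above.
 */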
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
u64 err, bool prefetch,
int *emulation_type, u8 *level)
{ … }
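/* Highest mapping level usable for @gfn in @slot, capped at @max_level. */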
int kvm_mmu_max_mapping_level(struct kvm *kvm,
const struct kvm_memory_slot *slot, gfn_t gfn,
int max_level);
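/*
 * kvm_mmu_hugepage_adjust() clamps the fault's target mapping level to what
 * the memslot and host mapping allow; disallowed_hugepage_adjust() drops the
 * level further when installing a huge page at the current level is not
 * allowed, e.g. due to the NX huge page mitigation.
 */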
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
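/*
 * Add/remove @sp on the list of shadow pages that were kept small because of
 * the NX huge page mitigation and are candidates for the huge page recovery
 * worker.
 */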
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
#endif /* __KVM_X86_MMU_INTERNAL_H */