linux/arch/x86/kvm/mmu.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

extern bool __read_mostly enable_mmio_caching;

/* Architectural page-table bits shared by 32-bit, PAE and 64-bit paging. */
#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

/* Number of page-table levels for each supported root type. */
#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)
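/*
 * Toggling any of the role bits above changes the MMU role: e.g.
 * kvm_post_set_cr0()/kvm_post_set_cr4() reset the MMU context when the
 * changed CR bits intersect the corresponding *_ROLE_BITS mask.
 */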

static __always_inline u64 rsvd_bits(int s, int e)
{
	/* Build a mask of the bits in the inclusive range [s, e]. */
	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
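/*
 * For example, rsvd_bits(52, 62) is 0x7ff0000000000000ULL, i.e. the PTE bits
 * above a 52-bit MAXPHYADDR and below the NX bit.
 */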

static inline gfn_t kvm_mmu_max_gfn(void)
{
	/*
	 * Derive the max GFN from the *host* MAXPHYADDR: EPT/NPT cannot map
	 * GPAs wider than the host supports, so KVM never installs SPTEs for
	 * them and the TDP MMU can cap its iteration range here.
	 */
	int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;

	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}

u8 kvm_mmu_get_max_tdp_level(void);

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
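/*
 * The mask setters above are called by the vendor modules during hardware
 * setup, e.g. VMX configures EPT A/D and execute-only handling through
 * kvm_mmu_set_ept_masks().
 */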

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			 int bytes);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	/* Fast path: the current root is still valid, nothing to (re)load. */
	if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
{
	if (!guest_can_use(vcpu, X86_FEATURE_LAM))
		return 0;

	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root.hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->root_role.level);
}

static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
						    struct kvm_mmu *mmu)
{
	/*
	 * When EPT is enabled, KVM may pass CR0.WP through to the guest, i.e.
	 * @mmu's snapshot of CR0.WP and the derived permission metadata may
	 * be stale.  Refresh them on demand before doing permission checks.
	 */
	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
		return;

	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  u64 access)
{
	/* strip nested paging fault error codes */
	unsigned int pfec = access;
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * SMAP is disabled for explicit supervisor accesses if EFLAGS.AC = 1,
	 * and can never be overridden for implicit supervisor accesses.  The
	 * "SMAP is off" bit is folded into the index in place of PFEC.RSVD,
	 * which is always zero in pfec.
	 */
	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
	int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
	u32 errcode = PFERR_PRESENT_MASK;
	bool fault;

	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);

	fault = (mmu->permissions[index] >> pte_access) & 1;

	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/* pte_pkey selects the two PKRU bits (AD, WD) for the key. */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK */
		offset = (pfec & ~1) + ((pte_access & PT_USER_MASK) <<
					(PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
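/*
 * Illustrative caller (modeled on the guest page-table walker in
 * paging_tmpl.h): once a walk has accumulated the ACC_* rights and the
 * protection key of the mapping, something like
 *
 *	errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
 *	if (unlikely(errcode))
 *		goto fault;
 *
 * validates the access against the cached permission bitmaps instead of
 * re-deriving the CR0.WP/SMEP/SMAP/PKU rules on every walk.
 */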

bool kvm_mmu_may_ignore_guest_pat(void);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before the related pointers; pairs with
	 * the smp_store_release() issued when the first shadow root (and its
	 * associated rmap/metadata) is allocated.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
extern bool tdp_mmu_enabled;
#else
#define tdp_mmu_enabled false
#endif
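/* The TDP MMU is 64-bit only; 32-bit builds always use the shadow MMU. */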

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
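/*
 * Example: a 1GiB slot whose base_gfn is 2MiB-aligned covers 512 entries at
 * PG_LEVEL_2M (gfn_to_index() of the slot's last gfn is 511, plus one).
 */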

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u64 access,
				      struct x86_exception *exception)
{
	/* GPAs produced by the nested walker MMU are L2 GPAs, translate via L1. */
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;

	return translate_nested_gpa(vcpu, gpa, access, exception);
}
#endif