linux/arch/x86/kvm/mmu/paging_tmpl.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <[email protected]>
 *   Avi Kivity   <[email protected]>
 */

/*
 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page tables,
 * as well as guest EPT tables, so the code in this file is compiled thrice,
 * once per guest PTE type.  The per-type defines are #undef'd at the end.
 */
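/*
 * Illustrative sketch of how mmu.c instantiates this template (the exact
 * PTTYPE_EPT value is arbitrary; treat this as a rough sketch of the
 * upstream include dance rather than a verbatim copy):
 *
 *	#define PTTYPE_EPT 18
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * Each inclusion emits a full set of ept_*, paging64_* and paging32_* walker
 * functions built from the same source below.
 */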

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_LEVEL_BITS 9
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_LEVEL_BITS 10
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true

	#define PT32_DIR_PSE36_SIZE 4
	#define PT32_DIR_PSE36_SHIFT 13
	#define PT32_DIR_PSE36_MASK \
		(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_LEVEL_BITS 9
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
	#error Invalid PTTYPE value
#endif
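
/*
 * Quick reference for the per-type values above: 64-bit and EPT entries are
 * 8 bytes, so a 4 KiB page table holds 512 entries and each level consumes
 * 9 bits of the address (PT_LEVEL_BITS == 9); legacy 32-bit entries are
 * 4 bytes, giving 1024 entries per table and 10 index bits per level.
 * EPT keeps accessed/dirty in bits 8 and 9 of the entry, and only when EPT
 * A/D is enabled for the MMU, whereas the x86 formats always keep them in
 * bits 5 and 6.
 */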

/* Common logic, but per-type values.  These also need to be undefined. */
#define PT_BASE_ADDR_MASK	((pt_element_t)__PT_BASE_ADDR_MASK)
#define PT_LVL_ADDR_MASK(lvl)	__PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_LVL_OFFSET_MASK(lvl)	__PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_INDEX(addr, lvl)	__PT_INDEX(addr, lvl, PT_LEVEL_BITS)

#define PT_GUEST_DIRTY_MASK	(1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK	(1 << PT_GUEST_ACCESSED_SHIFT)

#define gpte_to_gfn_lvl		FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte)	gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
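
/*
 * Worked example (illustrative, PTTYPE == 64 so PT_LEVEL_BITS == 9): for the
 * guest virtual address 0x7f1234567000, PT_INDEX(addr, lvl) selects
 *
 *	lvl 4 (PML4): (addr >> 39) & 0x1ff = 0x0fe
 *	lvl 3 (PDPT): (addr >> 30) & 0x1ff = 0x048
 *	lvl 2 (PD):   (addr >> 21) & 0x1ff = 0x1a2
 *	lvl 1 (PT):   (addr >> 12) & 0x1ff = 0x167
 *
 * and gpte_to_gfn() strips the low 12 bits and the non-address bits from a
 * gpte to recover the guest frame number the entry points at.
 */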

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {};
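
/*
 * Rough usage sketch (simplified; error handling omitted, field names follow
 * the upstream struct and are listed only for orientation):
 *
 *	struct guest_walker walker;
 *
 *	if (!FNAME(walk_addr)(&walker, vcpu, fault->addr, access))
 *		inject walker.fault back into the guest;
 *	...
 *	r = FNAME(fetch)(vcpu, fault, &walker);
 *
 * i.e. the walker carries the guest-side translation (per-level table gfns
 * and raw gptes, accumulated access bits, the final gfn, or a pending
 * x86_exception) from the guest walk into the shadow-page fetch.
 */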

#if PTTYPE == 32
static inline gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
#endif
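
/*
 * PSE-36 note (illustrative example, not upstream text): a 32-bit PDE that
 * maps a 4 MiB page stores physical-address bits above bit 31 in PDE bits
 * 16:13.  PDE bit 13 corresponds to physical bit 32, i.e. gfn bit 20, hence
 * shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT = 7.  For example, a PDE
 * with bits 14:13 == 0b11 (gpte & PT32_DIR_PSE36_MASK == 0x6000) yields
 * pse36_gfn_delta() == 0x6000 << 7 == 0x300000, placing the frame at
 * physical 0x3_0000_0000 (12 GiB) plus the PDE's low address bits.
 */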

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{}

static inline int FNAME(is_present_gpte)(unsigned long pte)
{}

static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{}

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  u64 gpte)
{}

/*
 * For PTTYPE_EPT, a page table can be executable but not readable on
 * supported processors.  Therefore, set_spte does not automatically set
 * bit 0 if execute-only is supported.  Here, we repurpose ACC_USER_MASK to
 * signify readability, since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{}
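
/*
 * Illustrative mapping (a sketch of what the EPT flavour of this helper
 * does; macro names are the upstream VMX/ACC definitions):
 *
 *	access  = (gpte & VMX_EPT_WRITABLE_MASK)   ? ACC_WRITE_MASK : 0;
 *	access |= (gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK  : 0;
 *	access |= (gpte & VMX_EPT_READABLE_MASK)   ? ACC_USER_MASK  : 0;
 *
 * so an execute-only EPT entry (X=1, R=0, W=0) yields ACC_EXEC_MASK without
 * ACC_USER_MASK, and the missing "readable" bit is what ACC_USER_MASK stands
 * in for.
 */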

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     gpa_t addr, int write_fault)
{}

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{}

static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
				       unsigned int level, unsigned int gpte)
{}
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gpa_t addr, u64 access)
{}
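
/*
 * Shape of the walk, heavily simplified (see the upstream implementation for
 * the real locking, retry, permission and PK/SMAP handling):
 *
 *	pte = mmu->get_guest_pgd(vcpu);
 *	for (level = walker->level; level >= PG_LEVEL_4K; level--) {
 *		index   = PT_INDEX(addr, level);
 *		pte_gpa = gfn_to_gpa(table_gfn) + index * sizeof(pt_element_t);
 *		read gpte from guest memory at pte_gpa;
 *		if (!FNAME(is_present_gpte)(pte) ||
 *		    FNAME(is_rsvd_bits_set)(mmu, pte, level))
 *			record an x86_exception in walker->fault and fail;
 *		accumulate pte_access from FNAME(gpte_access)(pte);
 *		if (FNAME(is_last_gpte)(mmu, level, pte))
 *			break;
 *		table_gfn = gpte_to_gfn(pte);
 *	}
 *	walker->gfn = gpte_to_gfn_lvl(pte, level) + offset within the mapping;
 *	FNAME(update_accessed_dirty_bits)(...) once the walk has succeeded;
 */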

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
{}

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte)
{}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			 struct guest_walker *gw)
{}
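
/*
 * Sketch of the shadow-side fetch (simplified): walk the shadow page tables
 * from the root towards fault->goal_level, finding or allocating the child
 * kvm_mmu_page that shadows gw->table_gfn[] at each level and linking it in,
 * then install the leaf spte for fault->pfn using the access bits computed
 * during the guest walk.  A write to a gfn that is itself shadowed as a page
 * table cannot simply be made writable, which is why this path may ask the
 * caller to emulate the faulting instruction instead.
 */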

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{}
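
/*
 * Rough control flow of the handler above (simplified sketch; the exact
 * return codes and tracking checks follow the upstream implementation):
 *
 *	if (!FNAME(walk_addr)(&walker, vcpu, fault->addr, ...))
 *		inject walker.fault into the guest;	the guest's own #PF
 *	else if (the faulting gfn is write-tracked, e.g. a shadowed page table)
 *		ask the caller to emulate the instruction;
 *	else
 *		resolve fault->pfn and call FNAME(fetch)(vcpu, fault, &walker);
 *
 * i.e. a failed guest walk becomes a fault injected into the guest, while a
 * successful walk is turned into shadow ptes by FNAME(fetch)().
 */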

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{}

/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       gpa_t addr, u64 access,
			       struct x86_exception *exception)
{}

/*
 * Using the information in sp->shadowed_translation (kvm_mmu_page_get_gfn()) is
 * safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Returns
 * < 0: failed to sync spte
 *   0: the spte is synced and no tlb flushing is required
 * > 0: the spte is synced and tlb flushing is required
 */
static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
{}
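
/*
 * Rough sketch of the sync step (simplified): re-read the guest pte that
 * backs slot i of this shadow page, and
 *
 *	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
 *		the spte was zapped, no flush needed;
 *	else
 *		recompute the spte from the fresh gpte's gfn and access bits
 *		and report whether the change requires a TLB flush.
 */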

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY