linux/arch/x86/kvm/mmu/spte.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_memtype_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static u8 __init kvm_get_host_maxphyaddr(void)
{}
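
/*
 * Illustrative sketch, not the upstream body; the "sketch_" name is
 * hypothetical. Prefer the raw MAXPHYADDR from CPUID leaf 0x80000008, as
 * boot_cpu_data.x86_phys_bits may have been reduced to carve out MKTME/SME
 * key ID bits, which the CPU does not treat as reserved.
 */
static u8 __init __maybe_unused sketch_host_maxphyaddr(void)
{
	if (boot_cpu_data.extended_cpuid_level >= 0x80000008)
		return cpuid_eax(0x80000008) & 0xff;

	/* No MAXPHYADDR leaf; fall back to what the kernel detected. */
	return boot_cpu_data.x86_phys_bits;
}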

void __init kvm_mmu_spte_module_init(void)
{}
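
/*
 * Sketch of the module-init step (hypothetical "sketch_" name): snapshot
 * userspace's desire for MMIO caching, since enable_mmio_caching itself can
 * later be overridden when a vendor module (re)loads.
 */
static void __init __maybe_unused sketch_spte_module_init(void)
{
	allow_mmio_caching = enable_mmio_caching;
}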

static u64 generation_mmio_spte_mask(u64 gen)
{}
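
/*
 * Sketch (hypothetical "sketch_" name; assumes the MMIO_SPTE_GEN_* layout
 * from spte.h): the memslot generation is split across two disjoint bit
 * ranges of the MMIO SPTE.
 */
static u64 __maybe_unused sketch_generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK);

	mask  = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}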

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{}
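
/*
 * Sketch of MMIO SPTE construction (hypothetical "sketch_" name, using the
 * shadow_mmio_* masks declared above): combine the MMIO signature, the
 * permitted access bits, the GPA, and the current memslot generation. The
 * GPA bits that collide with shadow_nonpresent_or_rsvd_mask are stashed in
 * a second, shifted copy.
 */
static u64 __maybe_unused sketch_make_mmio_spte(struct kvm_vcpu *vcpu,
						u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = sketch_generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}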

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{}
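
/*
 * Simplified sketch (hypothetical "sketch_" name): struct-page-backed memory
 * counts as MMIO only if the page is reserved, while PFNs without a struct
 * page are MMIO unless E820 reports them as RAM. The upstream check handles
 * additional cases (e.g. ZONE_DEVICE pages) not shown here.
 */
static bool __maybe_unused sketch_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}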

/*
 * Returns true if the SPTE has bits that may be set without holding mmu_lock.
 * The caller is responsible for checking if the SPTE is shadow-present, and
 * for determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_has_volatile_bits(u64 spte)
{}
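
/*
 * Sketch (hypothetical "sketch_" name; helper names follow recent spte.h and
 * may differ across kernel versions): Writable and Dirty can be set by the
 * fast page-fault path, and the Accessed/saved bits by the access-tracking
 * path, all without holding mmu_lock.
 */
static bool __maybe_unused sketch_spte_has_volatile_bits(u64 spte)
{
	/* MMU-writable but not Writable: fast #PF may set W at any time. */
	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
		return true;

	if (is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if (!(spte & shadow_accessed_mask) ||
		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
			return true;
	}

	return false;
}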

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte)
{}
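
/*
 * Heavily abridged sketch (hypothetical "sketch_" name and signature): the
 * real make_spte() additionally handles access tracking, huge pages, guest
 * memtype, write-protection for unsync pages and dirty logging. Shown here
 * is only the core permission/mask assembly for a leaf SPTE.
 */
static u64 __maybe_unused sketch_make_leaf_spte(kvm_pfn_t pfn,
						unsigned int pte_access,
						bool host_writable)
{
	u64 spte = SPTE_MMU_PRESENT_MASK | shadow_present_mask |
		   shadow_accessed_mask | shadow_me_value;

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (host_writable && (pte_access & ACC_WRITE_MASK))
		spte |= PT_WRITABLE_MASK | shadow_host_writable_mask |
			shadow_mmu_writable_mask | shadow_dirty_mask;

	return spte | ((u64)pfn << PAGE_SHIFT);
}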

static u64 make_spte_executable(u64 spte)
{}
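
/*
 * Sketch (hypothetical "sketch_" name): restore an access-tracked SPTE
 * before touching its permission bits, clear NX and set X, then re-mark the
 * SPTE for access tracking so the change is invisible to the tracker.
 */
static u64 __maybe_unused sketch_make_spte_executable(u64 spte)
{
	bool is_access_track = is_access_track_spte(spte);

	if (is_access_track)
		spte = restore_acc_track_spte(spte);

	spte &= ~shadow_nx_mask;
	spte |= shadow_x_mask;

	if (is_access_track)
		spte = mark_spte_for_access_track(spte);

	return spte;
}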

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up the
 * new page table.
 */
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
			      union kvm_mmu_page_role role, int index)
{}
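
/*
 * Sketch of the split (hypothetical "sketch_" name and signature, where
 * huge_level is the level of the huge SPTE being split): the child inherits
 * the huge SPTE's attributes; only the PFN offset selected by `index` and,
 * for a 4K child, the large-page bit differ.
 */
static u64 __maybe_unused sketch_huge_page_split_spte(u64 huge_spte,
						      int huge_level,
						      int index)
{
	u64 child_spte = huge_spte;

	/* Step the base PFN forward to the sub-page selected by `index`. */
	child_spte |= ((u64)index * KVM_PAGES_PER_HPAGE(huge_level - 1))
		      << PAGE_SHIFT;

	/* A 4K child is a leaf and must not carry the large-page bit. */
	if (huge_level - 1 == PG_LEVEL_4K)
		child_spte &= ~PT_PAGE_SIZE_MASK;

	return child_spte;
}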

u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{}
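
/*
 * Sketch (hypothetical "sketch_" name; the SPTE_TDP_AD_DISABLED spelling
 * varies across kernel versions): non-leaf SPTEs grant full RWX permissions,
 * as permissions are enforced at the leaf level.
 */
static u64 __maybe_unused sketch_make_nonleaf_spte(u64 *child_pt,
						   bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_value;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED;
	else
		spte |= shadow_accessed_mask;

	return spte;
}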

u64 mark_spte_for_access_track(u64 spte)
{}
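
/*
 * Sketch (hypothetical "sketch_" name): with A/D bits enabled, access
 * tracking only needs to clear the Accessed bit. Otherwise the R/X bits are
 * saved into a spare bit range and the SPTE is made non-present to hardware
 * so the next access faults.
 */
static u64 __maybe_unused sketch_mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}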

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
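
/*
 * Abridged sketch (hypothetical "sketch_" name): the MMIO value must be a
 * strict subset of the MMIO mask, and MMIO caching must respect the
 * allow_mmio_caching snapshot taken at module init. The upstream version
 * performs further sanity checks (e.g. against the generation bits).
 */
static void __maybe_unused sketch_set_mmio_spte_mask(u64 mmio_value,
						     u64 mmio_mask,
						     u64 access_mask)
{
	enable_mmio_caching = allow_mmio_caching;

	if (!enable_mmio_caching)
		mmio_value = 0;

	/* An unrecognizable value would make MMIO SPTEs undetectable. */
	if (WARN_ON(mmio_value & ~mmio_mask))
		mmio_value = 0;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}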

void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
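
/*
 * Sketch (hypothetical "sketch_" name): the memory-encryption value must be
 * a subset of the mask; otherwise both are rejected.
 */
static void __maybe_unused sketch_set_me_spte_mask(u64 me_value, u64 me_mask)
{
	if (WARN_ON(me_value & ~me_mask))
		me_value = me_mask = 0;

	shadow_me_value = me_value;
	shadow_me_mask = me_mask;
}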

void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
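
/*
 * Sketch of the EPT mask setup (hypothetical "sketch_" name; VMX_EPT_* and
 * EPT_SPTE_* constants as in recent <asm/vmx.h> and spte.h). EPT has no User
 * bit and uses a dedicated eXecute bit rather than NX; with exec-only
 * support, readability is not required for presence.
 */
static void __maybe_unused sketch_set_ept_masks(bool has_ad_bits,
						bool has_exec_only)
{
	shadow_user_mask	= VMX_EPT_READABLE_MASK;
	shadow_accessed_mask	= has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
	shadow_dirty_mask	= has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
	shadow_nx_mask		= 0ull;
	shadow_x_mask		= VMX_EPT_EXECUTABLE_MASK;
	shadow_present_mask	= has_exec_only ?
		VMX_EPT_EXECUTABLE_MASK : VMX_EPT_READABLE_MASK;
	shadow_acc_track_mask	= VMX_EPT_RWX_MASK;
	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask  = EPT_SPTE_MMU_WRITABLE;
}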

void kvm_mmu_reset_all_pte_masks(void)
{}
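
/*
 * Sketch of the most interesting part of the reset (hypothetical "sketch_"
 * name): on CPUs vulnerable to L1TF, non-present and reserved SPTEs are
 * poisoned with high physical-address bits so a speculatively formed
 * translation cannot point at cacheable host memory.
 */
static void __maybe_unused sketch_reset_l1tf_mask(void)
{
	u8 low_phys_bits = boot_cpu_data.x86_phys_bits;

	shadow_nonpresent_or_rsvd_mask = 0;
	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits -
				SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits,
				  boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}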