// SPDX-License-Identifier: GPL-2.0
#ifndef __KVM_X86_MMU_TDP_ITER_H
#define __KVM_X86_MMU_TDP_ITER_H

#include <linux/kvm_host.h>

#include "mmu.h"
#include "spte.h"

/*
 * TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
 * to be zapped while holding mmu_lock for read, and to allow TLB flushes to be
 * batched without having to collect the list of zapped SPs.  Flows that can
 * remove SPs must service pending TLB flushes prior to dropping RCU protection.
 */
static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
{
	…
}

static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
	…
}

static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
	…
}

/*
 * SPTEs must be modified atomically if they are shadow-present, leaf
 * SPTEs, and have volatile bits, i.e. have bits that can be set outside
 * of mmu_lock.  The Writable bit can be set by KVM's fast page fault
 * handler, and Accessed and Dirty bits can be set by the CPU.
 *
 * Note, non-leaf SPTEs do have Accessed bits and those bits are
 * technically volatile, but KVM doesn't consume the Accessed bit of
 * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit.  This
 * logic needs to be reassessed if KVM were to use non-leaf Accessed
 * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
 */
static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level)
{
	…
}

static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
					 u64 new_spte, int level)
{
	…
}

static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
					  u64 mask, int level)
{
	…
}

/*
 * A TDP iterator performs a pre-order walk over a TDP paging structure.
 */
struct tdp_iter {
	…
};

/*
 * Iterates over every SPTE mapping the GFN range [start, end) in a
 * preorder traversal.
 */
#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) …

#define for_each_tdp_pte(iter, root, start, end) …

tdp_ptep_t spte_to_child_pt(u64 pte, int level);

void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
		    int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);

#endif /* __KVM_X86_MMU_TDP_ITER_H */
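
/*
 * Illustrative sketch, not part of the header above: one way the RCU
 * protection described at the top of the file could be honored when
 * reading an SPTE.  The helper name is hypothetical; rcu_dereference()
 * and READ_ONCE() are standard kernel primitives, and tdp_ptep_t is
 * assumed to be an __rcu-annotated pointer to a u64.
 */
static inline u64 tdp_mmu_read_spte_sketch(tdp_ptep_t sptep)
{
	/* Caller must be in an RCU read-side critical section. */
	return READ_ONCE(*rcu_dereference(sptep));
}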
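
/*
 * Illustrative sketch, not part of the header above: a plausible
 * implementation of the "needs an atomic write" rule documented before
 * kvm_tdp_mmu_spte_need_atomic_write().  The helper name is hypothetical;
 * is_shadow_present_pte(), is_last_spte() and spte_has_volatile_bits()
 * are assumed to come from spte.h.
 */
static inline bool tdp_mmu_spte_need_atomic_write_sketch(u64 old_spte, int level)
{
	/*
	 * Only shadow-present leaf SPTEs with volatile bits can be modified
	 * outside of mmu_lock, e.g. Writable by the fast page fault handler
	 * and Accessed/Dirty by the CPU, and so only they need an atomic
	 * read-modify-write to avoid clobbering concurrent updates.
	 */
	return is_shadow_present_pte(old_spte) &&
	       is_last_spte(old_spte, level) &&
	       spte_has_volatile_bits(old_spte);
}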
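
/*
 * Illustrative sketch, not part of the header above: a typical use of the
 * pre-order iterator declared in this header.  The function name is
 * hypothetical; it assumes the for_each_tdp_pte() form declared above,
 * tdp_iter fields named old_spte, gfn and level, and that the caller
 * holds mmu_lock and RCU as required for the walk.
 */
static inline void tdp_mmu_walk_range_sketch(struct kvm_mmu_page *root,
					     gfn_t start, gfn_t end)
{
	struct tdp_iter iter;

	/* Pre-order walk: each non-leaf SPTE is visited before its children. */
	for_each_tdp_pte(iter, root, start, end) {
		/* Skip SPTEs that don't map anything. */
		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/* iter.gfn and iter.level identify the current mapping. */
	}
}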