#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>
/* Per-CPU mask — presumably used to strip LAM metadata bits from tagged
 * user pointers (declaration only; definition not visible here). */
DECLARE_PER_CPU(u64, tlbstate_untag_mask);
/* Flush all TLB entries on the local CPU. */
void __flush_tlb_all(void);
/* Sentinel values for flush ranges / TLB generations (definitions elided
 * in this view). */
#define TLB_FLUSH_ALL …
#define TLB_GENERATION_INVALID …
/* Update CR4: set the bits in @set and clear the bits in @clear.
 * The _irqsoff suffix suggests the caller must run with interrupts
 * disabled — NOTE(review): confirm against the definition. */
void cr4_update_irqsoff(unsigned long set, unsigned long clear);
/* Read the cached (shadow) copy of CR4 rather than the register itself. */
unsigned long cr4_read_shadow(void);
/* Set @mask bits in CR4; caller presumably has interrupts disabled
 * (body elided in this view). */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{ … }
/* Clear @mask bits in CR4; interrupts-off variant (body elided). */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{ … }
/* Set @mask bits in CR4; usable regardless of IRQ state (body elided). */
static inline void cr4_set_bits(unsigned long mask)
{ … }
/* Clear @mask bits in CR4; usable regardless of IRQ state (body elided). */
static inline void cr4_clear_bits(unsigned long mask)
{ … }
#ifndef MODULE
/* Number of dynamically managed ASID slots per CPU (definition elided). */
#define TLB_NR_DYN_ASIDS …
/* Per-ASID context tracking entry (members elided in this view). */
struct tlb_context { … };
/* Per-CPU TLB state (members elided in this view). */
struct tlb_state { … };
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
/* Portion of TLB state shared with other CPUs, in its own aligned
 * per-CPU section (members elided in this view). */
struct tlb_state_shared { … };
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
/* Returns whether user-memory access is currently safe from NMI context. */
bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay …
/* Initialize the CR4 shadow copy at boot (body elided). */
static inline void cr4_init_shadow(void)
{ … }
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
extern void initialize_tlbstate_and_flush(void);
/* Descriptor for a TLB flush request (members elided in this view). */
struct flush_tlb_info { … };
/* Flush the TLB on the local CPU only. */
void flush_tlb_local(void);
/* Flush a single user-address translation on the local CPU. */
void flush_tlb_one_user(unsigned long addr);
/* Flush a single kernel-address translation on the local CPU. */
void flush_tlb_one_kernel(unsigned long addr);
/* Flush the TLB on every CPU in @cpumask as described by @info. */
void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
/* Paravirt may override the flush primitives declared above. */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
#define flush_tlb_mm(mm) …
#define flush_tlb_range(vma, start, end) …
/* Flush the TLB on all CPUs, including global pages. */
extern void flush_tlb_all(void);
/* Flush [start, end) of @mm on all relevant CPUs; @stride_shift gives the
 * page-stride as a shift, @freed_tables indicates page tables were freed. */
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int stride_shift,
bool freed_tables);
/* Flush a kernel virtual address range on all CPUs. */
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
/* Flush the translation for one user page @a in @vma's mm (body elided). */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{ … }
/* Decide whether TLB flushes for @mm may be deferred and batched
 * (body elided in this view). */
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{ … }
/* Increment and return @mm's TLB generation counter (body elided). */
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{ … }
/* Record a pending unmap of @uaddr in @mm into @batch for a later
 * batched flush (body elided in this view). */
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm,
unsigned long uaddr)
{ … }
/* Flush any batched-pending entries for @mm (body elided). */
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{ … }
/* Perform the accumulated batched TLB flush. */
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
/* Compare old/new PTE flags and report whether a flush is required;
 * @ignore_access skips access-bit differences (body elided). */
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
bool ignore_access)
{ … }
/* Whether changing @oldpte to @newpte requires a TLB flush (body elided). */
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{ … }
#define pte_needs_flush …
/* Huge-PMD counterpart of pte_needs_flush() (body elided). */
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{ … }
#define huge_pmd_needs_flush …
#ifdef CONFIG_ADDRESS_MASKING
/* CR3 bits encoding the current LAM mode (body elided in this view). */
static inline u64 tlbstate_lam_cr3_mask(void)
{ … }
/* Propagate @mm's LAM configuration into per-CPU TLB state (body elided). */
static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{ … }
#else
/*
 * Linear Address Masking not configured: no LAM bits are ever present
 * in CR3, so the mask is always zero and the mode update is a no-op.
 */
static inline u64 tlbstate_lam_cr3_mask(void)
{
return 0;
}
static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
}
#endif
#endif /* !MODULE */
/* Native global TLB flush using the caller-supplied @cr4 value —
 * presumably by toggling CR4.PGE; body elided in this view. */
static inline void __native_tlb_flush_global(unsigned long cr4)
{ … }
#endif