#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H
#include <asm/processor-flags.h>
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>
#include <asm/shstk.h>
#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>
#define NET_IP_ALIGN …
#define HBP_NUM …
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN …
#define ARCH_MIN_MMSTRUCT_ALIGN …
#else
#define ARCH_MIN_TASKALIGN …
#define ARCH_MIN_MMSTRUCT_ALIGN …
#endif
enum tlb_infos { … };
extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
struct cpuinfo_topology { … };
struct cpuinfo_x86 { … } __randomize_layout;
#define X86_VENDOR_INTEL …
#define X86_VENDOR_CYRIX …
#define X86_VENDOR_AMD …
#define X86_VENDOR_UMC …
#define X86_VENDOR_CENTAUR …
#define X86_VENDOR_TRANSMETA …
#define X86_VENDOR_NSC …
#define X86_VENDOR_HYGON …
#define X86_VENDOR_ZHAOXIN …
#define X86_VENDOR_VORTEX …
#define X86_VENDOR_NUM …
#define X86_VENDOR_UNKNOWN …
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu) …
extern const struct seq_operations cpuinfo_op;
#define cache_line_size() …
extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long long l1tf_pfn_limit(void)
{ … }
extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
static inline unsigned long read_cr3_pa(void)
{ … }
static inline unsigned long native_read_cr3_pa(void)
{ … }
static inline void load_cr3(pgd_t *pgdir)
{ … }
#ifdef CONFIG_X86_32
/*
 * Hardware layout of the 32-bit Task State Segment.
 *
 * The field order and widths must match the format the CPU expects for a
 * 32-bit TSS (see Intel SDM, Vol. 3A, "Task Management"); hence the
 * __attribute__((packed)) and the __*h padding members that fill the high
 * half of 32-bit slots holding 16-bit selectors. Do not reorder or resize
 * any field.
 */
struct x86_hw_tss {
	unsigned short back_link, __blh;	/* previous-task link + padding */
	unsigned long sp0;			/* stack pointer for ring 0 */
	unsigned short ss0, __ss0h;		/* stack segment for ring 0 */
	unsigned long sp1;			/* stack pointer for ring 1 */
	/*
	 * ss1 doubles as a cache of MSR_IA32_SYSENTER_CS in the kernel's
	 * use of this slot — NOTE(review): inferred from kernel convention,
	 * not visible in this chunk; confirm against tss_struct users.
	 */
	unsigned short ss1;
	unsigned short __ss1h;			/* padding */
	unsigned long sp2;			/* stack pointer for ring 2 */
	unsigned short ss2, __ss2h;		/* stack segment for ring 2 */
	unsigned long __cr3;			/* CR3 (page directory base) */
	unsigned long ip;			/* saved EIP */
	unsigned long flags;			/* saved EFLAGS */
	unsigned long ax;			/* saved general-purpose registers */
	unsigned long cx;
	unsigned long dx;
	unsigned long bx;
	unsigned long sp;
	unsigned long bp;
	unsigned long si;
	unsigned long di;
	unsigned short es, __esh;		/* segment selectors + padding */
	unsigned short cs, __csh;
	unsigned short ss, __ssh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
	unsigned short ldt, __ldth;		/* LDT segment selector */
	unsigned short trace;			/* debug-trap (T) flag word */
	unsigned short io_bitmap_base;		/* offset of the I/O permission bitmap */
} __attribute__((packed));
#else
struct x86_hw_tss { … } __attribute__((packed));
#endif
#define IO_BITMAP_BITS …
#define IO_BITMAP_BYTES …
#define IO_BITMAP_LONGS …
#define IO_BITMAP_OFFSET_VALID_MAP …
#define IO_BITMAP_OFFSET_VALID_ALL …
#ifdef CONFIG_X86_IOPL_IOPERM
#define __KERNEL_TSS_LIMIT …
#else
#define __KERNEL_TSS_LIMIT …
#endif
#define IO_BITMAP_OFFSET_INVALID …
struct entry_stack { … };
struct entry_stack_page { … } __aligned(…);
struct x86_io_bitmap { … };
struct tss_struct { … } __aligned(…);
DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
struct irq_stack { … } __aligned(…);
#ifdef CONFIG_X86_64
struct fixed_percpu_data { … };
DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(…);
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{ … }
extern asmlinkage void entry_SYSCALL32_ignore(void);
void current_save_fsgs(void);
#else
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
#endif
struct perf_event;
struct thread_struct { … };
extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{ … }
static inline void
native_load_sp0(unsigned long sp0)
{ … }
static __always_inline void native_swapgs(void)
{ … }
static __always_inline unsigned long current_top_of_stack(void)
{ … }
static __always_inline bool on_thread_stack(void)
{ … }
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
/*
 * Without CONFIG_PARAVIRT_XXL there is no hypervisor indirection:
 * load_sp0() resolves directly to the native implementation.
 */
static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}
#endif
unsigned long __get_wchan(struct task_struct *p);
extern void select_idle_routine(void);
extern void amd_e400_c1e_apic_setup(void);
extern unsigned long boot_option_idle_override;
enum idle_boot_override { … };
extern void enable_sep_cpu(void);
extern struct desc_ptr early_gdt_descr;
extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);
extern void set_task_blockstep(struct task_struct *task, bool on);
extern int bootloader_type;
extern int bootloader_version;
extern char ignore_fpu_irq;
#define HAVE_ARCH_PICK_MMAP_LAYOUT …
#define ARCH_HAS_PREFETCHW
#ifdef CONFIG_X86_32
#define BASE_PREFETCH …
#define ARCH_HAS_PREFETCH
#else
#define BASE_PREFETCH …
#endif
static inline void prefetch(const void *x)
{ … }
static __always_inline void prefetchw(const void *x)
{ … }
#define TOP_OF_INIT_STACK …
#define task_top_of_stack(task) …
#define task_pt_regs(task) …
#ifdef CONFIG_X86_32
#define INIT_THREAD …
#define KSTK_ESP …
#else
extern unsigned long __top_init_kernel_stack[];
#define INIT_THREAD …
extern unsigned long KSTK_ESP(struct task_struct *task);
#endif
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
unsigned long new_sp);
#define __TASK_UNMAPPED_BASE(task_size) …
#define TASK_UNMAPPED_BASE …
#define KSTK_EIP(task) …
#define GET_TSC_CTL(adr) …
#define SET_TSC_CTL(val) …
extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
DECLARE_PER_CPU(u64, msr_misc_features_shadow);
static inline u32 per_cpu_llc_id(unsigned int cpu)
{ … }
static inline u32 per_cpu_l2c_id(unsigned int cpu)
{ … }
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_highest_perf(void);
static __always_inline void amd_clear_divider(void)
{ … }
extern void amd_check_microcode(void);
#else
/* No-op stubs for kernels built without AMD CPU support (!CONFIG_CPU_SUP_AMD). */
static inline u32 amd_get_highest_perf(void) { return 0; }
static inline void amd_clear_divider(void) { }
static inline void amd_check_microcode(void) { }
#endif
extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);
void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle …
#endif
void __noreturn stop_this_cpu(void *dummy);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);
enum l1tf_mitigations { … };
extern enum l1tf_mitigations l1tf_mitigation;
enum mds_mitigations { … };
extern bool gds_ucode_mitigated(void);
static inline void weak_wrmsr_fence(void)
{ … }
#endif