linux/arch/x86/include/asm/desc.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H

#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
#include <asm/fixmap.h>
#include <asm/irq_vectors.h>
#include <asm/cpu_entry_area.h>

#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/percpu.h>

static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
	desc->limit0		= info->limit & 0x0ffff;

	desc->base0		= (info->base_addr & 0x0000ffff);
	desc->base1		= (info->base_addr & 0x00ff0000) >> 16;

	desc->type		= (info->read_exec_only ^ 1) << 1;
	desc->type	       |= info->contents << 2;
	/* Set the ACCESS bit so it can be mapped RO */
	desc->type	       |= 1;

	desc->s			= 1;
	desc->dpl		= 0x3;
	desc->p			= info->seg_not_present ^ 1;
	desc->limit1		= (info->limit & 0xf0000) >> 16;
	desc->avl		= info->useable;
	desc->d			= info->seg_32bit;
	desc->g			= info->limit_in_pages;

	desc->base2		= (info->base_addr & 0xff000000) >> 24;

	/*
	 * Don't allow setting of the lm bit. It would confuse
	 * user_regset_32 and it would get overridden by sysret anyway.
	 */
	desc->l			= 0;
}

struct gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
DECLARE_INIT_PER_CPU(gdt_page);

/* Provide the original GDT */
static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
{
	return per_cpu(gdt_page, cpu).gdt;
}

/* Provide the current original GDT */
static inline struct desc_struct *get_current_gdt_rw(void)
{
	return this_cpu_ptr(&gdt_page)->gdt;
}

/* Provide the fixmap address of the remapped GDT */
static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
{
	return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
}

/* Provide the current read-only GDT */
static inline struct desc_struct *get_current_gdt_ro(void)
{
	return get_cpu_gdt_ro(smp_processor_id());
}

/* Provide the physical address of the GDT page. */
static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
{
	return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
}

static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate->offset_low	= (u16) func;
	gate->bits.p		= 1;
	gate->bits.dpl		= dpl;
	gate->bits.zero		= 0;
	gate->bits.type		= type;
	gate->offset_middle	= (u16) (func >> 16);
#ifdef CONFIG_X86_64
	gate->segment		= __KERNEL_CS;
	gate->bits.ist		= ist;
	gate->reserved		= 0;
	gate->offset_high	= (u32) (func >> 32);
#else
	gate->segment		= seg;
	gate->bits.ist		= 0;
#endif
}
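
/*
 * Example (illustrative, not part of the kernel source): on 64-bit, packing
 * an interrupt gate for a handler at 0xffffffff81234567 splits the offset
 * across three fields and ignores the @seg argument:
 *
 *	gate_desc g;
 *
 *	pack_gate(&g, GATE_INTERRUPT, 0xffffffff81234567UL, 0, 0, 0);
 *	// g.offset_low    == 0x4567
 *	// g.offset_middle == 0x8123
 *	// g.offset_high   == 0xffffffff
 *	// g.segment       == __KERNEL_CS
 */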

static inline int desc_empty(const void *ptr)
{
	const u32 *desc = ptr;

	return !(desc[0] | desc[1]);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define load_TR_desc()				native_load_tr_desc()
#define load_gdt(dtr)				native_load_gdt(dtr)
#define load_idt(dtr)				native_load_idt(dtr)
#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)				native_store_gdt(dtr)
#define store_tr(tr)				(tr = native_store_tr())

#define load_TLS(t, cpu)			native_load_tls(t, cpu)
#define set_ldt					native_set_ldt

#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif	/* CONFIG_PARAVIRT_XXL */
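
/*
 * Note: with CONFIG_PARAVIRT_XXL the operations above are provided by
 * asm/paravirt.h and dispatch through pv_ops instead, roughly (sketch;
 * see paravirt.h for the real definitions):
 *
 *	static inline void load_gdt(const struct desc_ptr *dtr)
 *	{
 *		PVOP_VCALL1(cpu.load_gdt, dtr);
 *	}
 *
 * which lets a PV hypervisor such as Xen intercept descriptor-table loads.
 */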

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
	memcpy(&idt[entry], gate, sizeof(*gate));
}

static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
	memcpy(&ldt[entry], desc, 8);
}

static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
	unsigned int size;

	switch (type) {
	case DESC_TSS:	size = sizeof(tss_desc);	break;
	case DESC_LDT:	size = sizeof(ldt_desc);	break;
	default:	size = sizeof(*gdt);		break;
	}

	memcpy(&gdt[entry], desc, size);
}

static inline void set_tssldt_descriptor(void *d, unsigned long addr,
					 unsigned type, unsigned size)
{
	struct ldttss_desc *desc = d;

	memset(desc, 0, sizeof(*desc));

	desc->limit0		= (u16) size;
	desc->base0		= (u16) addr;
	desc->base1		= (addr >> 16) & 0xFF;
	desc->type		= type;
	desc->p			= 1;
	desc->limit1		= (size >> 16) & 0xF;
	desc->base2		= (addr >> 24) & 0xFF;
#ifdef CONFIG_X86_64
	desc->base3		= (u32) (addr >> 32);
#endif
}

static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
{
	struct desc_struct *d = get_cpu_gdt_rw(cpu);
	tss_desc tss;

	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
			      __KERNEL_TSS_LIMIT);
	write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr)	__set_tss_desc(cpu, GDT_ENTRY_TSS, addr)

static inline void native_set_ldt(const void *addr, unsigned int entries)
{
	if (likely(entries == 0))
		asm volatile("lldt %w0"::"q" (0));
	else {
		unsigned cpu = smp_processor_id();
		ldt_desc ldt;

		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
				      entries * LDT_ENTRY_SIZE - 1);
		write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
				&ldt, DESC_LDT);
		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
	}
}

static inline void native_load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}

static __always_inline void native_load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}

static inline void native_store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0":"=m" (*dtr));
}

static inline void store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0":"=m" (*dtr));
}

static inline void native_gdt_invalidate(void)
{
	const struct desc_ptr invalid_gdt = {
		.address = 0,
		.size = 0
	};

	native_load_gdt(&invalid_gdt);
}

static inline void native_idt_invalidate(void)
{
	const struct desc_ptr invalid_idt = {
		.address = 0,
		.size = 0
	};

	native_load_idt(&invalid_idt);
}

/*
 * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
 * a read-only remapping. To prevent a page fault, the GDT is switched to the
 * original writeable version when needed.
 */
#ifdef CONFIG_X86_64
static inline void native_load_tr_desc(void)
{
	struct desc_ptr gdt;
	int cpu = raw_smp_processor_id();
	bool restore = 0;
	struct desc_struct *fixmap_gdt;

	native_store_gdt(&gdt);
	fixmap_gdt = get_cpu_gdt_ro(cpu);

	/*
	 * If the current GDT is the read-only fixmap, swap to the original
	 * writeable version. Swap back at the end.
	 */
	if (gdt.address == (unsigned long)fixmap_gdt) {
		load_direct_gdt(cpu);
		restore = 1;
	}
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
	if (restore)
		load_fixmap_gdt(cpu);
}
#else
static inline void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
#endif
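
/*
 * Why the swap above is needed (illustrative sketch): LTR sets the busy bit
 * in the TSS descriptor's type field, i.e. conceptually it performs
 *
 *	tss_desc *d = &gdt[GDT_ENTRY_TSS];
 *
 *	d->type |= 0x2;		// 0x9 (available) -> 0xB (busy)
 *
 * That write would #PF on the read-only fixmap GDT, hence the temporary
 * switch to the writeable per-cpu GDT.
 */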

static inline unsigned long native_store_tr(void)
{
	unsigned long tr;

	asm volatile("str %0":"=r" (tr));

	return tr;
}

static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
	unsigned int i;

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}

DECLARE_PER_CPU(bool, __tss_limit_invalid);

static inline void force_reload_TR(void)
{
	struct desc_struct *d = get_current_gdt_rw();
	tss_desc tss;

	memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc));

	/*
	 * LTR requires an available TSS, and the TSS is currently
	 * busy.  Make it be available.
	 */
	tss.type = DESC_TSS;
	write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);

	load_TR_desc();
	this_cpu_write(__tss_limit_invalid, false);
}

/*
 * Call this if you need the TSS limit to be correct, which should be the case
 * if and only if you have TIF_IO_BITMAP set or you're switching to a task
 * with TIF_IO_BITMAP set.
 */
static inline void refresh_tss_limit(void)
{
	DEBUG_LOCKS_WARN_ON(preemptible());

	if (unlikely(this_cpu_read(__tss_limit_invalid)))
		force_reload_TR();
}
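
/*
 * Hypothetical caller sketch (names are illustrative, not the actual
 * scheduler code): when switching to a task that used ioperm(), the switch
 * path would do something like
 *
 *	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP))
 *		refresh_tss_limit();
 *
 * so the CPU sees the full TSS, including the IO bitmap, before the task
 * executes a CPL 3 IO instruction.
 */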

/*
 * If you do something evil that corrupts the cached TSS limit (I'm looking
 * at you, VMX exits), call this function.
 *
 * The optimization here is that the TSS limit only matters for Linux if the
 * IO bitmap is in use.  If the TSS limit gets forced to its minimum value,
 * everything works except that IO bitmap will be ignored and all CPL 3 IO
 * instructions will #GP, which is exactly what we want for normal tasks.
 */
static inline void invalidate_tss_limit(void)
{
	DEBUG_LOCKS_WARN_ON(preemptible());

	if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
		force_reload_TR();
	else
		this_cpu_write(__tss_limit_invalid, true);
}
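
/*
 * Example use (sketch, based on KVM's VMX code): a VM exit resets the
 * hardware TSS limit to 0x67, silently truncating the IO bitmap, so the
 * host marks the cached limit stale instead of reloading TR on every exit:
 *
 *	// after returning from the guest
 *	invalidate_tss_limit();
 */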

/* This intentionally ignores lm, since 32-bit apps don't have that field. */
#define LDT_empty(info)					\
	((info)->base_addr		== 0	&&	\
	 (info)->limit			== 0	&&	\
	 (info)->contents		== 0	&&	\
	 (info)->read_exec_only		== 1	&&	\
	 (info)->seg_32bit		== 0	&&	\
	 (info)->limit_in_pages		== 0	&&	\
	 (info)->seg_not_present	== 1	&&	\
	 (info)->useable		== 0)

/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
static inline bool LDT_zero(const struct user_desc *info)
{
	return (info->base_addr		== 0 &&
		info->limit		== 0 &&
		info->contents		== 0 &&
		info->read_exec_only	== 0 &&
		info->seg_32bit		== 0 &&
		info->limit_in_pages	== 0 &&
		info->seg_not_present	== 0 &&
		info->useable		== 0);
}
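
/*
 * Illustrative contrast between the two "empty" encodings accepted from
 * userspace (e.g. via set_thread_area()):
 *
 *	struct user_desc empty = { .read_exec_only = 1, .seg_not_present = 1 };
 *	struct user_desc zero  = { };
 *
 *	LDT_empty(&empty);	// true: the canonical empty descriptor
 *	LDT_zero(&zero);	// true: all-zero form, also means "no segment"
 */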

static inline void clear_LDT(void)
{
	set_ldt(NULL, 0);
}

static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
	desc->base0 = base & 0xffff;
	desc->base1 = (base >> 16) & 0xff;
	desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
	return desc->limit0 | (desc->limit1 << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
	desc->limit0 = limit & 0xffff;
	desc->limit1 = (limit >> 16) & 0xf;
}

static inline void init_idt_data(struct idt_data *data, unsigned int n,
				 const void *addr)
{
	BUG_ON(n > 0xFF);

	memset(data, 0, sizeof(*data));
	data->vector	= n;
	data->addr	= addr;
	data->segment	= __KERNEL_CS;
	data->bits.type	= GATE_INTERRUPT;
	data->bits.p	= 1;
}

static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
{
	unsigned long addr = (unsigned long) d->addr;

	gate->offset_low	= (u16) addr;
	gate->segment		= (u16) d->segment;
	gate->bits		= d->bits;
	gate->offset_middle	= (u16) (addr >> 16);
#ifdef CONFIG_X86_64
	gate->reserved		= 0;
	gate->offset_high	= (u32) (addr >> 32);
#endif
}
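
/*
 * Sketch of how the two helpers above compose when installing a vector;
 * this mirrors idt_setup_from_table() in arch/x86/kernel/idt.c ("vector"
 * and "handler" are placeholders):
 *
 *	struct idt_data d;
 *	gate_desc g;
 *
 *	init_idt_data(&d, vector, handler);
 *	idt_init_desc(&g, &d);
 *	write_idt_entry(idt_table, vector, &g);
 */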

extern unsigned long system_vectors[];

extern void load_current_idt(void);
extern void idt_setup_early_handler(void);
extern void idt_setup_early_traps(void);
extern void idt_setup_traps(void);
extern void idt_setup_apic_and_irq_gates(void);
extern bool idt_is_f00f_address(unsigned long address);

#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
#else
static inline void idt_setup_early_pf(void) { }
#endif

extern void idt_invalidate(void);

#endif /* _ASM_X86_DESC_H */