// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <linux/static_call.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <linux/entry-common.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>
#include <asm/unwind.h>
#include <asm/tdx.h>
#include <asm/mmu_context.h>
#include <asm/shstk.h>

#include "process.h"

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
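
/*
 * Illustrative sketch only (not part of the upstream file): how CPU-local
 * code typically peeks at a per-CPU, page/cacheline aligned object such as
 * cpu_tss_rw.  The helper name example_peek_tss_sp0() is hypothetical.
 */
static inline unsigned long example_peek_tss_sp0(void)
{
	/* this_cpu_read() resolves to the copy owned by the executing CPU. */
	return this_cpu_read(cpu_tss_rw.x86_tss.sp0);
}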

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * This is called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{}

#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{}
#endif

/*
 * Free thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{}

static int set_new_tls(struct task_struct *p, unsigned long tls)
{}

__visible void ret_from_fork(struct task_struct *prev, struct pt_regs *regs,
				     int (*fn)(void *), void *fn_arg)
{}

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{}

static void pkru_flush_thread(void)
{}

void flush_thread(void)
{}

void disable_TSC(void)
{}

static void enable_TSC(void)
{}

int get_tsc_mode(unsigned long adr)
{}

int set_tsc_mode(unsigned int val)
{}
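
/*
 * Illustrative userspace sketch (not part of this file): the per-task TSC
 * mode managed by get_tsc_mode()/set_tsc_mode() is exposed through the
 * PR_GET_TSC/PR_SET_TSC prctls.  tsc_mode_demo() is a hypothetical helper;
 * after PR_TSC_SIGSEGV, a later RDTSC raises SIGSEGV for this task.
 */
#include <stdio.h>
#include <sys/prctl.h>

static int tsc_mode_demo(void)
{
	int mode = 0;

	if (prctl(PR_GET_TSC, &mode, 0, 0, 0))
		return -1;
	printf("RDTSC is currently %s\n",
	       mode == PR_TSC_ENABLE ? "allowed" : "trapping");

	/* Ask the kernel to make RDTSC fault for this task from now on. */
	return prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
}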

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{}

static void disable_cpuid(void)
{}

static void enable_cpuid(void)
{}

static int get_cpuid_mode(void)
{}

static int set_cpuid_mode(unsigned long cpuid_enabled)
{}
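
/*
 * Illustrative userspace sketch (not part of this file): the CPUID faulting
 * state managed by get_cpuid_mode()/set_cpuid_mode() is reachable through
 * arch_prctl(ARCH_GET_CPUID/ARCH_SET_CPUID).  cpuid_faulting_demo() is a
 * hypothetical helper and only succeeds on CPUs with CPUID faulting support.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>

static int cpuid_faulting_demo(void)
{
	long allowed = syscall(SYS_arch_prctl, ARCH_GET_CPUID, 0);

	if (allowed < 0)
		return -1;
	printf("CPUID is currently %s\n", allowed ? "allowed" : "faulting");

	/* Make further CPUID instructions raise SIGSEGV for this task. */
	return syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);
}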

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{}

/**
 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
 */
void native_tss_update_io_bitmap(void)
{}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif
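
/*
 * Illustrative userspace sketch (not part of this file): the TSS I/O bitmap
 * managed above backs the ioperm(2)/iopl(2) interfaces.  Granting port access
 * with ioperm() is what attaches a per-task I/O bitmap that later gets copied
 * into the per-CPU TSS before returning to user mode.  ioperm_demo() is a
 * hypothetical helper and needs CAP_SYS_RAWIO.
 */
#include <stdio.h>
#include <sys/io.h>

static int ioperm_demo(void)
{
	/* Request direct access to the CMOS/RTC index/data ports 0x70/0x71. */
	if (ioperm(0x70, 2, 1)) {
		perror("ioperm");
		return -1;
	}
	outb(0x0a, 0x70);			/* select CMOS status register A */
	printf("CMOS reg 0x0a = 0x%02x\n", inb(0x71));

	return ioperm(0x70, 2, 0);		/* drop the access again */
}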

#ifdef CONFIG_SMP

struct ssb_state {};

#define LSTATE_SSB

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{}

/*
 * Logic is: the first HT sibling to enable SSBD enables it for both siblings
 * in the core, and the last sibling to disable it disables it for the whole
 * core. This is how
 * MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{}
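
/*
 * Illustrative sketch only (not the kernel implementation): a hypothetical
 * model of the rule above.  Each sibling contributes its own SSBD request
 * bit and the core keeps SSBD enabled while either sibling still asks for
 * it, i.e. the core-wide value is simply the OR of the two thread values.
 */
static inline bool example_core_wants_ssbd(u64 thread0_spec_ctrl,
					   u64 thread1_spec_ctrl)
{
	return (thread0_spec_ctrl | thread1_spec_ctrl) & SPEC_CTRL_SSBD;
}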
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{}

void speculation_ctrl_update(unsigned long tif)
{}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{}
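
/*
 * Illustrative userspace sketch (not part of this file): the per-task SSBD
 * state that speculation_ctrl_update_current() pushes into the MSRs is
 * controlled with the PR_GET/PR_SET_SPECULATION_CTRL prctls (here for the
 * speculative store bypass mitigation).  ssbd_prctl_demo() is hypothetical.
 */
#include <stdio.h>
#include <sys/prctl.h>

static int ssbd_prctl_demo(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0)
		return -1;
	printf("SSB state: per-task control %savailable\n",
	       (state & PR_SPEC_PRCTL) ? "" : "not ");

	/* Enable the SSB mitigation (disable speculative store bypass). */
	return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		     PR_SPEC_DISABLE, 0, 0);
}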

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif
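
/*
 * Illustrative sketch only (not the kernel implementation): default_idle()
 * boils down to an STI;HLT pair, so the CPU sleeps with interrupts enabled
 * and wakes up on the next interrupt.  example_sti_hlt() is a hypothetical,
 * simplified model of that instruction sequence.
 */
static inline void example_sti_hlt(void)
{
	/* Enable interrupts, then halt until one arrives. */
	asm volatile("sti; hlt" ::: "memory");
}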

DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);

static bool x86_idle_set(void)
{}

#ifndef CONFIG_SMP
static inline void __noreturn play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{}

void __noreturn arch_cpu_idle_dead(void)
{}

/*
 * Called from the generic idle code.
 */
void __cpuidle arch_cpu_idle(void)
{}
EXPORT_SYMBOL_GPL(arch_cpu_idle);

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{}
#endif

struct cpumask cpus_stop_mask;

void __noreturn stop_this_cpu(void *dummy)
{}

/*
 * Prefer MWAIT over HALT if MWAIT is supported, the MWAIT_CPUID leaf
 * exists, and the MONITOR/MWAIT extensions advertise at least one C1
 * substate.
 *
 * Do not prefer MWAIT if the MONITOR instruction has a bug or if
 * idle=nomwait is passed on the kernel command line.
 */
static __init bool prefer_mwait_c1_over_halt(void)
{}
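
/*
 * Illustrative userspace sketch (not part of this file): roughly the CPUID
 * checks described above.  Leaf 0x1 ECX bit 3 is the MONITOR/MWAIT feature
 * flag, leaf 0x5 ECX bit 0 advertises the MWAIT extensions and EDX bits 7:4
 * count the C1 sub-states.  example_cpu_prefers_mwait_c1() is hypothetical
 * and cannot see the MONITOR erratum / idle=nomwait conditions.
 */
#include <stdbool.h>
#include <cpuid.h>

static bool example_cpu_prefers_mwait_c1(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) || !(ecx & (1u << 3)))
		return false;			/* no MONITOR/MWAIT at all */

	if (!__get_cpuid_count(5, 0, &eax, &ebx, &ecx, &edx))
		return false;			/* MWAIT leaf not enumerated */

	/* Extensions supported and at least one C1 sub-state advertised. */
	return (ecx & 1u) && (edx & 0xf0);
}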

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{}
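
/*
 * Illustrative sketch only (not the kernel implementation): a hypothetical,
 * simplified model of a MONITOR/MWAIT based C1 idle step as described above:
 * arm the monitor on this task's thread flags, re-check for pending work,
 * then MWAIT with EAX=0 and ECX=0 (no hints, no extensions).
 */
static inline void example_mwait_c1_model(void)
{
	/* Arm the monitor on the thread flags so a wakeup write breaks MWAIT. */
	__monitor(&current_thread_info()->flags, 0, 0);

	/* Only go to sleep if nothing became pending after arming the monitor. */
	if (!need_resched())
		__mwait(0, 0);
}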

void __init select_idle_routine(void)
{}

void amd_e400_c1e_apic_setup(void)
{}

void __init arch_post_acpi_subsys_init(void)
{}

static int __init idle_setup(char *str)
{}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long __get_wchan(struct task_struct *p)
{}
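
/*
 * Illustrative userspace sketch (not part of this file): the value produced
 * by __get_wchan() surfaces in procfs as /proc/<pid>/wchan, naming the
 * function a sleeping task is blocked in ("0" when it is not blocked).
 * print_wchan() is a hypothetical helper.
 */
#include <stdio.h>

static void print_wchan(int pid)
{
	char path[64], buf[128];
	size_t n;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/wchan", pid);
	f = fopen(path, "r");
	if (!f)
		return;
	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	printf("pid %d wchan: %s\n", pid, buf);
	fclose(f);
}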

long do_arch_prctl_common(int option, unsigned long arg2)
{}