linux/arch/x86/kernel/cpu/bugs.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *        <[email protected]>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{}
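
/*
 * Illustrative sketch only: the elided body above amounts to keeping the
 * per-CPU cached copy and the hardware MSR in sync. The sketch_ name is
 * hypothetical and not part of this file.
 */
static __maybe_unused void sketch_update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);	/* refresh the cache */
	wrmsrl(MSR_IA32_SPEC_CTRL, val);		/* program the hardware */
}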

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{}
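
/*
 * Hedged sketch of the conditional variant described above, assuming the
 * write can be skipped when the cached value already matches, and deferred
 * when KERNEL_IBRS rewrites the MSR on return to user anyway. Hypothetical
 * name; the elided body above is authoritative.
 */
static __maybe_unused void sketch_update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;					/* nothing changed */

	this_cpu_write(x86_spec_ctrl_current, val);

	/* With KERNEL_IBRS the MSR is written on return-to-user anyway. */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}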

noinstr u64 spec_ctrl_current(void)
{}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether L1D-flush-based mitigations are enabled, based on
 * hardware features and the admin's setting via the boot parameter;
 * defaults to false.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

void __init cpu_select_mitigations(void)
{}
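
/*
 * Hedged sketch of the selection flow: the forward declarations at the top
 * of this file suggest one selector per vulnerability, invoked in dependency
 * order (retbleed depends on the spectre_v2 choice, spectre_v2_user on
 * retbleed's STIBP requirements). Hypothetical helper name; the elided body
 * above is authoritative.
 */
static __maybe_unused void __init sketch_select_order(void)
{
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	retbleed_select_mitigation();		/* needs the spectre_v2 result */
	spectre_v2_user_select_mitigation();	/* needs the retbleed result */
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();		/* MDS/TAA/MMIO/RFDS combined */
	srbds_select_mitigation();
	l1d_flush_select_mitigation();
	srso_select_mitigation();
	gds_select_mitigation();
}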

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{}
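
/*
 * Sketch of the AMD SSBD path, assuming the LS_CFG base/mask variables
 * declared above: prefer VIRT_SPEC_CTRL when the hypervisor offers it,
 * otherwise set the SSBD bit in LS_CFG. Hypothetical name, illustration
 * only.
 */
static __maybe_unused void sketch_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}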

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] =;

static void __init mds_select_mitigation(void)
{}

static int __init mds_cmdline(char *str)
{}
early_param("mds", mds_cmdline);
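
/*
 * For illustration, a minimal early_param() handler shape for "mds=",
 * assuming the usual off/full/full,nosmt vocabulary. Hypothetical name;
 * the elided mds_cmdline() above is authoritative.
 */
static __maybe_unused int __init sketch_mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;			/* not affected, ignore */

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}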

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] =;

static void __init taa_select_mitigation(void)
{}

static int __init tsx_async_abort_parse_cmdline(char *str)
{}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] =;

static void __init mmio_select_mitigation(void)
{}

static int __init mmio_stale_data_parse_cmdline(char *str)
{}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"RFDS: " fmt

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;

static const char * const rfds_strings[] =;

static void __init rfds_select_mitigation(void)
{}

static __init int rfds_parse_cmdline(char *str)
{}
early_param("reg_file_data_sampling", rfds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{}

static void __init md_clear_select_mitigation(void)
{}

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] =;

static bool srbds_off;

void update_srbds_msr(void)
{}
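
/*
 * Sketch of the SRBDS MSR update, assuming the documented
 * MCU_OPT_CTRL.RNGDS_MITG_DIS opt-out bit: set it when the mitigation is
 * off, clear it otherwise. Hypothetical name, illustration only.
 */
static __maybe_unused void sketch_update_srbds_msr(void)
{
	u64 mcu_ctrl;

	/* Without updated microcode or on a hypervisor the MSR may not exist. */
	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED ||
	    srbds_mitigation == SRBDS_MITIGATION_HYPERVISOR)
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	if (srbds_mitigation == SRBDS_MITIGATION_OFF)
		mcu_ctrl |= RNGDS_MITG_DIS;	/* opt out, faster RDRAND */
	else
		mcu_ctrl &= ~RNGDS_MITG_DIS;	/* keep RDRAND serialized */

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}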

static void __init srbds_select_mitigation(void)
{}

static int __init srbds_parse_cmdline(char *str)
{}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{}

static int __init l1d_flush_parse_cmdline(char *str)
{}
early_param("l1d_flush", l1d_flush_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;

static const char * const gds_strings[] =;

bool gds_ucode_mitigated(void)
{}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{}
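
/*
 * Sketch of the GDS MSR update along the same lines, assuming the
 * MCU_OPT_CTRL.GDS_MITG_DIS bit: only the OFF and FULL* states touch the
 * MSR; ucode-needed/hypervisor states leave it alone. Hypothetical name,
 * illustration only.
 */
static __maybe_unused void sketch_update_gds_msr(void)
{
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;	/* disable the mitigation */
		break;
	case GDS_MITIGATION_FULL_LOCKED:
	case GDS_MITIGATION_FULL:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;	/* enable the mitigation */
		break;
	default:
		return;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}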

static void __init gds_select_mitigation(void)
{}

static int __init gds_parse_cmdline(char *str)
{}
early_param("gather_data_sampling", gds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] =;

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{}
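
/*
 * Sketch of the check the comment above asks about: SMAP only helps if the
 * CPU doesn't speculate across the user/kernel boundary anyway, i.e. is not
 * Meltdown-affected. Hypothetical name, illustration only.
 */
static __maybe_unused bool sketch_smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/* Meltdown-affected CPUs leak L1D contents despite SMAP. */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}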

static void __init spectre_v1_select_mitigation(void)
{}

static int __init nospectre_v1_cmdline(char *str)
{}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
	RETBLEED_CMD_STUFF,
};

static const char * const retbleed_strings[] =;

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{}
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{}

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_MITIGATION_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{}

static inline const char *spectre_v2_module_string(void)
{}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{}
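
/*
 * A match_option() of this shape is the usual idiom for comparing a
 * length-delimited cmdline token against a known option string; sketch
 * only, hypothetical name.
 */
static __maybe_unused bool sketch_match_option(const char *arg, int arglen,
					       const char *opt)
{
	int len = strlen(opt);

	/* Equal length and equal bytes: the token is exactly this option. */
	return len == arglen && !strncmp(arg, opt, len);
}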

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] =;

static const struct {} v2_user_options[] __initconst =;

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{}

static void __init
spectre_v2_user_select_mitigation(void)
{}

static const char * const spectre_v2_strings[] =;

static const struct {} mitigation_options[] __initconst =;

static void __init spec_v2_print_cond(const char *reason, bool secure)
{}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{}

static bool __ro_after_init rrsba_disabled;

/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{}

static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{}

/*
 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
 * influenced by branch history in userspace. Not needed if BHI_NO is set.
 */
static bool __init spec_ctrl_bhi_dis(void)
{}
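
/*
 * Sketch of enabling BHI_DIS_S as described above, assuming the
 * X86_FEATURE_BHI_CTRL enumeration and the SPEC_CTRL_BHI_DIS_S bit of the
 * SPEC_CTRL MSR. Hypothetical name; returns whether the hardware control
 * was available.
 */
static __maybe_unused bool sketch_bhi_dis(void)
{
	if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
		return false;

	x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
	update_spec_ctrl(x86_spec_ctrl_base);	/* propagate to this CPU */

	return true;
}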

enum bhi_mitigations {
	BHI_MITIGATION_OFF,
	BHI_MITIGATION_ON,
};

static enum bhi_mitigations bhi_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;

static int __init spectre_bhi_parse_cmdline(char *str)
{}
early_param("spectre_bhi", spectre_bhi_parse_cmdline);

static void __init bhi_select_mitigation(void)
{}

static void __init spectre_v2_select_mitigation(void)
{}

static void update_stibp_msr(void * __unused)
{}
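
/*
 * Sketch of the STIBP refresh: recompute the current MSR value from the
 * base plus the STIBP bit and write it on this CPU; meant to be run via
 * on_each_cpu(). Hypothetical name, illustration only.
 */
static __maybe_unused void sketch_update_stibp_msr(void *__unused)
{
	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);

	update_spec_ctrl(val);
}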

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{}

#undef pr_fmt
#define pr_fmt(fmt)	fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{}
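
/*
 * Sketch of the idle-clear toggle: the static key defined earlier should
 * only be on while SMT is active, since the buffer clear before idle
 * protects against the sibling thread. Hypothetical name, illustration
 * only.
 */
static __maybe_unused void sketch_update_mds_branch_idle(void)
{
	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}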

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] =;

static const struct {} ssb_mitigation_options[]  __initconst =;

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{}

static void ssb_select_mitigation(void)
{}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{}

static bool is_spec_ib_user_controlled(void)
{}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{}
#endif

static int l1d_flush_prctl_get(struct task_struct *task)
{}

static int ssb_prctl_get(struct task_struct *task)
{}

static int ib_prctl_get(struct task_struct *task)
{}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{}

void x86_spec_ctrl_setup_ap(void)
{}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44 bits of physical address space internally in
 * the cache, but CPUID can report a smaller number of physical address
 * bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * that report 36 physical address bits and have 32G RAM installed, then
 * the mitigation range check in l1tf_select_mitigation() triggers. This is
 * a false positive because the mitigation is still possible due to the
 * fact that the cache uses 44 bits internally. Use the cache bits instead
 * of the reported physical bits and adjust them on the affected machines
 * to 44 bits if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{}
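
/*
 * Sketch of the override described in the comment above, assuming the
 * affected models are identified elsewhere: bump x86_cache_bits up to the
 * 44 bits the cache actually uses. Hypothetical name, illustration only.
 */
static __maybe_unused void sketch_override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86_cache_bits < 44)
		c->x86_cache_bits = 44;
}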

static void __init l1tf_select_mitigation(void)
{}

static int __init l1tf_cmdline(char *str)
{}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_UCODE_NEEDED,
	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
	SRSO_MITIGATION_MICROCODE,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
	SRSO_MITIGATION_IBPB_ON_VMEXIT,
};

enum srso_mitigation_cmd {
	SRSO_CMD_OFF,
	SRSO_CMD_MICROCODE,
	SRSO_CMD_SAFE_RET,
	SRSO_CMD_IBPB,
	SRSO_CMD_IBPB_ON_VMEXIT,
};

static const char * const srso_strings[] =;

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;

static int __init srso_parse_cmdline(char *str)
{}
early_param("spec_rstack_overflow", srso_parse_cmdline);

#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."

static void __init srso_select_mitigation(void)
{}

#undef pr_fmt
#define pr_fmt(fmt)	fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] =;

static ssize_t l1tf_show_state(char *buf)
{}

static ssize_t itlb_multihit_show_state(char *buf)
{}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sysfs_emit(buf, "Processor vulnerable\n");
}
#endif

static ssize_t mds_show_state(char *buf)
{}

static ssize_t tsx_async_abort_show_state(char *buf)
{}

static ssize_t mmio_stale_data_show_state(char *buf)
{}

static ssize_t rfds_show_state(char *buf)
{}

static char *stibp_state(void)
{}

static char *ibpb_state(void)
{}

static char *pbrsb_eibrs_state(void)
{}

static const char *spectre_bhi_state(void)
{}

static ssize_t spectre_v2_show_state(char *buf)
{}

static ssize_t srbds_show_state(char *buf)
{}

static ssize_t retbleed_show_state(char *buf)
{}

static ssize_t srso_show_state(char *buf)
{}

static ssize_t gds_show_state(char *buf)
{}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{}
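
/*
 * For illustration: each cpu_show_*() wrapper above and below is expected
 * to funnel into cpu_show_common() with its X86_BUG_* identifier, e.g.
 * (hypothetical name):
 */
static __maybe_unused ssize_t sketch_show_mds(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}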

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{}

ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
{}
#endif

void __warn_thunk(void)
{}