linux/arch/x86/kernel/cpu/microcode/core.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <[email protected]>
 *	      2006	Shaohua Li <[email protected]>
 *	      2013-2016	Borislav Petkov <[email protected]>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <[email protected]>
 *			   H Peter Anvin <[email protected]>
 *		  (C) 2015 Borislav Petkov <[email protected]>
 *
 * This driver allows updating the microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#include "internal.h"

static struct microcode_ops	*microcode_ops;
bool dis_ucode_ldr = true;

bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any given time.
 */
struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
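
/*
 * Illustrative sketch of the locking pattern described above, kept under
 * #if 0 so it stays out of the build. The function names are hypothetical;
 * only the cpus_read_lock()/cpus_read_unlock() pairing is the point.
 */
#if 0
static int example_update_one_cpu(int cpu)
{
	int ret;

	/* Block CPU hotplug while the microcode operation runs */
	cpus_read_lock();
	ret = do_microcode_update(cpu);	/* hypothetical update operation */
	cpus_read_unlock();

	return ret;
}
#endif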

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

struct early_load_data early_data;

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{}
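
/*
 * Minimal sketch of the check (under #if 0, so it does not clash with the
 * stub above): read the current patch level from MSR_AMD64_PATCH_LEVEL and
 * compare it against the zero-terminated final_levels[] list. This
 * approximates the upstream logic; it is not a verbatim copy.
 */
#if 0
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	for (i = 0; final_levels[i]; i++) {
		if (lvl == final_levels[i])
			return true;
	}
	return false;
}
#endif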

static bool __init check_loader_disabled_bsp(void)
{}

void __init load_ucode_bsp(void)
{}
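
/*
 * Sketch of the BSP early-load dispatch (under #if 0; an approximation of
 * the upstream flow, not a verbatim copy): bail out when CPUID is not
 * available, the CPU family is too old, or the loader is disabled, then
 * hand off to the vendor-specific BSP loader from internal.h.
 */
#if 0
void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;
	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp(&early_data);
	else
		load_ucode_amd_bsp(&early_data, cpuid_1_eax);
}
#endif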

void load_ucode_ap(void)
{}

struct cpio_data __init find_microcode_in_initrd(const char *path)
{}

static void reload_early_microcode(unsigned int cpu)
{}

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stop_machine() effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
enum sibling_ctrl {
	/* Spinwait with timeout */
	SCTRL_WAIT,
	/* Invoke the microcode_apply() callback */
	SCTRL_APPLY,
	/* Proceed without invoking the microcode_apply() callback */
	SCTRL_DONE,
};

struct microcode_ctrl {
	enum sibling_ctrl	ctrl;
	enum ucode_state	result;
	unsigned int		ctrl_cpu;
	bool			nmi_enabled;
};

DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
static atomic_t late_cpus_in, offline_in_nmi;
static unsigned int loops_per_usec;
static cpumask_t cpu_offline_mask;

static noinstr bool wait_for_cpus(atomic_t *cnt)
{}
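
/*
 * Sketch of the rendezvous wait (under #if 0; approximates the upstream
 * logic): decrement the counter, then spin for at most about one second
 * using loops_per_usec calibrated cpu_relax() loops, because udelay() is
 * not usable in noinstr/NMI context.
 */
#if 0
static noinstr bool wait_for_cpus(atomic_t *cnt)
{
	unsigned int timeout, loops;

	WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (!raw_atomic_read(cnt))
			return true;

		for (loops = 0; loops < loops_per_usec; loops++)
			cpu_relax();

		/* If invoked directly, tickle the NMI watchdog */
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC))
			touch_nmi_watchdog();
	}
	/* Prevent late comers from making progress and let them time out */
	raw_atomic_inc(cnt);
	return false;
}
#endif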

static noinstr bool wait_for_ctrl(void)
{}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr void load_secondary(unsigned int cpu)
{}
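
/*
 * Sketch of the secondary path (under #if 0; an approximation): wait for
 * the primary thread, then either apply the microcode too or copy the
 * primary's result, depending on the control state. Instrumentation is
 * only re-enabled once the wait has completed.
 */
#if 0
static noinstr void load_secondary(unsigned int cpu)
{
	unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
	enum ucode_state ret;

	if (!load_secondary_wait(ctrl_cpu)) {
		instrumentation_begin();
		pr_err_once("load: %d CPUs timed out\n",
			    atomic_read(&late_cpus_in) - 1);
		instrumentation_end();
		return;
	}

	/* Primary thread completed. Instrumentable code is safe again. */
	instrumentation_begin();
	if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
		ret = microcode_ops->apply_microcode(cpu);
	else
		ret = per_cpu(ucode_ctrl.result, ctrl_cpu);

	this_cpu_write(ucode_ctrl.result, ret);
	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
	instrumentation_end();
}
#endif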

static void __load_primary(unsigned int cpu)
{}

static bool kick_offline_cpus(unsigned int nr_offl)
{}

static void release_offline_cpus(void)
{}

static void load_primary(unsigned int cpu)
{}

/*
 * Minimal stub rendezvous handler for soft-offlined CPUs which participate
 * in the NMI rendezvous to protect against a concurrent NMI on affected
 * CPUs.
 */
void noinstr microcode_offline_nmi_handler(void)
{}
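
/*
 * Sketch of the stub rendezvous (under #if 0; an approximation): a
 * soft-offlined CPU which receives the NMI records that it is parked and
 * waits until the control CPU releases it. UCODE_OFFLINE is assumed to be
 * an enum ucode_state value provided by internal.h.
 */
#if 0
void noinstr microcode_offline_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return;
	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
	raw_atomic_inc(&offline_in_nmi);
	wait_for_ctrl();
}
#endif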

static noinstr bool microcode_update_handler(void)
{}

/*
 * Protection against instrumentation is required for CPUs which are not
 * safe against an NMI which is delivered to the secondary SMT sibling
 * while the primary thread updates the microcode. Instrumentation can end
 * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
 * which is the opposite of what the NMI rendezvous is trying to achieve.
 *
 * The primary thread is safe versus instrumentation as the actual
 * microcode update handles this correctly. It's only the sibling code
 * path which must be NMI safe until the primary thread completed the
 * update.
 */
bool noinstr microcode_nmi_handler(void)
{}
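
/*
 * Sketch (under #if 0; an approximation): the NMI handler only acts when
 * this CPU armed it, disarms it, and runs the secondary path from NMI
 * context so the sibling cannot be interrupted by another NMI.
 */
#if 0
bool noinstr microcode_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return false;

	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	load_secondary(raw_smp_processor_id());
	return true;
}
#endif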

static int load_cpus_stopped(void *unused)
{}

static int load_late_stop_cpus(bool is_safe)
{}

/*
 * This function does two things:
 *
 * 1) Ensure that all required CPUs which are present and have been booted
 *    once are online.
 *
 *    To pass this check, all primary threads must be online.
 *
 *    If the microcode load is not safe against NMI then all SMT threads
 *    must be online as well because they still react to NMIs when they are
 *    soft-offlined and parked in one of the play_dead() variants. So if an
 *    NMI hits while the primary thread updates the microcode, the resulting
 *    behaviour is undefined. The default play_dead() implementation on
 *    modern CPUs uses MWAIT, which is also not guaranteed to be safe
 *    against a microcode update which affects MWAIT.
 *
 *    As soft-offlined CPUs still react to NMIs, the SMT sibling
 *    restriction can be lifted when the vendor driver signals to use NMI
 *    for rendezvous and the APIC provides a mechanism to send an NMI to a
 *    soft-offlined CPU. The soft-offlined CPUs are then able to
 *    participate in the rendezvous in a trivial stub handler.
 *
 * 2) Initialize the per CPU control structure and create a cpumask
 *    which contains "offline" secondary threads, so they can be handled
 *    correctly by a control CPU.
 */
static bool setup_cpus(void)
{}
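
/*
 * Condensed sketch of the checks above (under #if 0; an approximation,
 * and the apic->nmi_to_offline_cpu test is an assumption about the APIC
 * capability flag): walk all present, booted-once CPUs; abort if a
 * primary thread is offline or offline SMT siblings cannot be tolerated;
 * otherwise record offline siblings and point each CPU at its control
 * CPU, the first sibling of its core.
 */
#if 0
static bool setup_cpus(void)
{
	struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
	bool allow_smt_offline;
	unsigned int cpu;

	allow_smt_offline = microcode_ops->nmi_safe ||
		(microcode_ops->use_nmi && apic->nmi_to_offline_cpu);

	cpumask_clear(&cpu_offline_mask);

	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		if (!cpu_online(cpu)) {
			if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
				pr_err("CPU %u not online, loading aborted\n", cpu);
				return false;
			}
			cpumask_set_cpu(cpu, &cpu_offline_mask);
			per_cpu(ucode_ctrl, cpu) = ctrl;
			continue;
		}

		/* Control CPU is the first sibling of the core */
		ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
		per_cpu(ucode_ctrl, cpu) = ctrl;
	}
	return true;
}
#endif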

static int load_late_locked(void)
{}

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{}
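
/*
 * Sketch of the sysfs trigger (under #if 0; an approximation): writing
 * "1" to the reload attribute validates the input and runs the late load
 * under the CPU hotplug read lock via load_late_locked().
 */
#if 0
static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val != 1)
		return -EINVAL;

	cpus_read_lock();
	ret = load_late_locked();
	cpus_read_unlock();

	return ret ? : size;
}
#endif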

static DEVICE_ATTR_WO(reload);
#endif

static ssize_t version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{}

static ssize_t processor_flags_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{}
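
/*
 * Sketch of the two show routines (under #if 0; an approximation): both
 * report fields collected into ucode_cpu_info[] for the CPU whose sysfs
 * device is being read.
 */
#if 0
static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}
#endif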

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs			= mc_default_attrs,
	.name			= "microcode",
};

static void microcode_fini_cpu(int cpu)
{}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{}
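
/*
 * Sketch (under #if 0; an approximation): on resume the boot CPU either
 * re-applies the microcode blob still cached for it, or falls back to
 * reloading the early microcode.
 */
#if 0
void microcode_bsp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->mc)
		microcode_ops->apply_microcode(cpu);
	else
		reload_early_microcode(cpu);
}
#endif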

static struct syscore_ops mc_syscore_ops = {
	.resume	= microcode_bsp_resume,
};

static int mc_cpu_online(unsigned int cpu)
{}

static int mc_cpu_down_prep(unsigned int cpu)
{}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

static int __init microcode_init(void)
{}
late_initcall(microcode_init);