// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <[email protected]>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/sched/clock.h>

#include <asm/cpu_entry_area.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
#include <asm/microcode.h>
#include <asm/sev.h>
#include <asm/fred.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
	raw_spinlock_t lock;
	struct list_head head;
};

#define NMI_DESC_INIT(type) {						\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[type].lock),	\
	.head = LIST_HEAD_INIT(nmi_desc[type].head),			\
}

static struct nmi_desc nmi_desc[NMI_MAX] = {
	NMI_DESC_INIT(NMI_LOCAL),
	NMI_DESC_INIT(NMI_UNKNOWN),
	NMI_DESC_INIT(NMI_SERR),
	NMI_DESC_INIT(NMI_IO_CHECK),
};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
	unsigned long recv_jiffies;
	/*
	 * The idt_* fields below back the CONFIG_NMI_CHECK_CPU stall
	 * detector further down (field list as in current mainline).
	 */
	unsigned long idt_seq;
	unsigned long idt_nmi_seq;
	unsigned long idt_ignore;
	atomic_long_t idt_calls;
	unsigned long idt_seq_snap;
	unsigned long idt_nmi_seq_snap;
	unsigned long idt_ignore_snap;
	long idt_calls_snap;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed
 * simultaneously; only to be used from within the NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
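
/*
 * Usage note (sketch): booting with "unknown_nmi_panic" on the kernel
 * command line flips the flag above, so an unidentified NMI panics the
 * machine instead of merely being logged.
 */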

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			   arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);
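
/*
 * The warning threshold is runtime-tunable: with debugfs mounted, the
 * knob created above lives under arch_debugfs_dir, typically as
 * /sys/kernel/debug/x86/nmi_longest_ns.
 */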

static void nmi_check_duration(struct nmiaction *action, u64 duration)
{}

static int nmi_handle(unsigned int type, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(nmi_handle);
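
/*
 * Shape of the loop inside nmi_handle() above, reduced to its core (a
 * sketch; the real body also times each callback with sched_clock(),
 * emits the nmi_handler tracepoint and feeds nmi_check_duration()):
 *
 *	struct nmi_desc *desc = nmi_to_desc(type);
 *	struct nmiaction *a;
 *	int handled = 0;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(a, &desc->head, list)
 *		handled += a->handler(type, regs);
 *	rcu_read_unlock();
 *
 *	return handled;
 */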

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
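
/*
 * Illustrative only: a minimal sketch of how a client registers and
 * unregisters an NMI handler, going through the register_nmi_handler()
 * wrapper from <asm/nmi.h> rather than __register_nmi_handler()
 * directly.  The "mydev" name and the two mydev_* device helpers are
 * hypothetical, not part of this file.
 */
#if 0	/* example only */
static int mydev_nmi(unsigned int type, struct pt_regs *regs)
{
	if (!mydev_nmi_is_mine())	/* hypothetical device check */
		return NMI_DONE;	/* not ours: let other handlers look */

	mydev_ack_nmi();		/* hypothetical acknowledge */
	return NMI_HANDLED;
}

static int __init mydev_init(void)
{
	/* type, handler function, flags, lookup name */
	return register_nmi_handler(NMI_LOCAL, mydev_nmi, 0, "mydev");
}

static void __exit mydev_exit(void)
{
	/* The name passed here is the lookup key used at registration. */
	unregister_nmi_handler(NMI_LOCAL, "mydev");
}
#endif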

static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(unknown_nmi_error);

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static noinstr void default_do_nmi(struct pt_regs *regs)
{}
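
/*
 * Outline of default_do_nmi() above (sketch): CPU-specific handlers
 * (NMI_LOCAL) get first crack; if none claims the NMI, the reason port
 * (0x61) is read under nmi_reason_lock and SERR/IOCHK conditions are
 * routed to pci_serr_error()/io_check_error(); whatever remains is an
 * unknown NMI, which is either swallowed by the back-to-back NMI
 * heuristic (last_nmi_rip/swallow_nmi above) or reported through
 * unknown_nmi_error().
 */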

/*
 * An NMI can take a page fault or hit a breakpoint, and the IRET at the
 * end of that fault or breakpoint handler makes the CPU lose its NMI
 * context.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI hits a breakpoint that executes an IRET, another NMI can
 * preempt it. We do not want to allow this new NMI to run, but we want
 * to execute it when the first one finishes.  We set the state to
 * "latched", and the exit of the first NMI performs a dec_return; if
 * the result is zero (NOT_RUNNING), it simply exits the NMI handler.
 * If not, the dec_return has set the state back to NMI_EXECUTING (what
 * we want it to be while running). In that case, we jump back and rerun
 * the NMI handler to service the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off CR2, because
 * the NMI could have preempted another page fault and would corrupt
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and cannot take breakpoints or page faults, the update of the saved
 * CR2 must be done before converting the NMI state back to NOT_RUNNING.
 * Otherwise, there would be a race with another nested NMI coming in
 * after setting the state to NOT_RUNNING but before updating nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
static DEFINE_PER_CPU(unsigned long, nmi_dr7);

DEFINE_IDTENTRY_RAW(exc_nmi)
{}
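
/*
 * Shape of the state handling in exc_nmi() above (sketch; the real body
 * also brackets the work with irqentry_nmi_enter()/irqentry_nmi_exit()
 * and saves/restores DR7 alongside CR2):
 *
 *	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
 *		this_cpu_write(nmi_state, NMI_LATCHED);
 *		return;
 *	}
 *	this_cpu_write(nmi_state, NMI_EXECUTING);
 *	this_cpu_write(nmi_cr2, read_cr2());
 *
 * nmi_restart:
 *	...
 *	default_do_nmi(regs);
 *	...
 *	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
 *		write_cr2(this_cpu_read(nmi_cr2));
 *	if (this_cpu_dec_return(nmi_state))
 *		goto nmi_restart;
 */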

#if IS_ENABLED(CONFIG_KVM_INTEL)
DEFINE_IDTENTRY_RAW(exc_nmi_kvm_vmx)
{}
#if IS_MODULE(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx);
#endif
#endif

#ifdef CONFIG_NMI_CHECK_CPU

/*
 * Diagnostic strings for nmi_backtrace_stall_check(), indexed by three
 * bits of per-CPU state: CPU currently in the NMI handler, CPU offline,
 * and NMIs still being received. (The eight message strings are elided
 * here.)
 */
static char *nmi_check_stall_msg[] = {};

void nmi_backtrace_stall_snap(const struct cpumask *btp)
{}

void nmi_backtrace_stall_check(const struct cpumask *btp)
{}
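
/*
 * The pair above implements a snapshot/compare protocol (sketch of the
 * idea): nmi_backtrace_stall_snap() records each target CPU's idt_*
 * counters from nmi_stats before backtrace NMIs are sent, and
 * nmi_backtrace_stall_check() later compares the live counters against
 * those snapshots, using nmi_check_stall_msg[] to report why a CPU did
 * not respond (NMI never arrived, CPU stuck in the handler, offline,
 * and so on).
 */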

#endif

#ifdef CONFIG_X86_FRED
/*
 * With FRED, CR2/DR6 are pushed into the #PF/#DB stack frame during
 * FRED event delivery, i.e., there is no problem with transient state.
 * And NMI unblocking only happens when the stack frame indicates that
 * it should.
 *
 * Thus, the NMI entry stub for FRED is straightforward and as simple
 * as most exception handlers. As such, #DB is allowed during NMI
 * handling.
 */
DEFINE_FREDENTRY_NMI(exc_nmi)
{}
#endif

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);