// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI NMI support routines
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) Mike Travis
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/clocksource.h>

#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPUs have arrived in the NMI handler.  If some CPUs do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to keep UV Hub MMR accesses to a minimum, as each access
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when perf tools are
 * running, as they generate an enormous number of NMIs per second
 * (~4M/s for 1024 CPU threads).  Our secondary NMI handler is very short:
 * it only checks whether it has been "pinged" with the IPI(NMI) signal
 * mentioned above, and it does not read the UV Hub's MMR.
 */
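
/*
 * For reference, a minimal sketch (placeholder names, not the mainline
 * code) of the NMI_UNKNOWN-chain registration described above.  The real
 * handlers, uv_handle_nmi() and uv_handle_nmi_ping(), are registered in
 * uv_register_nmi_notifier() near the end of this file.
 */
static int __maybe_unused uv_nmi_example_handler(unsigned int reason,
						 struct pt_regs *regs)
{
	return NMI_DONE;	/* placeholder: decline the NMI */
}

static void __maybe_unused uv_nmi_example_register(void)
{
	/* NMI_UNKNOWN chain: consulted only after perf et al. decline */
	if (register_nmi_handler(NMI_UNKNOWN, uv_nmi_example_handler, 0,
				 "uv-example"))
		pr_warn("UV: example NMI handler failed to register\n");
}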

static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

/* Newer SMM NMI handler, not present in all systems */
static unsigned long uvh_nmi_mmrx;		/* UVH_EVENT_OCCURRED0/1 */
static unsigned long uvh_nmi_mmrx_clear;	/* UVH_EVENT_OCCURRED0/1_ALIAS */
static int uvh_nmi_mmrx_shift;			/* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
static char *uvh_nmi_mmrx_type;			/* "EXTIO_INT0" */

/* Non-zero indicates newer SMM NMI handler present */
static unsigned long uvh_nmi_mmrx_supported;	/* UVH_EXTIO_INT0_BROADCAST */

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
static unsigned long uvh_nmi_mmrx_req;		/* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
static int uvh_nmi_mmrx_req_shift;		/* 62 */

/* UV hubless values */
#define NMI_CONTROL_PORT	0x70
#define NMI_DUMMY_PORT		0x71
#define PAD_OWN_GPP_D_0		0x2c
#define GPI_NMI_STS_GPP_D_0	0x164
#define GPI_NMI_ENA_GPP_D_0	0x174
#define STS_GPP_D_0_MASK	0x1
#define PAD_CFG_DW0_GPP_D_0	0x4c0
#define GPIROUTNMI		(1ul << 17)
#define PCH_PCR_GPIO_1_BASE	0xfdae0000ul
#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))

static u64 *pch_base;
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;

static atomic_t	uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;

static atomic_t uv_nmi_kexec_failed;

/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2

/*
 * Default is for all stack dumps to go to the console and the log buffer.
 * Lower the level to send them to the log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);

/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}

static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* Clear the counter on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)

static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
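
/*
 * All four counters above are readable through sysfs, and (given the
 * clear-on-write param_set_local64() above) writing any value resets one
 * to zero, e.g.:
 *
 *	cat /sys/module/uv_nmi/parameters/nmi_count
 *	echo 0 > /sys/module/uv_nmi/parameters/nmi_misses
 */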

/*
 * The following values allow tuning for large systems under heavy load.
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);

static bool uv_pch_intr_enable = true;
static bool uv_pch_intr_now_enabled;
module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);

static bool uv_pch_init_enable = true;
module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);

static int uv_nmi_debug;
module_param_named(debug, uv_nmi_debug, int, 0644);

#define nmi_debug(fmt, ...)				\
	do {						\
		if (uv_nmi_debug)			\
			pr_info(fmt, ##__VA_ARGS__);	\
	} while (0)

/* Valid NMI Actions */
enum action_t {
	nmi_act_kdump,
	nmi_act_dump,
	nmi_act_ips,
	nmi_act_kdb,
	nmi_act_kgdb,
	nmi_act_health,
	nmi_act_max
};

/* Short names of the valid NMI actions */
static const char * const actions[nmi_act_max] = {
	[nmi_act_kdump]		= "kdump",
	[nmi_act_dump]		= "dump",
	[nmi_act_ips]		= "ips",
	[nmi_act_kdb]		= "kdb",
	[nmi_act_kgdb]		= "kgdb",
	[nmi_act_health]	= "health",
};

/* Descriptions printed for each valid NMI action */
static const char * const actions_desc[nmi_act_max] = {
	[nmi_act_kdump]		= "do kernel crash dump",
	[nmi_act_dump]		= "dump process stack for each cpu",
	[nmi_act_ips]		= "dump Inst Ptr info for each cpu",
	[nmi_act_kdb]		= "enter KDB (needs kgdboc= assignment)",
	[nmi_act_kgdb]		= "enter KGDB (needs gdb target remote)",
	[nmi_act_health]	= "check if CPUs respond to NMI",
};

static enum action_t uv_nmi_action = nmi_act_dump;

static int param_get_action(char *buffer, const struct kernel_param *kp)
{}

static int param_set_action(const char *val, const struct kernel_param *kp)
{}

static const struct kernel_param_ops param_ops_action = {
	.get = param_get_action,
	.set = param_set_action,
};
#define param_check_action(name, p) __param_check(name, p, enum action_t)

module_param_named(action, uv_nmi_action, action, 0644);
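
/*
 * The action can be changed at runtime through sysfs or chosen at boot
 * with the usual built-in module parameter syntax, e.g.:
 *
 *	echo kdb > /sys/module/uv_nmi/parameters/action
 *	uv_nmi.action=kdump		(on the kernel command line)
 */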

/* Set up which NMI support is present in the system */
static void uv_nmi_setup_mmrs(void)
{}

/* Read the NMI MMR and check if the NMI flag was set by the BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{}

static inline void uv_local_mmr_clear_nmi(void)
{}
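
/*
 * A minimal sketch of the test/clear pattern the two helpers above
 * implement, assuming nmi_mmr holds the MMR offset, nmi_mmr_pending the
 * BMC "NMI pending" bit, and nmi_mmr_clear the write-to-clear alias (all
 * selected in uv_nmi_setup_mmrs()).  Illustrative only.
 */
static int __maybe_unused uv_nmi_test_and_clear_sketch(void)
{
	/* One MMR read: the pending bit means the BMC raised an NMI */
	if (!(uv_read_local_mmr(nmi_mmr) & nmi_mmr_pending))
		return 0;

	/* Write through the alias to clear and re-arm the NMI */
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
	return 1;
}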

/*
 * UV hubless NMI handler functions
 */
static inline void uv_reassert_nmi(void)
{}

static void uv_init_hubless_pch_io(int offset, int mask, int data)
{}
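
/*
 * Sketch of the PCH register update the helper above performs: a plain
 * read-modify-write of a GPIO config register mapped at pch_base.
 * Illustrative only.
 */
static void __maybe_unused uv_init_hubless_pch_io_sketch(int offset, int mask,
							 int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int readd = readl(addr);

	if (mask)			/* OR in new data */
		writel((readd & ~mask) | data, addr);
}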

static void uv_nmi_setup_hubless_intr(void)
{}

/* PCH GPP_D_0 pad initialization values */
static struct init_nmi {
	unsigned int	offset;
	unsigned int	mask;
	unsigned int	data;
} init_nmi[] = {};

static void uv_init_hubless_pch_d0(void)
{}

static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
{}

static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{}

/*
 * If this is the first CPU in on this hub, set the hub_nmi "in_nmi" and
 * "owner" values and return true.  If it is also the first CPU in on the
 * system, set the global "in_nmi" flag.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{}
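
/*
 * Sketch of the "first CPU in" bookkeeping described above.  The
 * atomic_add_unless() idiom lets exactly one CPU make the 0 -> 1
 * transition; the in_nmi/cpu_owner fields follow the uv_hub_nmi_s layout
 * in <asm/uv/uv_hub.h>.  Illustrative only.
 */
static int __maybe_unused uv_set_in_nmi_sketch(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		/* Same idiom again for the system-wide flag */
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);
	}
	return first;
}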

/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{}

/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{}

/* Ping non-responding CPUs, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{}
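
/*
 * Sketch of the ping itself: set each straggler's per-CPU "pinging" flag
 * (so uv_handle_nmi_ping() below can tell these NMIs from perf noise),
 * then send an NMI IPI to the CPUs still listed in uv_nmi_cpu_mask.
 * Field names follow uv_cpu_nmi_s in <asm/uv/uv_hub.h>; illustrative only.
 */
static void __maybe_unused uv_nmi_ping_sketch(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}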

/* Clean up flags for CPUs that ignored both the NMI and the ping */
static void uv_nmi_cleanup_mask(void)
{}

/* Loop waiting as CPUs enter the NMI handler */
static int uv_nmi_wait_cpus(int first)
{}
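
/*
 * Sketch of the arrival loop: poll the count of CPUs that have entered
 * the handler, pausing uv_nmi_loop_delay microseconds between polls and
 * giving up after uv_nmi_retry_count tries.  The mainline loop is more
 * elaborate (initial vs. slave delays, idle-CPU handling); illustrative
 * only.
 */
static int __maybe_unused uv_nmi_wait_cpus_sketch(void)
{
	int i, n = num_online_cpus();

	for (i = 0; i < uv_nmi_retry_count; i++) {
		if (atomic_read(&uv_nmi_cpus_in_nmi) >= n)
			return 0;		/* all CPUs checked in */
		udelay(uv_nmi_loop_delay);
	}
	return n - atomic_read(&uv_nmi_cpus_in_nmi);	/* stragglers left */
}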

/* Wait until all slave CPUs have entered the UV NMI handler */
static void uv_nmi_wait(int master)
{}

/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{}

/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{}
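
/*
 * The IP line itself reduces to a single pr_info(); "%pS" prints the
 * saved instruction pointer symbolically.  A sketch of the format the
 * header above describes (field widths are a guess here):
 */
static void __maybe_unused uv_nmi_dump_cpu_ip_sketch(int cpu, struct pt_regs *regs)
{
	pr_info("UV: %4d %6d %-32.32s %pS",
		cpu, current->pid, current->comm, (void *)regs->ip);
}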

/*
 * Dump this CPU's state.  If the action was set to "kdump" and the
 * crash_kexec failed, then we provide "dump" as an alternate action.
 * Action "dump" now also includes the "ips" (instruction pointers)
 * display, whereas action "ips" only shows instruction pointers for the
 * non-idle CPUs.
 * This is an abbreviated form of the "ps" command.
 */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{}

/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{}

/* Wait until all CPUs are ready to exit */
static void uv_nmi_sync_exit(int master)
{}

/* The current "health" check simply reports which CPUs are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{}

/* Walk through CPU list and dump state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{}

static void uv_nmi_touch_watchdogs(void)
{}

static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{}

#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
static inline int uv_nmi_kdb_reason(void)
{}
#else /* !CONFIG_KGDB_KDB */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action == nmi_act_kgdb)
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */

/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
 * 'kdb' has no effect on which is used.  See the KGDB documentation for
 * further information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{}

#else /* !CONFIG_KGDB */
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB */

/*
 * UV NMI handler
 */
static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{}
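
/*
 * Outline sketch of the flow inside uv_handle_nmi(): decide whether this
 * CPU is the master (first one in), then dispatch on the configured
 * action.  The real handler also tests for and claims the NMI, corrals
 * the slave CPUs, clears the MMR and synchronizes the exit; illustrative
 * only.
 */
static int __maybe_unused uv_handle_nmi_outline(unsigned int reason,
						struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int master = (atomic_read(&uv_nmi_cpu) == cpu);

	switch (uv_nmi_action) {
	case nmi_act_kdump:
		uv_nmi_kdump(cpu, master, regs);
		break;
	case nmi_act_dump:
	case nmi_act_ips:
		uv_nmi_dump_state(cpu, regs, master);
		break;
	case nmi_act_kdb:
	case nmi_act_kgdb:
		uv_call_kgdb_kdb(cpu, regs, master);
		break;
	case nmi_act_health:
		uv_nmi_action_health(cpu, regs, master);
		break;
	default:
		break;
	}
	return NMI_HANDLED;
}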

/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{}
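
/*
 * Sketch of the ping fast path described above: if this CPU was not
 * pinged, the NMI is presumed to be perf noise and is declined with
 * NMI_DONE as quickly as possible; otherwise fall into the main handler.
 * The "pinging" field name follows uv_cpu_nmi_s in <asm/uv/uv_hub.h>;
 * illustrative only.
 */
static int __maybe_unused uv_handle_nmi_ping_sketch(unsigned int reason,
						    struct pt_regs *regs)
{
	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;	/* not ours */
	}

	local64_inc(&uv_nmi_ping_count);
	return uv_handle_nmi(reason, regs);
}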

static void uv_register_nmi_notifier(void)
{}

void uv_nmi_init(void)
{}

/* Set up HUB NMI info */
static void __init uv_nmi_setup_common(bool hubbed)
{}

/* Set up for UV Hub systems */
void __init uv_nmi_setup(void)
{}

/* Set up for UV Hubless systems */
void __init uv_nmi_setup_hubless(void)
{}