linux/arch/x86/kernel/cpu/mce/intel.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <[email protected]>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows
 * the CPU to raise an interrupt when a corrected machine check happens.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

/*
 * On systems that support CMCI but have it disabled, polling for MCEs can
 * cause the same event to be reported multiple times because IA32_MCi_STATUS
 * is shared by all CPUs in the same package.
 */
static DEFINE_SPINLOCK(cmci_poll_lock);

/* Linux non-storm CMCI threshold (may be overridden by BIOS) */
#define CMCI_THRESHOLD		1

/*
 * MCi_CTL2 threshold for each bank when there is no storm.
 * Default value for each bank may have been set by BIOS.
 */
static u16 cmci_threshold[MAX_NR_BANKS];

/*
 * High threshold to limit CMCI rate during storms. Max supported is
 * 0x7FFF. Use this slightly smaller value so it has a distinctive
 * signature when someone asks "Why am I not seeing all corrected errors?"
 * A high threshold is used instead of just disabling CMCI for a
 * bank because both corrected and uncorrected errors may be logged
 * in the same bank and signalled with CMCI. The threshold only applies
 * to corrected errors, so keeping CMCI enabled means that uncorrected
 * errors will still be processed in a timely fashion.
 */
#define CMCI_STORM_THRESHOLD	32749

static int cmci_supported(int *banks)
{}

static bool lmce_supported(void)
{}

/*
 * Set a new CMCI threshold value. Preserve the state of the
 * MCI_CTL2_CMCI_EN bit in case this happens during a
 * cmci_rediscover() operation.
 */
static void cmci_set_threshold(int bank, int thresh)
{}
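
/*
 * A minimal sketch of the threshold update described above (not necessarily
 * the upstream implementation). It assumes the rdmsrl()/wrmsrl() MSR
 * accessors and the MCI_CTL2_* masks from <asm/mce.h>; the _sketch name is
 * purely illustrative.
 */
static void __maybe_unused cmci_set_threshold_sketch(int bank, int thresh)
{
	unsigned long flags;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	/* Replace only the threshold field, keeping MCI_CTL2_CMCI_EN intact. */
	val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val | thresh);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}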

void mce_intel_handle_storm(int bank, bool on)
{}
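
/*
 * Illustrative sketch of how the storm and non-storm thresholds defined
 * above could be applied when storm tracking toggles a bank. Assumes the
 * cmci_set_threshold() helper in this file; the _sketch name is hypothetical.
 */
static void __maybe_unused mce_intel_handle_storm_sketch(int bank, bool on)
{
	if (on) {
		/* Storm: raise the threshold to throttle the interrupt rate. */
		cmci_set_threshold(bank, CMCI_STORM_THRESHOLD);
	} else {
		/* Calm again: restore the saved per-bank threshold, or the default. */
		cmci_set_threshold(bank, cmci_threshold[bank] ?
					 cmci_threshold[bank] : CMCI_THRESHOLD);
	}
}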

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{}
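
/*
 * A possible body for the handler described above: hand the banks this CPU
 * owns straight to the generic poller. Assumes machine_check_poll() and
 * MCP_TIMESTAMP from internal.h; the _sketch name is illustrative only.
 */
static void __maybe_unused intel_threshold_interrupt_sketch(void)
{
	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}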

/*
 * Check all the reasons why the current CPU cannot claim
 * ownership of a bank:
 * 1: CPU already owns this bank
 * 2: BIOS owns this bank
 * 3: Some other CPU owns this bank
 */
static bool cmci_skip_bank(int bank, u64 *val)
{}
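
/*
 * Sketch of the ownership checks listed above (simplified; not the exact
 * upstream code). Assumes rdmsrl() and MCI_CTL2_CMCI_EN from <asm/mce.h>;
 * the _sketch name is hypothetical.
 */
static bool __maybe_unused cmci_skip_bank_sketch(int bank, u64 *val)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);

	/* 1: this CPU already owns the bank. */
	if (test_bit(bank, owned))
		return true;

	rdmsrl(MSR_IA32_MCx_CTL2(bank), *val);

	/* 2/3: CMCI already enabled here means BIOS or another CPU owns it. */
	if (*val & MCI_CTL2_CMCI_EN) {
		clear_bit(bank, owned);
		return true;
	}

	return false;
}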

/*
 * Decide which CMCI interrupt threshold to use:
 * 1: If this bank is in storm mode from whichever CPU was
 *    the previous owner, stay in storm mode.
 * 2: If ignoring any threshold set by BIOS, set the Linux default.
 * 3: Try to honor BIOS threshold (unless buggy BIOS set it at zero).
 */
static u64 cmci_pick_threshold(u64 val, int *bios_zero_thresh)
{}
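
/*
 * Sketch of the three-way decision above, under the assumption that
 * mca_cfg.bios_cmci_threshold reflects the "honor BIOS threshold" boot
 * option. The _sketch name is illustrative.
 */
static u64 __maybe_unused cmci_pick_threshold_sketch(u64 val, int *bios_zero_thresh)
{
	/* 1: the previous owner left the bank in storm mode; keep it there. */
	if ((val & MCI_CTL2_CMCI_THRESHOLD_MASK) == CMCI_STORM_THRESHOLD)
		return val;

	if (!mca_cfg.bios_cmci_threshold) {
		/* 2: ignore BIOS and use the Linux default. */
		val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
		val |= CMCI_THRESHOLD;
	} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
		/* 3: BIOS left a (buggy) zero threshold; substitute the default. */
		*bios_zero_thresh = 1;
		val |= CMCI_THRESHOLD;
	}

	return val;
}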

/*
 * Try to claim ownership of a bank.
 */
static void cmci_claim_bank(int bank, u64 val, int bios_zero_thresh, int *bios_wrong_thresh)
{}
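
/*
 * Simplified sketch of claiming a bank: set MCI_CTL2_CMCI_EN and check
 * whether the bit sticks when read back. The storm bookkeeping done upstream
 * is omitted; the _sketch name is hypothetical.
 */
static void __maybe_unused cmci_claim_bank_sketch(int bank, u64 val, int bios_zero_thresh,
						  int *bios_wrong_thresh)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);

	val |= MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);

	/* The enable bit did not stick: leave this bank to the polling timer. */
	if (!(val & MCI_CTL2_CMCI_EN))
		return;

	/* This CPU now owns the bank. */
	set_bit(bank, owned);

	/*
	 * Note when a zero BIOS threshold had to be replaced, so the caller
	 * can warn that the BIOS may not really support bios_cmci_threshold.
	 */
	if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
	    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
		*bios_wrong_thresh = 1;

	/* Remember the bank's default threshold for leaving storm mode later. */
	if (!cmci_threshold[bank])
		cmci_threshold[bank] = val & MCI_CTL2_CMCI_THRESHOLD_MASK;
}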

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks. Called during initial bootstrap, and also for hotplug CPU operations
 * to rediscover/reassign machine check banks.
 */
static void cmci_discover(int banks)
{}
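
/*
 * Sketch of the discovery loop tying the helpers above together under
 * cmci_discover_lock (simplified from the upstream flow; the _sketch name
 * is illustrative).
 */
static void __maybe_unused cmci_discover_sketch(int banks)
{
	int bios_wrong_thresh = 0;
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		int bios_zero_thresh = 0;
		u64 val;

		if (cmci_skip_bank(i, &val))
			continue;

		val = cmci_pick_threshold(val, &bios_zero_thresh);
		cmci_claim_bank(i, val, bios_zero_thresh, &bios_wrong_thresh);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);

	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh)
		pr_info_once("bios_cmci_threshold: some banks had no valid threshold set by BIOS\n");
}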

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI-owned banks.
 */
void cmci_recheck(void)
{}
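
/*
 * Possible shape of the recheck described above: poll only the banks this
 * CPU owns, with interrupts disabled so the poll cannot race with the CMCI
 * handler. The _sketch name is hypothetical.
 */
static void __maybe_unused cmci_recheck_sketch(void)
{
	unsigned long flags;
	int banks;

	if (!cmci_supported(&banks))
		return;

	local_irq_save(flags);
	machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
	local_irq_restore(flags);
}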

/* Caller must hold the lock on cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{}
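
/*
 * Sketch of disabling a single owned bank with cmci_discover_lock held
 * (simplified; the _sketch name is illustrative).
 */
static void __maybe_unused __cmci_disable_bank_sketch(int bank)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
	u64 val;

	if (!test_bit(bank, owned))
		return;

	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, owned);
}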

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{}
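
/*
 * Sketch of the teardown described above: walk every bank this CPU could
 * own and release it under cmci_discover_lock (illustrative only).
 */
static void __maybe_unused cmci_clear_sketch(void)
{
	unsigned long flags;
	int banks;
	int i;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}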

static void cmci_rediscover_work_func(void *arg)
{}

/* After a CPU went down, cycle through all the other CPUs and rediscover */
void cmci_rediscover(void)
{}
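
/*
 * Sketch of rediscovery after a CPU went down: rerun the discovery on every
 * remaining CPU so orphaned shared banks get a new owner. Assumes
 * on_each_cpu() with a trivial work function like cmci_rediscover_work_func()
 * above; the _sketch name is illustrative.
 */
static void __maybe_unused cmci_rediscover_sketch(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}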

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{}
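
/*
 * Sketch of re-enabling: simply rerun discovery on this CPU so it reclaims
 * the banks it released during the failed offline (illustrative only).
 */
static void __maybe_unused cmci_reenable_sketch(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}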

void cmci_disable_bank(int bank)
{}

/* Bank polling function when CMCI is disabled. */
static void cmci_mc_poll_banks(void)
{}
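
/*
 * Sketch of the serialized poll motivated by the cmci_poll_lock comment near
 * the top of the file: only one CPU polls at a time, so a status register
 * shared within a package is not reported twice. Assumes the per-CPU
 * mce_poll_banks bitmap declared in internal.h; the _sketch name is
 * hypothetical.
 */
static void __maybe_unused cmci_mc_poll_banks_sketch(void)
{
	spin_lock(&cmci_poll_lock);
	machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
	spin_unlock(&cmci_poll_lock);
}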

void intel_init_cmci(void)
{}

void intel_init_lmce(void)
{}

void intel_clear_lmce(void)
{}

/*
 * Enable additional error logs from the integrated
 * memory controller on processors that support this.
 */
static void intel_imc_init(struct cpuinfo_x86 *c)
{}
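
/*
 * Hedged sketch of the IMC log enable: on Xeon parts assumed here to support
 * it (Sandy Bridge-EP, Ivy Bridge-EP, Haswell-EP), set the "MemError Log
 * Enable" bit in MSR_ERROR_CONTROL. The model list and bit value are
 * assumptions for illustration; the _sketch name is hypothetical.
 */
static void __maybe_unused intel_imc_init_sketch(struct cpuinfo_x86 *c)
{
	u64 error_control;

	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case 0x2d:	/* Sandy Bridge-EP (assumed) */
	case 0x3e:	/* Ivy Bridge-EP (assumed) */
	case 0x3f:	/* Haswell-EP (assumed) */
		if (rdmsrl_safe(MSR_ERROR_CONTROL, &error_control))
			return;
		error_control |= 2;	/* MemError Log Enable (assumed bit) */
		wrmsrl_safe(MSR_ERROR_CONTROL, error_control);
		break;
	}
}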

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{}

void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{}

bool intel_filter_mce(struct mce *m)
{}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
bool intel_mce_usable_address(struct mce *m)
{}
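
/*
 * Sketch of the address check described above (not necessarily the exact
 * upstream logic): require a valid MISC register, reject addresses whose
 * granularity is coarser than a page, and accept only physical address mode.
 * Assumes the MCI_MISC_ADDR_* helpers from <asm/mce.h>; the _sketch name is
 * illustrative.
 */
static bool __maybe_unused intel_mce_usable_address_sketch(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV))
		return false;

	/* Address must be resolved at least to page granularity. */
	if (MCI_MISC_ADDR_LSB(m) > PAGE_SHIFT)
		return false;

	/* Only physical addresses are usable without further parsing. */
	if (MCI_MISC_ADDR_MODE(m) != MCI_MISC_ADDR_PHYS)
		return false;

	return true;
}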