linux/arch/x86/include/asm/mce.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H

#include <uapi/asm/mce.h>

/*
 * Machine Check support for x86
 */

/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK
#define MCG_CTL_P
#define MCG_EXT_P
#define MCG_CMCI_P
#define MCG_SEAM_NR
#define MCG_EXT_CNT_MASK
#define MCG_EXT_CNT_SHIFT
#define MCG_EXT_CNT(c)
#define MCG_SER_P
#define MCG_ELOG_P
#define MCG_LMCE_P
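
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * bank count sits in the low bits of MCG_CAP covered by MCG_BANKCNT_MASK,
 * while bits such as MCG_CMCI_P, MCG_SER_P and MCG_LMCE_P advertise optional
 * capabilities. 'cap' would normally be read from MSR_IA32_MCG_CAP.
 */
static inline unsigned int example_mcg_bank_count(u64 cap)
{
	return cap & MCG_BANKCNT_MASK;
}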

/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV
#define MCG_STATUS_EIPV
#define MCG_STATUS_MCIP
#define MCG_STATUS_LMCES
#define MCG_STATUS_SEAM_NR
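
/*
 * Illustrative sketch (hypothetical check): the #MC handler consults
 * MCG_STATUS to decide whether the interrupted context can be resumed.
 * RIPV means the saved RIP is valid and execution may restart there.
 */
static inline bool example_can_resume_after_mce(u64 mcgstatus)
{
	return !!(mcgstatus & MCG_STATUS_RIPV);
}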

/* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN

/* MCi_STATUS register defines */
#define MCI_STATUS_VAL
#define MCI_STATUS_OVER
#define MCI_STATUS_UC
#define MCI_STATUS_EN
#define MCI_STATUS_MISCV
#define MCI_STATUS_ADDRV
#define MCI_STATUS_PCC
#define MCI_STATUS_S
#define MCI_STATUS_AR
#define MCI_STATUS_CEC_SHIFT
#define MCI_STATUS_CEC_MASK
#define MCI_STATUS_CEC(c)
#define MCI_STATUS_MSCOD(m)
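
/*
 * Illustrative sketch (hypothetical helper): a bank's MCi_STATUS is only
 * meaningful when VAL is set; UC then separates uncorrected from corrected
 * errors, and MCI_STATUS_CEC() extracts the corrected error count used for
 * threshold-based (CMCI) reporting.
 */
static inline bool example_status_has_uncorrected_error(u64 status)
{
	return (status & MCI_STATUS_VAL) && (status & MCI_STATUS_UC);
}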

/* AMD-specific bits */
#define MCI_STATUS_TCC
#define MCI_STATUS_SYNDV
#define MCI_STATUS_DEFERRED
#define MCI_STATUS_POISON
#define MCI_STATUS_SCRUB

/*
 * The McaX field, if set, indicates that a given bank supports MCA extensions:
 *  - Deferred error interrupt type is specifiable by bank.
 *  - MCx_MISC0[BlkPtr] field indicates presence of extended MISC registers,
 *    but should not be used to determine MSR numbers.
 *  - TCC bit is present in MCx_STATUS.
 */
#define MCI_CONFIG_MCAX
#define MCI_IPID_MCATYPE
#define MCI_IPID_HWID
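
/*
 * Illustrative sketch (hypothetical helper): test a bank's McaX capability
 * given its raw MCx_CONFIG value, e.g. one read from
 * MSR_AMD64_SMCA_MCx_CONFIG(bank).
 */
static inline bool example_bank_has_mcax(u64 mcx_config)
{
	return !!(mcx_config & MCI_CONFIG_MCAX);
}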

/*
 * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
 * bits 15:0.  But bit 12 is the 'F' bit, defined for corrected
 * errors to indicate that errors are being filtered by hardware.
 * We should mask out bit 12 when looking for specific signatures
 * of uncorrected errors - so the F bit is deliberately skipped
 * in this #define.
 */
#define MCACOD

/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB
#define MCACOD_SCRUBMSK
#define MCACOD_L3WB
#define MCACOD_DATA
#define MCACOD_INSTR
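
/*
 * Illustrative sketch (hypothetical helper): architectural error codes are
 * matched against the MCACOD field of MCi_STATUS. MCACOD_SCRUBMSK is used
 * for the scrub range so that the 'F' (filtered) bit does not defeat the
 * comparison.
 */
static inline bool example_is_scrub_error(u64 status)
{
	return (status & MCACOD_SCRUBMSK) == MCACOD_SCRUB;
}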

/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m)
#define MCI_MISC_ADDR_MODE(m)
#define MCI_MISC_ADDR_SEGOFF
#define MCI_MISC_ADDR_LINEAR
#define MCI_MISC_ADDR_PHYS
#define MCI_MISC_ADDR_MEM
#define MCI_MISC_ADDR_GENERIC
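
/*
 * Illustrative sketch (hypothetical check, mirroring the Intel-style address
 * validation; assumes PAGE_SHIFT is visible): the address recorded in
 * MCi_ADDR is usable for recovery only if it is a physical address and is
 * precise to at least page granularity.
 */
static inline bool example_misc_addr_is_usable(u64 misc)
{
	return MCI_MISC_ADDR_MODE(misc) == MCI_MISC_ADDR_PHYS &&
	       MCI_MISC_ADDR_LSB(misc) <= PAGE_SHIFT;
}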

/* MCi_ADDR register defines */
#define MCI_ADDR_PHYSADDR

/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN
#define MCI_CTL2_CMCI_THRESHOLD_MASK
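
/*
 * Illustrative sketch (hypothetical helper): CMCI is armed per bank by
 * setting the enable bit and a corrected-error threshold in MCi_CTL2; the
 * 'threshold' argument here is an assumed illustrative value.
 */
static inline u64 example_ctl2_enable_cmci(u64 ctl2, u64 threshold)
{
	ctl2 &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
	ctl2 |= threshold & MCI_CTL2_CMCI_THRESHOLD_MASK;
	return ctl2 | MCI_CTL2_CMCI_EN;
}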

#define MCJ_CTX_MASK
#define MCJ_CTX(flags)
#define MCJ_CTX_RANDOM
#define MCJ_CTX_PROCESS
#define MCJ_CTX_IRQ
#define MCJ_NMI_BROADCAST
#define MCJ_EXCEPTION
#define MCJ_IRQ_BROADCAST

#define MCE_OVERFLOW

#define MCE_LOG_MIN_LEN
#define MCE_LOG_SIGNATURE

/* AMD Scalable MCA */
#define MSR_AMD64_SMCA_MC0_CTL
#define MSR_AMD64_SMCA_MC0_STATUS
#define MSR_AMD64_SMCA_MC0_ADDR
#define MSR_AMD64_SMCA_MC0_MISC0
#define MSR_AMD64_SMCA_MC0_CONFIG
#define MSR_AMD64_SMCA_MC0_IPID
#define MSR_AMD64_SMCA_MC0_SYND
#define MSR_AMD64_SMCA_MC0_DESTAT
#define MSR_AMD64_SMCA_MC0_DEADDR
#define MSR_AMD64_SMCA_MC0_MISC1
#define MSR_AMD64_SMCA_MCx_CTL(x)
#define MSR_AMD64_SMCA_MCx_STATUS(x)
#define MSR_AMD64_SMCA_MCx_ADDR(x)
#define MSR_AMD64_SMCA_MCx_MISC(x)
#define MSR_AMD64_SMCA_MCx_CONFIG(x)
#define MSR_AMD64_SMCA_MCx_IPID(x)
#define MSR_AMD64_SMCA_MCx_SYND(x)
#define MSR_AMD64_SMCA_MCx_DESTAT(x)
#define MSR_AMD64_SMCA_MCx_DEADDR(x)
#define MSR_AMD64_SMCA_MCx_MISCy(x, y)
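
/*
 * Illustrative sketch (hypothetical helper; assumes <asm/msr.h>): on Scalable
 * MCA systems the per-bank registers are addressed through these MCx()
 * wrappers rather than the legacy MSR_IA32_MCx_* layout.
 */
static inline u64 example_smca_read_status(unsigned int bank)
{
	u64 status;

	rdmsrl(MSR_AMD64_SMCA_MCx_STATUS(bank), status);
	return status;
}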

#define XEC(x, mask)

/* mce.kflags flag bits for logging etc. */
#define MCE_HANDLED_CEC
#define MCE_HANDLED_UC
#define MCE_HANDLED_EXTLOG
#define MCE_HANDLED_NFIT
#define MCE_HANDLED_EDAC
#define MCE_HANDLED_MCELOG
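
/*
 * Illustrative sketch (hypothetical snippet): a consumer on the decode chain
 * marks a record it has dealt with so later notifiers and the default
 * "unhandled" warning skip it.
 */
static inline void example_mark_mce_handled(struct mce *m)
{
	m->kflags |= MCE_HANDLED_EDAC;
}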

/*
 * Indicates an MCE which has happened in kernel space but from
 * which the kernel can recover simply by executing fixup_exception()
 * so that an error is returned to the caller of the function that
 * hit the machine check.
 */
#define MCE_IN_KERNEL_RECOV

/*
 * Indicates an MCE that happened in kernel space while copying data
 * from user space. In this case fixup_exception() gets the kernel to the
 * error exit for the copy function. The machine check handler can then
 * treat it like a fault taken in user mode.
 */
#define MCE_IN_KERNEL_COPYIN
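
/*
 * Illustrative sketch (hypothetical check): both flags above live in
 * mce.kflags, so a kernel-context error is considered survivable only when
 * one of them was set while the error was graded.
 */
static inline bool example_kernel_mce_recoverable(struct mce *m)
{
	return m->kflags & (MCE_IN_KERNEL_RECOV | MCE_IN_KERNEL_COPYIN);
}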

/*
 * This structure contains all data related to the MCE log. It also
 * carries a signature to make it easier to find from external
 * debugging tools. Each entry is only valid when its finished flag
 * is set.
 */
struct mce_log_buffer {};

/* Decode chain notifier priorities, highest last */
enum mce_notifier_prios {};

struct notifier_block;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
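
/*
 * Illustrative sketch (hypothetical consumer; assumes <linux/notifier.h>):
 * decoders such as EDAC drivers attach to the decode chain like this. The
 * callback receives the logged record as 'data' and marks it handled via
 * the MCE_HANDLED_* kflags above.
 */
static int example_mce_decoder(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	/* ... decode m->status, m->addr, m->misc here ... */
	m->kflags |= MCE_HANDLED_EDAC;

	return NOTIFY_OK;
}

static struct notifier_block example_mce_dec = {
	.notifier_call	= example_mce_decoder,
	/* .priority would be taken from enum mce_notifier_prios above */
};

/*
 * Registered and unregistered with:
 *	mce_register_decode_chain(&example_mce_dec);
 *	mce_unregister_decode_chain(&example_mce_dec);
 */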

#include <linux/percpu.h>
#include <linux/atomic.h>

extern int mce_p5_enabled;

#ifdef CONFIG_ARCH_HAS_COPY_MC
extern void enable_copy_mc_fragile(void);
unsigned long __must_check copy_mc_fragile(void *dst, const void *src, unsigned cnt);
#else
static inline void enable_copy_mc_fragile(void)
{
}
#endif

struct cper_ia_proc_ctx;

#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
			       u64 lapic_id);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id) { return -EINVAL; }
#endif

void mce_setup(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);

/* Maximum number of MCA banks per CPU. */
#define MAX_NR_BANKS

#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void mce_intel_feature_clear(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(void);
void cmci_recheck(void);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif

int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
bool mce_usable_address(struct mce *m);

DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);

typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);

enum mcp_flags {};

void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);

int mce_notify_irq(void);

DECLARE_PER_CPU(struct mce, injectm);

/* Disable CMCI/polling for MCA bank claimed by firmware */
extern void mce_disable_bank(int bank);

/*
 * Exception handler
 */
void do_machine_check(struct pt_regs *pt_regs);

/*
 * Threshold handler
 */
extern void (*mce_threshold_vector)(void);

/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);

/*
 * Used by APEI to report memory errors via /dev/mcelog
 */

struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
				      struct cper_sec_mem_err *mem_err);
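
/*
 * Illustrative sketch (hypothetical caller): APEI/GHES hands the CPER memory
 * error section to this helper, flagging whether firmware reported the error
 * as corrected.
 */
static inline void example_report_ghes_mem_error(struct cper_sec_mem_err *mem_err,
						 bool corrected)
{
	apei_mce_report_mem_error(corrected, mem_err);
}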

/*
 * Enumerate new IP types and HWID values in AMD processors which support
 * Scalable MCA.
 */
#ifdef CONFIG_X86_MCE_AMD

/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {};

extern bool amd_mce_is_memory_error(struct mce *m);

extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);

void mce_amd_feature_init(struct cpuinfo_x86 *c);
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank);
#else

static inline int mce_threshold_create_device(unsigned int cpu)		{ return 0; }
static inline int mce_threshold_remove_device(unsigned int cpu)		{ return 0; }
static inline bool amd_mce_is_memory_error(struct mce *m)		{ return false; }
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)		{ }
#endif

static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c)	{}

unsigned long copy_mc_fragile_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_MCE_H */