linux/drivers/edac/amd64_edac.c

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ras.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

/*
 * Set by a command line parameter. If the BIOS has enabled ECC, this override
 * is cleared to prevent this driver from re-enabling the hardware.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
{}

/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/* Device for the PCI component */
static struct device *pci_ctl_dev;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the matching or
 * next-higher value.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {} scrubrates[] =;

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{}
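
/*
 * A minimal sketch of what such a logging wrapper looks like (not the
 * driver's exact body): forward to the PCI config accessor and warn on
 * failure. amd64_warn() is this driver's logging macro; the message
 * format here is an assumption.
 */
static int read_pci_cfg_dword_sketch(struct pci_dev *pdev, int offset,
				     u32 *val, const char *func)
{
	int err = pci_read_config_dword(pdev, offset, val);

	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}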

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64-byte blocks
 * (DRAM) to cache lines. This is nasty, so we use bandwidth in bytes/sec for
 * the setting instead.
 *
 * Currently, we only do DRAM scrubbing. If the scrubbing is done in software
 * on other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{}
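
/*
 * A minimal sketch of the scan above, assuming the elided scrubrates[]
 * entries pair a 'bandwidth' (bytes/sec, sorted highest to lowest) with a
 * 'scrubval' bit pattern; both member names are assumptions here.
 */
static u32 pick_scrubval_sketch(u32 new_bw)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/* Stop at the first entry at or below the requested rate. */
		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	/* If nothing matched, this is the last (maximum scrubval) entry. */
	return scrubrates[i].scrubval;
}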

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{}

static int get_scrub_rate(struct mem_ctl_info *mci)
{}

/*
 * Return true if the SysAddr given by sys_addr falls within the
 * DRAM base/limit range associated with node_id.
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{}
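
/*
 * Sketch of the check above; @base and @limit stand in for the node's
 * DRAM Base/Limit register values (the driver also masks sys_addr down
 * to the supported physical address width, omitted here).
 */
static bool base_limit_match_sketch(u64 sys_addr, u64 base, u64 limit)
{
	return sys_addr >= base && sys_addr <= limit;
}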

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{}

/*
 * Compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG.
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{}

#define for_each_chip_select(i, dct, pvt)

#define chip_select_base(i, dct, pvt)

#define for_each_chip_select_mask(i, dct, pvt)

#define for_each_umc(i)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{}
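
/*
 * Sketch of the csrow lookup test: a chip select claims an InputAddr when
 * the address agrees with the CS base on every bit not covered by the CS
 * mask (base/mask as produced by get_cs_base_and_mask()).
 */
static bool cs_claims_input_addr_sketch(u64 input_addr, u64 base, u64 mask)
{
	return (input_addr & ~mask) == (base & ~mask);
}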

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			      u64 *hole_offset, u64 *hole_size)
{}
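
/*
 * Sketch of the decode described above, assuming the K8-style DHAR layout
 * (DramHoleBase in bits 31-24, DramHoleOffset in bits 15-8); the exact
 * field positions vary by family and are an assumption here.
 */
static void dhar_decode_sketch(u32 dhar, u64 *hole_base, u64 *hole_offset,
			       u64 *hole_size)
{
	*hole_base   = dhar & GENMASK_ULL(31, 24);	  /* bits 31-24 of base */
	*hole_offset = (dhar & GENMASK_ULL(15, 8)) << 16; /* bits 31-24 of offset */
	*hole_size   = (1ULL << 32) - *hole_base;	  /* hole ends at 4GB */
}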

#ifdef CONFIG_EDAC_DEBUG
#define EDAC_DCT_ATTR_SHOW(reg)

EDAC_DCT_ATTR_SHOW(dhar);
EDAC_DCT_ATTR_SHOW(dbam0);
EDAC_DCT_ATTR_SHOW(top_mem);
EDAC_DCT_ATTR_SHOW(top_mem2);

static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
			      char *data)
{}

/*
 * update NUM_DBG_ATTRS in case you add new members
 */
static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
static DEVICE_ATTR_RO(dram_hole);

static struct attribute *dbg_attrs[] =;

static const struct attribute_group dbg_group =;

static ssize_t inject_section_show(struct device *dev,
				   struct device_attribute *mattr, char *buf)
{}

/*
 * store error injection section value which refers to one of 4 16-byte sections
 * within a 64-byte cacheline
 *
 * range: 0..3
 */
static ssize_t inject_section_store(struct device *dev,
				    struct device_attribute *mattr,
				    const char *data, size_t count)
{}

static ssize_t inject_word_show(struct device *dev,
				struct device_attribute *mattr, char *buf)
{}

/*
 * store error injection word value which refers to one of 9 16-bit words of
 * the 16-byte (128-bit + ECC bits) section
 *
 * range: 0..8
 */
static ssize_t inject_word_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{}

static ssize_t inject_ecc_vector_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *buf)
{}

/*
 * store a 16-bit error injection vector which enables injecting errors to the
 * corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
 */
static ssize_t inject_ecc_vector_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{}

/*
 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
 * fields needed by the injection registers and read the NB Array Data Port.
 */
static ssize_t inject_read_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{}

/*
 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
 * fields needed by the injection registers.
 */
static ssize_t inject_write_store(struct device *dev,
				  struct device_attribute *mattr,
				  const char *data, size_t count)
{}

/*
 * update NUM_INJ_ATTRS in case you add new members
 */

static DEVICE_ATTR_RW(inject_section);
static DEVICE_ATTR_RW(inject_word);
static DEVICE_ATTR_RW(inject_ecc_vector);
static DEVICE_ATTR_WO(inject_write);
static DEVICE_ATTR_WO(inject_read);

static struct attribute *inj_attrs[] =;

static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{}

static const struct attribute_group inj_group =;
#endif /* CONFIG_EDAC_DEBUG */

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{}
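
/*
 * A condensed sketch of steps 2 and 3 above; @dram_base, @hole_valid,
 * @hole_base and @hole_offset stand in for the node's DRAM Base register
 * and DHAR contents (step 1's base/limit filtering is assumed done).
 */
static u64 sys_addr_to_dram_addr_sketch(u64 sys_addr, u64 dram_base,
					bool hole_valid, u64 hole_base,
					u64 hole_offset)
{
	u64 hole_size = (1ULL << 32) - hole_base;

	/* Step 2: the relocated region starts at 4GB and spans the hole. */
	if (hole_valid && sys_addr >= (1ULL << 32) &&
	    sys_addr < (1ULL << 32) + hole_size)
		return sys_addr - hole_offset;

	/* Step 3: otherwise subtract the node's DRAM base address. */
	return sys_addr - dram_base;
}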

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{}
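
/*
 * Sketch of the IntlvEn decode: the field is a contiguous low-order bit
 * mask whose population count is the number of SysAddr bits used for node
 * interleaving; 0, 1, 3 and 7 being the only valid encodings is an
 * assumption drawn from the register definition in section 3.4.4.1.
 */
static int num_node_interleave_bits_sketch(unsigned int intlv_en)
{
	switch (intlv_en) {
	case 0: return 0;	/* no node interleaving */
	case 1: return 1;	/* 2-node interleave */
	case 3: return 2;	/* 4-node interleave */
	case 7: return 3;	/* 8-node interleave */
	default: return -1;	/* invalid encoding */
	}
}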

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{}
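
/* Sketch: the mapping is a plain page-number/page-offset decomposition. */
static void page_and_offset_sketch(u64 error_address, u32 *page, u32 *offset)
{
	*page	= (u32)(error_address >> PAGE_SHIFT);
	*offset	= (u32)(error_address & ~PAGE_MASK);
}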

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{}

/*
 * See AMD PPR DF::LclNodeTypeMap
 *
 * This register gives information for nodes of the same type within a system.
 *
 * Reading this register from a GPU node will tell how many GPU nodes are in the
 * system and what the lowest AMD Node ID value is for the GPU nodes. Use this
 * info to fix up the Linux logical "Node ID" value set in the AMD NB code and
 * EDAC.
 */
static struct local_node_map {} gpu_node_map;

#define PCI_DEVICE_ID_AMD_MI200_DF_F1
#define REG_LOCAL_NODE_TYPE_MAP

/* Local Node Type Map (LNTM) fields */
#define LNTM_NODE_COUNT
#define LNTM_BASE_NODE_ID

static int gpu_get_node_map(struct amd64_pvt *pvt)
{}

static int fixup_node_id(int node_id, struct mce *m)
{}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
{}

static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
{}

/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{}

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{}

#define CS_EVEN_PRIMARY
#define CS_ODD_PRIMARY
#define CS_EVEN_SECONDARY
#define CS_ODD_SECONDARY
#define CS_3R_INTERLEAVE

#define CS_EVEN
#define CS_ODD

static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{}

static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
				  int csrow_nr, int dimm)
{}

static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{}

static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{}

static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{}

static void dct_dump_misc_regs(struct amd64_pvt *pvt)
{}

/*
 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
 */
static void dct_prep_chip_selects(struct amd64_pvt *pvt)
{}

static void umc_prep_chip_selects(struct amd64_pvt *pvt)
{}

static void umc_read_base_mask(struct amd64_pvt *pvt)
{}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void dct_read_base_mask(struct amd64_pvt *pvt)
{}

static void umc_determine_memory_type(struct amd64_pvt *pvt)
{}

static void dct_determine_memory_type(struct amd64_pvt *pvt)
{}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{}

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{}

static int ddr2_cs_size(unsigned i, bool dct_width)
{}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{}

static int ddr3_cs_size(unsigned i, bool dct_width)
{}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{}

static int ddr4_cs_size(unsigned i)
{}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{}

/*
 * F15h supports only 64-bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{}

/* F15h M60h supports DDR4 mapping as well. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				unsigned cs_mode, int cs_mask_nr)
{}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{}

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{}

/*
 * Check if the csrow passed in is marked as SPARED; if so, return the new
 * spare row.
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory below 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{}

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{}

static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr,
					int *chan_sel)
{}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{}

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] =;

static const u16 x8_vectors[] =;

static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{}
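
/*
 * Sketch of the search idea: a symbol is in error if the syndrome lies in
 * the GF(2) span of that symbol's eigenvectors. This brute-force subset
 * check conveys the idea only; the driver uses a faster elimination walk
 * over the syndrome bits.
 */
static int decode_syndrome_sketch(u16 syndrome, const u16 *vectors,
				  unsigned int num_vecs, unsigned int v_dim)
{
	unsigned int sym, subset, bit;

	for (sym = 0; sym < num_vecs / v_dim; sym++) {
		/* Try every XOR combination of this symbol's eigenvectors. */
		for (subset = 1; subset < (1U << v_dim); subset++) {
			u16 s = 0;

			for (bit = 0; bit < v_dim; bit++)
				if (subset & (1U << bit))
					s ^= vectors[sym * v_dim + bit];

			if (s == syndrome)
				return sym;
		}
	}

	return -1;	/* no symbol reproduces the syndrome */
}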

static int map_err_sym_to_channel(int err_sym, int sym_size)
{}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{}

static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{}

static inline void decode_bus_error(int node_id, struct mce *m)
{}

/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 *
 * Currently, we can derive the channel number by looking at the 6th nibble in
 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
 * number.
 *
 * For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
 * the MCA_SYND[ErrorInformation] field.
 */
static void umc_get_err_info(struct mce *m, struct err_info *err)
{}
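
/*
 * Sketch of the two derivations above: the channel is the 6th nibble of
 * the bank's instance_id (IPID[31:0]), and the chip select comes from
 * MCA_SYND[ErrorInformation] bits [2:0].
 */
static void umc_err_info_sketch(struct mce *m, u8 *channel, u8 *csrow)
{
	*channel = (m->ipid & GENMASK_ULL(31, 0)) >> 20;
	*csrow	 = m->synd & 0x7;
}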

static void decode_umc_error(int node_id, struct mce *m)
{}

/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{}
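
/*
 * Sketch of the sibling lookup described above: locate the F1 and F2
 * devices sitting at the same domain/bus/slot as F3, and drop the F1
 * reference if F2 cannot be found. pci_get_related_function() is the
 * helper declared earlier in this file.
 */
static int reserve_siblings_sketch(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
{
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
	if (!pvt->F1)
		return -ENODEV;

	pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;
		return -ENODEV;
	}

	return 0;
}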

static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{}

/*
 * Retrieve the hardware registers of the memory controller.
 */
static void umc_read_mc_regs(struct amd64_pvt *pvt)
{}

/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void dct_read_mc_regs(struct amd64_pvt *pvt)
{}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 fields of 4 bits each, defined as follows:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from 0 to 15.
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages that the specified CSROW number
 *	encompasses
 *
 *
 */
static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{}
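
/*
 * Sketch of the DBAM field extraction implied by the layout above: each
 * pair of CSROWs (0/1, 2/3, ...) shares one 4-bit field.
 */
static inline u32 dbam_field_sketch(u32 dbam, int csrow_nr)
{
	return (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
}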

static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{}

static void umc_init_csrows(struct mem_ctl_info *mci)
{}

/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static void dct_init_csrows(struct mem_ctl_info *mci)
{}

/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{}

/* check MCG_CTL on all the CPUs on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{}
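
/*
 * Sketch of the per-core test: NB MCE reporting counts as enabled only if
 * MSR_IA32_MCG_CTL has the NB enable bit set on every core of the node.
 * Bit 4 (K8_MSR_MCGCTL_NBE in this driver) is assumed here; @msr_vals
 * stands in for values gathered with rdmsr_on_cpus().
 */
static bool nb_mce_enabled_sketch(const struct msr *msr_vals, int nr_cpus)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (!(msr_vals[cpu].l & BIT(4)))	/* MCG_CTL[NBE] */
			return false;
	}

	return true;
}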

static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{}

static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{}

static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{}

static bool dct_ecc_enabled(struct amd64_pvt *pvt)
{}

static bool umc_ecc_enabled(struct amd64_pvt *pvt)
{}

static inline void
umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{}

static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{}

static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{}

static int dct_hw_info_get(struct amd64_pvt *pvt)
{}

static int umc_hw_info_get(struct amd64_pvt *pvt)
{}

/*
 * The CPUs have one channel per UMC, so UMC number is equivalent to a
 * channel number. The GPUs have 8 channels per UMC, so the UMC number no
 * longer works as a channel number.
 *
 * The channel number within a GPU UMC is given in MCA_IPID[15:12].
 * However, the IDs are split such that two register-space UMC IDs map to one
 * physical UMC, and the channel numbers are split into two groups of four.
 *
 * Refer to comment on gpu_get_umc_base().
 *
 * For example,
 * UMC0 CH[3:0] = 0x0005[3:0]000
 * UMC0 CH[7:4] = 0x0015[3:0]000
 * UMC1 CH[3:0] = 0x0025[3:0]000
 * UMC1 CH[7:4] = 0x0035[3:0]000
 */
static void gpu_get_err_info(struct mce *m, struct err_info *err)
{}

static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{}

static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{}

static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
{}

static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{}

static void gpu_init_csrows(struct mem_ctl_info *mci)
{}

static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{}

/* ECC is enabled by default on GPU nodes */
static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
{}

static inline u32 gpu_get_umc_base(struct amd64_pvt *pvt, u8 umc, u8 channel)
{}
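
/*
 * Sketch of the address construction implied by the example table above:
 * two register-space UMC IDs per physical UMC, selected by the channel
 * group, with the channel-within-group in the 4th nibble. The 0x50000
 * base is read off the example addresses and is an assumption here.
 */
static u32 gpu_umc_base_sketch(u8 umc, u8 channel)
{
	u8 id = umc * 2 + (channel >= 4);	/* pick the group of four */

	return 0x50000 + ((u32)id << 20) + ((u32)(channel % 4) << 12);
}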

static void gpu_read_mc_regs(struct amd64_pvt *pvt)
{}

static void gpu_read_base_mask(struct amd64_pvt *pvt)
{}

static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
{}

static int gpu_hw_info_get(struct amd64_pvt *pvt)
{}

static void hw_info_put(struct amd64_pvt *pvt)
{}

static struct low_ops umc_ops =;

static struct low_ops gpu_ops =;

/* Use Family 16h versions for defaults and adjust as needed below. */
static struct low_ops dct_ops =;

static int per_family_init(struct amd64_pvt *pvt)
{}

static const struct attribute_group *amd64_edac_attr_groups[] =;

/*
 * For heterogeneous and APU models, the EDAC CHIP_SELECT and CHANNEL layers
 * should be swapped to fit the layer sizes.
 */
static unsigned int get_layer_size(struct amd64_pvt *pvt, u8 layer)
{}

static int init_one_instance(struct amd64_pvt *pvt)
{}

static bool instance_has_memory(struct amd64_pvt *pvt)
{}

static int probe_one_instance(unsigned int nid)
{}

static void remove_one_instance(unsigned int nid)
{}

static void setup_pci_device(void)
{}

static const struct x86_cpu_id amd64_cpuids[] =;
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

static int __init amd64_edac_init(void)
{}

static void __exit amd64_edac_exit(void)
{}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE();
MODULE_AUTHOR();
MODULE_DESCRIPTION();

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC();