linux/drivers/edac/sb_edac.c

// SPDX-License-Identifier: GPL-2.0-only
/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor family Sandy Bridge.
 *
 * Copyright (c) 2011 by:
 *	 Mauro Carvalho Chehab
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_module.h"

/* Static vars */
static LIST_HEAD(sbridge_edac_list);

/*
 * Alter this module version whenever modifications are made
 */
#define SBRIDGE_REVISION
#define EDAC_MOD_STR

/*
 * Debug macros
 */
#define sbridge_printk(level, fmt, arg...)

#define sbridge_mc_printk(mci, level, fmt, arg...)

/*
 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
 */
#define GET_BITFIELD(v, lo, hi)
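
/*
 * The real definition above is elided; below is a minimal sketch of how
 * such a field extraction is commonly written, assuming GENMASK_ULL()
 * from <linux/bits.h>. Illustrative only, not necessarily the exact
 * macro used by this driver.
 */
static inline u64 example_get_bitfield(u64 v, unsigned int lo, unsigned int hi)
{
	/* Keep bits [hi:lo] of v and shift them down to bit 0. */
	return (v & GENMASK_ULL(hi, lo)) >> lo;
}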

/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] =;

static const u32 ibridge_dram_rule[] =;

static const u32 knl_dram_rule[] =;

#define DRAM_RULE_ENABLE(reg)
#define A7MODE(reg)

static char *show_dram_attr(u32 attr)
{}

static const u32 sbridge_interleave_list[] =;

static const u32 ibridge_interleave_list[] =;

static const u32 knl_interleave_list[] =;
#define MAX_INTERLEAVE

struct interleave_pkg {};

static const struct interleave_pkg sbridge_interleave_pkg[] =;

static const struct interleave_pkg ibridge_interleave_pkg[] =;

static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
			  int interleave)
{}

/* Devices 12 Function 7 */

#define TOLM
#define TOHM
#define HASWELL_TOLM
#define HASWELL_TOHM_0
#define HASWELL_TOHM_1
#define KNL_TOLM
#define KNL_TOHM_0
#define KNL_TOHM_1

#define GET_TOLM(reg)
#define GET_TOHM(reg)

/* Device 13 Function 6 */

#define SAD_TARGET

#define SOURCE_ID(reg)

#define SOURCE_ID_KNL(reg)

#define SAD_CONTROL

/* Device 14 function 0 */

static const u32 tad_dram_rule[] =;
#define MAX_TAD

#define TAD_LIMIT(reg)
#define TAD_SOCK(reg)
#define TAD_CH(reg)
#define TAD_TGT3(reg)
#define TAD_TGT2(reg)
#define TAD_TGT1(reg)
#define TAD_TGT0(reg)

/* Device 15, function 0 */

#define MCMTR
#define KNL_MCMTR

#define IS_ECC_ENABLED(mcmtr)
#define IS_LOCKSTEP_ENABLED(mcmtr)
#define IS_CLOSE_PG(mcmtr)

/* Device 15, function 1 */

#define RASENABLES
#define IS_MIRROR_ENABLED(reg)

/* Device 15, functions 2-5 */

static const int mtr_regs[] =;

static const int knl_mtr_reg =;

#define RANK_DISABLE(mtr)
#define IS_DIMM_PRESENT(mtr)
#define RANK_CNT_BITS(mtr)
#define RANK_WIDTH_BITS(mtr)
#define COL_WIDTH_BITS(mtr)

static const u32 tad_ch_nilv_offset[] =;
#define CHN_IDX_OFFSET(reg)
#define TAD_OFFSET(reg)

static const u32 rir_way_limit[] =;
#define MAX_RIR_RANGES

#define IS_RIR_VALID(reg)
#define RIR_WAY(reg)

#define MAX_RIR_WAY

static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] =;

#define RIR_RNK_TGT(type, reg)

#define RIR_OFFSET(type, reg)

/* Device 16, functions 2-7 */

/*
 * FIXME: Implement the error count reads directly
 */

#define RANK_ODD_OV(reg)
#define RANK_ODD_ERR_CNT(reg)
#define RANK_EVEN_OV(reg)
#define RANK_EVEN_ERR_CNT(reg)

#if 0 /* Currently unused */
static const u32 correrrcnt[] = {
	0x104, 0x108, 0x10c, 0x110,
};

static const u32 correrrthrsld[] = {
	0x11c, 0x120, 0x124, 0x128,
};
#endif

#define RANK_ODD_ERR_THRSLD(reg)
#define RANK_EVEN_ERR_THRSLD(reg)


/* Device 17, function 0 */

#define SB_RANK_CFG_A

#define IB_RANK_CFG_A

/*
 * sbridge structs
 */

#define NUM_CHANNELS
#define MAX_DIMMS
#define KNL_MAX_CHAS
#define KNL_MAX_CHANNELS
#define KNL_MAX_EDCS
#define CHANNEL_UNSPECIFIED

enum type {};

enum domain {};

enum mirroring_mode {};

struct sbridge_pvt;
struct sbridge_info {};

struct sbridge_channel {};

struct pci_id_descr {};

struct pci_id_table {};

struct sbridge_dev {};

struct knl_pvt {};

struct sbridge_pvt {};

#define PCI_DESCR(device_id, opt, domain)

static const struct pci_id_descr pci_dev_descr_sbridge[] =;

#define PCI_ID_TABLE_ENTRY(A, N, M, T)

static const struct pci_id_table pci_dev_descr_sbridge_table[] =;

/* This changes depending on whether the part has 1HA or 2HA:
 * 1HA:
 *	0x0eb8 (17.0) is DDRIO0
 * 2HA:
 *	0x0ebc (17.4) is DDRIO0
 */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0

/* pci ids */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3

static const struct pci_id_descr pci_dev_descr_ibridge[] =;

static const struct pci_id_table pci_dev_descr_ibridge_table[] =;

/* Haswell support */
/* EN processor:
 *	- 1 IMC
 *	- 3 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMC
 *	- each IMC interfaces with a SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define HASWELL_DDRCRCLKCONTROLS
#define HASWELL_HASYSDEFEATURE2
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3
static const struct pci_id_descr pci_dev_descr_haswell[] =;

static const struct pci_id_table pci_dev_descr_haswell_table[] =;

/* Knight's Landing Support */
/*
 * KNL's memory channels are swizzled between memory controllers.
 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 */
#define knl_channel_remap(mc, chan)
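
/*
 * A minimal sketch of the swizzle described above (the real macro body is
 * elided): a controller-relative channel 0..2 on MC0 lands on physical
 * channel 3..5, while MC1 keeps channels 0..2. Illustrative only.
 */
static inline int example_knl_channel_remap(int mc, int chan)
{
	return mc ? chan : chan + 3;
}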

/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC
/* DRAM channel stuff; bank addrs, dimmmtr, etc. 2-8-2 - 2-9-4 (6 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN
/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA
/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0
/* SAD target - 1-29-1 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1
/* Caching / Home Agent */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA
/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM

/*
 * KNL differs from SB, IB, and Haswell in that it has multiple
 * instances of the same device with the same device ID, so we handle that
 * by creating as many copies in the table as we expect to find.
 * (Devices with the same device ID must be grouped together in the table.)
 */

static const struct pci_id_descr pci_dev_descr_knl[] =;

static const struct pci_id_table pci_dev_descr_knl_table[] =;

/*
 * Broadwell support
 *
 * DE processor:
 *	- 1 IMC
 *	- 2 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMC
 *	- each IMC interfaces with a SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0

static const struct pci_id_descr pci_dev_descr_broadwell[] =;

static const struct pci_id_table pci_dev_descr_broadwell_table[] =;


/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

static inline int numrank(enum type type, u32 mtr)
{}

static inline int numrow(u32 mtr)
{}

static inline int numcol(u32 mtr)
{}

static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
					   int multi_bus,
					   struct sbridge_dev *prev)
{}

static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
					     const struct pci_id_table *table)
{}

static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{}

static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{}

static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{}

static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{}

static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{}

static u64 rir_limit(u32 reg)
{}

static u64 sad_limit(u32 reg)
{}

static u32 interleave_mode(u32 reg)
{}

static u32 dram_attr(u32 reg)
{}

static u64 knl_sad_limit(u32 reg)
{}

static u32 knl_interleave_mode(u32 reg)
{}

static const char * const knl_intlv_mode[] =;

static const char *get_intlv_mode_str(u32 reg, enum type t)
{}

static u32 dram_attr_knl(u32 reg)
{}


static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{}

static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
{}

static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{}

static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{}

static enum dev_type __ibridge_get_width(u32 mtr)
{}

static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{}

static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{}

static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{}

static u8 get_node_id(struct sbridge_pvt *pvt)
{}

static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{}

static u8 knl_get_node_id(struct sbridge_pvt *pvt)
{}

/*
 * Use the reporting bank number to determine which memory
 * controller (also known as "ha" for "home agent") is involved. Sandy
 * Bridge only has one memory controller per socket, so the
 * answer is always zero.
 */
static u8 sbridge_get_ha(u8 bank)
{}

/*
 * On Ivy Bridge, Haswell and Broadwell the error may be in a
 * home agent bank (7, 8), or one of the per-channel memory
 * controller banks (9 .. 16).
 */
static u8 ibridge_get_ha(u8 bank)
{}
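
/*
 * Hedged sketch of the bank-to-home-agent mapping described above (the
 * real implementations are elided): banks 7 and 8 map to HA0 and HA1
 * directly, and the per-channel banks 9..16 are assumed to be grouped
 * four per home agent. On Sandy Bridge the answer is always 0.
 */
static inline u8 example_bank_to_ha(u8 bank)
{
	if (bank == 7 || bank == 8)
		return bank - 7;	/* home agent banks */
	if (bank >= 9 && bank <= 16)
		return (bank - 9) / 4;	/* 9..12 -> HA0, 13..16 -> HA1 */
	return 0xff;			/* not a memory controller bank */
}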

/* Not used, but included for safety/symmetry */
static u8 knl_get_ha(u8 bank)
{}

static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{}

static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{}

static u64 knl_get_tolm(struct sbridge_pvt *pvt)
{}

static u64 knl_get_tohm(struct sbridge_pvt *pvt)
{}


static u64 haswell_rir_limit(u32 reg)
{}

static inline u8 sad_pkg_socket(u8 pkg)
{}

static inline u8 sad_pkg_ha(u8 pkg)
{}

static int haswell_chan_hash(int idx, u64 addr)
{}

/* Low bits of TAD limit, and some metadata. */
static const u32 knl_tad_dram_limit_lo[] =;

/* Low bits of TAD offset. */
static const u32 knl_tad_dram_offset_lo[] =;

/* High 16 bits of TAD limit and offset. */
static const u32 knl_tad_dram_hi[] =;

/* Number of ways a tad entry is interleaved. */
static const u32 knl_tad_ways[] =;

/*
 * Retrieve the n'th Target Address Decode table entry
 * from the memory controller's TAD table.
 *
 * @pvt:	driver private data
 * @entry:	which entry you want to retrieve
 * @mc:		which memory controller (0 or 1)
 * @offset:	output tad range offset
 * @limit:	output address of first byte above tad range
 * @ways:	output number of interleave ways
 *
 * The offset value has curious semantics.  It's a sort of running total
 * of the sizes of all the memory regions that aren't mapped in this
 * tad table.
 */
static int knl_get_tad(const struct sbridge_pvt *pvt,
		const int entry,
		const int mc,
		u64 *offset,
		u64 *limit,
		int *ways)
{}

/* Determine which memory controller is responsible for a given channel. */
static int knl_channel_mc(int channel)
{}
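
/*
 * Sketch of the inverse mapping implied by the swizzle comment earlier
 * (real body elided): physical channels 0..2 belong to MC1 and channels
 * 3..5 to MC0. Illustrative only.
 */
static inline int example_knl_channel_mc(int channel)
{
	return channel < 3 ? 1 : 0;
}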

/*
 * Get the Nth entry from EDC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 *  physical EDC modules.)
 *
 * entry 0: 0:2
 *       1: 3:5
 *       2: 6:8
 *       3: 9:11
 *       4: 12:14
 *       5: 15:17
 *       6: 18:20
 *       7: 21:23
 * reserved: 24:31
 */
static u32 knl_get_edc_route(int entry, u32 reg)
{}
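
/*
 * Per the layout above, route entry N is simply the 3-bit field starting
 * at bit (N * 3). Hedged sketch of the extraction (the real body is
 * elided; illustrative only):
 */
static inline u32 example_knl_get_edc_route(int entry, u32 reg)
{
	return (reg >> (entry * 3)) & 0x7;	/* 3 bits per entry */
}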

/*
 * Get the Nth entry from MC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 *  physical DRAM channels.)
 *
 * entry 0: mc 0:2   channel 18:19
 *       1: mc 3:5   channel 20:21
 *       2: mc 6:8   channel 22:23
 *       3: mc 9:11  channel 24:25
 *       4: mc 12:14 channel 26:27
 *       5: mc 15:17 channel 28:29
 * reserved: 30:31
 *
 * Though we have 3 bits to identify the MC, we should only see
 * the values 0 or 1.
 */

static u32 knl_get_mc_route(int entry, u32 reg)
{}
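
/*
 * Per the layout above, entry N carries a 3-bit MC id at bits (N*3)+2:(N*3)
 * and a 2-bit channel at bits (N*2)+19:(N*2)+18; the channel is then put
 * through the swizzle described for knl_channel_remap(). Hedged sketch
 * only; the real body is elided.
 */
static inline u32 example_knl_get_mc_route(int entry, u32 reg)
{
	u32 mc   = (reg >> (entry * 3)) & 0x7;
	u32 chan = (reg >> (entry * 2 + 18)) & 0x3;

	/* MC0 owns physical channels 3..5, MC1 owns channels 0..2. */
	return mc ? chan : chan + 3;
}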

/*
 * Render the EDC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
 */
static void knl_show_edc_route(u32 reg, char *s)
{}

/*
 * Render the MC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
 */
static void knl_show_mc_route(u32 reg, char *s)
{}
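
/*
 * Sketch of why the output buffers above need at least 2 bytes per entry:
 * one digit plus one separator for each decoded value, with the last
 * separator replaced by the terminating NUL. The real rendering code is
 * elided; this illustrative helper assumes nentries > 0 and single-digit
 * values.
 */
static inline void example_show_route(const u8 *vals, int nentries, char *s)
{
	int i;

	for (i = 0; i < nentries; i++) {
		s[i * 2] = '0' + vals[i];
		s[i * 2 + 1] = '-';
	}
	s[nentries * 2 - 1] = '\0';
}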

#define KNL_EDC_ROUTE
#define KNL_MC_ROUTE

/* Is this dram rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg)

/* Is this dram rule cached (backed by DRAM, cacheable in EDRAM)? */
#define KNL_CACHEABLE(reg)

/* Is this rule backed by edc? */
#define KNL_EDRAM_ONLY(reg)

/* Is this rule mod3? */
#define KNL_MOD3(reg)

/*
 * Figure out how big our RAM modules are.
 *
 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
 * have to figure this out from the SAD rules, interleave lists, route tables,
 * and TAD rules.
 *
 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
 * inspect the TAD rules to figure out how large the SAD regions really are.
 *
 * When we know the real size of a SAD region and how many ways it's
 * interleaved, we know the individual contribution of each channel to
 * TAD is size/ways.
 *
 * Finally, we have to check whether each channel participates in each SAD
 * region.
 *
 * Fortunately, KNL only supports one DIMM per channel, so once we know how
 * much memory the channel uses, we know the DIMM is at least that large.
 * (The BIOS might possibly choose not to map all available memory, in which
 * case we will underreport the size of the DIMM.)
 *
 * In theory, we could try to determine the EDC sizes as well, but that would
 * only work in flat mode, not in cache mode.
 *
 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
 *            elements)
 */
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{}
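
/*
 * Worked example of the size/ways reasoning above (numbers are purely
 * illustrative): if a SAD region really spans 32 GiB once the TAD rules
 * have been used to account for holes, and it is interleaved 4 ways, each
 * participating channel contributes 32 GiB / 4 = 8 GiB, so its DIMM must
 * be at least 8 GiB. A hedged helper for that step, assuming div_u64()
 * from <linux/math64.h>:
 */
static inline u64 example_channel_contribution(u64 sad_region_size, u32 ways)
{
	return div_u64(sad_region_size, ways);	/* size / interleave ways */
}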

static void get_source_id(struct mem_ctl_info *mci)
{}

static int __populate_dimms(struct mem_ctl_info *mci,
			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
			    enum edac_type mode)
{}

static int get_dimm_config(struct mem_ctl_info *mci)
{}

static void get_memory_layout(const struct mem_ctl_info *mci)
{}

static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
{}

static u8 sb_close_row[] =;

static u8 sb_close_column[] =;

static u8 sb_open_row[] =;

static u8 sb_open_column[] =;

static u8 sb_open_fine_column[] =;

static int sb_bits(u64 addr, int nbits, u8 *bits)
{}

static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
{}

static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank,
			   u64 rank_addr, char *msg)
{}

static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank,
			   u64 rank_addr, char *msg)
{}

static int get_memory_error_data(struct mem_ctl_info *mci,
				 u64 addr,
				 u8 *socket, u8 *ha,
				 long *channel_mask,
				 u8 *rank,
				 char **area_type, char *msg)
{}

static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
					  const struct mce *m, u8 *socket,
					  u8 *ha, long *channel_mask,
					  char *msg)
{}

/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 *	sbridge_put_all_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
{}

static void sbridge_put_all_devices(void)
{}

static int sbridge_get_onedevice(struct pci_dev **prev,
				 u8 *num_mc,
				 const struct pci_id_table *table,
				 const unsigned devno,
				 const int multi_bus)
{}

/*
 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
 *			     devices we want to reference for this driver.
 * @num_mc: pointer to the memory controllers count, to be incremented in case
 *	    of success.
 * @table: model specific table
 *
 * Returns 0 on success or an error code.
 */
static int sbridge_get_all_devices(u8 *num_mc,
					const struct pci_id_table *table)
{}

/*
 * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
 * the format: XXXa. So we can convert from a device to the corresponding
 * channel like this
 */
#define TAD_DEV_TO_CHAN(dev)
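
/*
 * Hedged sketch of the conversion described above (the real macro body is
 * elided): with TAD0 device IDs ending in 0xa, TAD1 in 0xb, and so on,
 * the channel index falls out of the low nibble. Illustrative only.
 */
static inline int example_tad_dev_to_chan(unsigned int dev_id)
{
	return (dev_id & 0xf) - 0xa;	/* ...a -> 0, ...b -> 1, ... */
}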

static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{}

static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{}

static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{}

static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{}

static int knl_mci_bind_devs(struct mem_ctl_info *mci,
			struct sbridge_dev *sbridge_dev)
{}

/****************************************************************************
			Error check routines
 ****************************************************************************/

/*
 * While Sandy Bridge has error count registers, the SMI BIOS reads and
 * resets those counters, so they are not reliable for the OS to read.
 * We therefore have no option but to trust whatever the MCE tells us
 * about the errors.
 */
static void sbridge_mce_output_error(struct mem_ctl_info *mci,
				    const struct mce *m)
{}

/*
 * Check that logging is enabled and that this is the right type
 * of error for us to handle.
 */
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
				   void *data)
{}

static struct notifier_block sbridge_mce_dec =;

/****************************************************************************
			EDAC register/unregister logic
 ****************************************************************************/

static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
{}

static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
{}

static const struct x86_cpu_id sbridge_cpuids[] =;
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);

/*
 *	sbridge_probe	Get all devices and register memory controllers
 *			present.
 *	return:
 *		0 if a device was found
 *		< 0 for an error code
 */

static int sbridge_probe(const struct x86_cpu_id *id)
{}

/*
 *	sbridge_remove	cleanup
 *
 */
static void sbridge_remove(void)
{}

/*
 *	sbridge_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init sbridge_init(void)
{}

/*
 *	sbridge_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit sbridge_exit(void)
{}

module_init(sbridge_init);
module_exit(sbridge_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC();

MODULE_LICENSE();
MODULE_AUTHOR();
MODULE_AUTHOR();
MODULE_DESCRIPTION();