Commit e94ee641 authored by Linus Torvalds

Merge tag 'edac_updates_for_v6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras

Pull EDAC updates from Borislav Petkov:

 - skx_edac: Fix overflow when decoding 32G DIMM ranks

 - i10nm_edac: Add Sierra Forest support

 - amd64_edac: Split driver code between legacy and SMCA systems. The
   final goal is adding support for more hw, like GPUs

 - The usual minor cleanups and fixes

* tag 'edac_updates_for_v6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras: (25 commits)
  EDAC/i10nm: Add Intel Sierra Forest server support
  EDAC/amd64: Fix indentation in umc_determine_edac_cap()
  EDAC/altera: Remove MODULE_LICENSE in non-module
  EDAC: Sanitize MODULE_AUTHOR strings
  EDAC/amd81[13]1: Remove trailing newline from MODULE_AUTHOR
  EDAC/amd64: Add get_err_info() to pvt->ops
  EDAC/amd64: Split dump_misc_regs() into dct/umc functions
  EDAC/amd64: Split init_csrows() into dct/umc functions
  EDAC/amd64: Split determine_edac_cap() into dct/umc functions
  EDAC/amd64: Rename f17h_determine_edac_ctl_cap()
  EDAC/amd64: Split setup_mci_misc_attrs() into dct/umc functions
  EDAC/amd64: Split ecc_enabled() into dct/umc functions
  EDAC/amd64: Split read_mc_regs() into dct/umc functions
  EDAC/amd64: Split determine_memory_type() into dct/umc functions
  EDAC/amd64: Split read_base_mask() into dct/umc functions
  EDAC/amd64: Split prep_chip_selects() into dct/umc functions
  EDAC/amd64: Rework hw_info_{get,put}
  EDAC/amd64: Merge struct amd64_family_type into struct amd64_pvt
  EDAC/amd64: Do not discover ECC symbol size for Family 17h and later
  EDAC/amd64: Drop dbam_to_cs() for Family 17h and later
  ...
parents f7301270 ce8ac911
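
Most of the diff below implements the amd64_edac split called out above: family-specific behavior moves out of scattered if (pvt->umc) checks into a per-system ops table chosen once at probe time. A compilable sketch of the dispatch idea, with stub types standing in for the kernel structures (only the names low_ops, umc_ops, dct_ops and the fam >= 0x17 cutoff are taken from the patch):

#include <stdbool.h>
#include <stdio.h>

struct amd64_pvt;

struct low_ops {
        int  (*hw_info_get)(struct amd64_pvt *pvt);
        bool (*ecc_enabled)(struct amd64_pvt *pvt);
};

struct amd64_pvt {
        unsigned int fam;       /* CPU family, e.g. 0x17 */
        struct low_ops *ops;    /* dct_ops or umc_ops */
};

static int  umc_hw_info_get(struct amd64_pvt *pvt) { (void)pvt; puts("UMC/SMCA path"); return 0; }
static bool umc_ecc_enabled(struct amd64_pvt *pvt) { (void)pvt; return true; }
static int  dct_hw_info_get(struct amd64_pvt *pvt) { (void)pvt; puts("legacy DCT path"); return 0; }
static bool dct_ecc_enabled(struct amd64_pvt *pvt) { (void)pvt; return true; }

static struct low_ops umc_ops = { umc_hw_info_get, umc_ecc_enabled };
static struct low_ops dct_ops = { dct_hw_info_get, dct_ecc_enabled };

int main(void)
{
        struct amd64_pvt pvt = { .fam = 0x19 };

        /* per_family_init() picks the ops group once... */
        pvt.ops = (pvt.fam >= 0x17) ? &umc_ops : &dct_ops;

        /* ...and common probe code dispatches without family checks. */
        return pvt.ops->hw_info_get(&pvt) || !pvt.ops->ecc_enabled(&pvt);
}
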
@@ -2149,10 +2149,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
}
edac->sb_irq = platform_get_irq(pdev, 0);
if (edac->sb_irq < 0) {
dev_err(&pdev->dev, "No SBERR IRQ resource\n");
if (edac->sb_irq < 0)
return edac->sb_irq;
}
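/*
 * Note: platform_get_irq() already logs an error itself when the IRQ
 * cannot be found, which is why the driver-local dev_err() messages are
 * dropped here and in the DBERR hunk below.
 */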
irq_set_chained_handler_and_data(edac->sb_irq,
altr_edac_a10_irq_handler,
@@ -2184,10 +2182,9 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
}
#else
edac->db_irq = platform_get_irq(pdev, 1);
if (edac->db_irq < 0) {
dev_err(&pdev->dev, "No DBERR IRQ resource\n");
if (edac->db_irq < 0)
return edac->db_irq;
}
irq_set_chained_handler_and_data(edac->db_irq,
altr_edac_a10_irq_handler, edac);
#endif
@@ -2226,6 +2223,5 @@ static struct platform_driver altr_edac_a10_driver = {
};
module_platform_driver(altr_edac_a10_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
@@ -13,11 +13,9 @@ module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
static struct amd64_family_type *fam_type;
static inline u32 get_umc_reg(u32 reg)
static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
{
if (!fam_type->flags.zn_regs_v2)
if (!pvt->flags.zn_regs_v2)
return reg;
switch (reg) {
@@ -437,7 +435,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
#define for_each_umc(i) \
for (i = 0; i < fam_type->max_mcs; i++)
for (i = 0; i < pvt->max_mcs; i++)
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
@@ -1258,40 +1256,102 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
        unsigned long edac_cap = EDAC_FLAG_NONE;
        u8 bit;

        if (pvt->umc) {
                u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;

                for_each_umc(i) {
                        if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
                                continue;

                        umc_en_mask |= BIT(i);

                        /* UMC Configuration bit 12 (DimmEccEn) */
                        if (pvt->umc[i].umc_cfg & BIT(12))
                                dimm_ecc_en_mask |= BIT(i);
                }

                if (umc_en_mask == dimm_ecc_en_mask)
                        edac_cap = EDAC_FLAG_SECDED;
        } else {
                bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
                        ? 19
                        : 17;

                if (pvt->dclr0 & BIT(bit))
                        edac_cap = EDAC_FLAG_SECDED;
        }

        return edac_cap;
}

static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
{
        unsigned long edac_cap = EDAC_FLAG_NONE;
        u8 bit;

        bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
                ? 19
                : 17;

        if (pvt->dclr0 & BIT(bit))
                edac_cap = EDAC_FLAG_SECDED;

        return edac_cap;
}

static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
{
        u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
        unsigned long edac_cap = EDAC_FLAG_NONE;

        for_each_umc(i) {
                if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
                        continue;

                umc_en_mask |= BIT(i);

                /* UMC Configuration bit 12 (DimmEccEn) */
                if (pvt->umc[i].umc_cfg & BIT(12))
                        dimm_ecc_en_mask |= BIT(i);
        }

        if (umc_en_mask == dimm_ecc_en_mask)
                edac_cap = EDAC_FLAG_SECDED;

        return edac_cap;
}
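/*
 * E.g. with UMC0 and UMC1 initialized (umc_en_mask = 0b11) but DimmEccEn
 * set only on UMC0 (dimm_ecc_en_mask = 0b01), the masks differ and
 * edac_cap stays EDAC_FLAG_NONE: ECC is advertised only when every
 * enabled UMC has it on.
 */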
static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
/*
* debug routine to display the memory sizes of all logical DIMMs and their
* CSROWs
*/
static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
int dimm, size0, size1;
if (pvt->fam == 0xf) {
/* K8 families < revF not supported yet */
if (pvt->ext_model < K8_REV_F)
return;
WARN_ON(ctrl != 0);
}
if (pvt->fam == 0x10) {
dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
: pvt->dbam0;
dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
pvt->csels[1].csbases :
pvt->csels[0].csbases;
} else if (ctrl) {
dbam = pvt->dbam0;
dcsb = pvt->csels[1].csbases;
}
edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
/* Dump memory sizes for DIMM and its CSROWs */
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
/*
* For F15m60h, we need a multiplier for LRDIMM cs_size
* calculation. We pass dimm value to the dbam_to_cs
* mapper so we can find the multiplier from the
* corresponding DCSM.
*/
size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
DBAM_DIMM(dimm, dbam),
dimm);
size1 = 0;
if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
DBAM_DIMM(dimm, dbam),
dimm);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0,
dimm * 2 + 1, size1);
}
}
static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
@@ -1334,7 +1394,7 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
u8 base, count = 0;
int cs_mode = 0;
@@ -1366,7 +1426,85 @@ static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
u32 addr_mask_orig, addr_mask_deinterleaved;
u32 msb, weight, num_zero_bits;
int cs_mask_nr = csrow_nr;
int dimm, size = 0;
/* No Chip Selects are enabled. */
if (!cs_mode)
return size;
/* Requested size of an even CS but none are enabled. */
if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
return size;
/* Requested size of an odd CS but none are enabled. */
if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
return size;
/*
* Family 17h introduced systems with one mask per DIMM,
* and two Chip Selects per DIMM.
*
* CS0 and CS1 -> MASK0 / DIMM0
* CS2 and CS3 -> MASK1 / DIMM1
*
* Family 19h Model 10h introduced systems with one mask per Chip Select,
* and two Chip Selects per DIMM.
*
* CS0 -> MASK0 -> DIMM0
* CS1 -> MASK1 -> DIMM0
* CS2 -> MASK2 -> DIMM1
* CS3 -> MASK3 -> DIMM1
*
* Keep the mask number equal to the Chip Select number for newer systems,
* and shift the mask number for older systems.
*/
dimm = csrow_nr >> 1;
if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
/*
* The number of zero bits in the mask is equal to the number of bits
* in a full mask minus the number of bits in the current mask.
*
* The MSB is the number of bits in the full mask because BIT[0] is
* always 0.
*
* In the special 3 Rank interleaving case, a single bit is flipped
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
size = (addr_mask_deinterleaved >> 2) + 1;
/* Return size in MBs. */
return size >> 10;
}
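/*
 * Worked example of the math above (values chosen for illustration):
 * a 16 GB chip select spans address bits [33:9], so the register holds
 * mask bits [25:1]: addr_mask_orig = 0x3fffffe.
 *
 *   msb           = fls(0x3fffffe) - 1      = 25
 *   weight        = hweight_long(0x3fffffe) = 25
 *   num_zero_bits = 25 - 25 - 0             = 0
 *   deinterleaved = GENMASK_ULL(25, 1)      = 0x3fffffe
 *   size          = (0x3fffffe >> 2) + 1    = 0x1000000 kB = 16384 MB
 *
 * With 2-way interleaving one mask bit (say bit 8) reads back as zero:
 * addr_mask_orig = 0x3fffefe, weight drops to 24, num_zero_bits becomes 1,
 * deinterleaved = GENMASK_ULL(24, 1), and the result halves to 8192 MB.
 */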
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, cs0, cs1, cs_mode;
@@ -1376,10 +1514,10 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
cs0 = dimm * 2;
cs1 = dimm * 2 + 1;
cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
@@ -1387,7 +1525,7 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
}
}
static void __dump_misc_regs_df(struct amd64_pvt *pvt)
static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i, tmp, umc_base;
@@ -1420,18 +1558,17 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
amd_smn_read(pvt->mc_node_id,
umc_base + get_umc_reg(UMCCH_ADDR_CFG),
umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
&tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
}
debug_display_dimm_sizes_df(pvt, i);
umc_debug_display_dimm_sizes(pvt, i);
}
}
/* Display and decode various NB registers for debug purposes. */
static void __dump_misc_regs(struct amd64_pvt *pvt)
static void dct_dump_misc_regs(struct amd64_pvt *pvt)
{
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
@@ -1451,28 +1588,19 @@ static void __dump_misc_regs(struct amd64_pvt *pvt)
(pvt->fam == 0xf) ? k8_dhar_offset(pvt)
: f10_dhar_offset(pvt));
debug_display_dimm_sizes(pvt, 0);
dct_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (pvt->fam == 0xf)
return;
debug_display_dimm_sizes(pvt, 1);
dct_debug_display_dimm_sizes(pvt, 1);
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
if (pvt->umc)
__dump_misc_regs_df(pvt);
else
__dump_misc_regs(pvt);
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -1480,7 +1608,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
/*
* See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
static void prep_chip_selects(struct amd64_pvt *pvt)
static void dct_prep_chip_selects(struct amd64_pvt *pvt)
{
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
@@ -1488,21 +1616,23 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
} else if (pvt->fam >= 0x17) {
int umc;
for_each_umc(umc) {
pvt->csels[umc].b_cnt = 4;
pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2;
}
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
static void read_umc_base_mask(struct amd64_pvt *pvt)
static void umc_prep_chip_selects(struct amd64_pvt *pvt)
{
int umc;
for_each_umc(umc) {
pvt->csels[umc].b_cnt = 4;
pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
}
}
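/*
 * zn_regs_v2 systems carry one mask register per chip select (four per
 * UMC); older systems have one mask per DIMM (two per UMC), shared by the
 * DIMM's two chip selects. See the mapping spelled out in the comment in
 * umc_addr_mask_to_cs_size() above.
 */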
static void umc_read_base_mask(struct amd64_pvt *pvt)
{
u32 umc_base_reg, umc_base_reg_sec;
u32 umc_mask_reg, umc_mask_reg_sec;
@@ -1533,7 +1663,7 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC);
umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
for_each_chip_select_mask(cs, umc, pvt) {
mask = &pvt->csels[umc].csmasks[cs];
@@ -1556,15 +1686,10 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
/*
* Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
static void read_dct_base_mask(struct amd64_pvt *pvt)
static void dct_read_base_mask(struct amd64_pvt *pvt)
{
int cs;
prep_chip_selects(pvt);
if (pvt->umc)
return read_umc_base_mask(pvt);
for_each_chip_select(cs, 0, pvt) {
int reg0 = DCSB0 + (cs * 4);
int reg1 = DCSB1 + (cs * 4);
@@ -1604,7 +1729,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}
static void determine_memory_type_df(struct amd64_pvt *pvt)
static void umc_determine_memory_type(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i;
@@ -1621,7 +1746,7 @@ static void determine_memory_type_df(struct amd64_pvt *pvt)
* Check if the system supports the "DDR Type" field in UMC Config
* and has DDR5 DIMMs in use.
*/
if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
if (umc->dimm_cfg & BIT(5))
umc->dram_type = MEM_LRDDR5;
else if (umc->dimm_cfg & BIT(4))
@@ -1641,13 +1766,10 @@ static void determine_memory_type_df(struct amd64_pvt *pvt)
}
}
static void determine_memory_type(struct amd64_pvt *pvt)
static void dct_determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
if (pvt->umc)
return determine_memory_type_df(pvt);
switch (pvt->fam) {
case 0xf:
if (pvt->ext_model >= K8_REV_F)
@@ -1697,6 +1819,8 @@ static void determine_memory_type(struct amd64_pvt *pvt)
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
}
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
return;
ddr3:
@@ -2081,84 +2205,6 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr3_cs_size(cs_mode, false);
}
static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
u32 addr_mask_orig, addr_mask_deinterleaved;
u32 msb, weight, num_zero_bits;
int cs_mask_nr = csrow_nr;
int dimm, size = 0;
/* No Chip Selects are enabled. */
if (!cs_mode)
return size;
/* Requested size of an even CS but none are enabled. */
if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
return size;
/* Requested size of an odd CS but none are enabled. */
if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
return size;
/*
* Family 17h introduced systems with one mask per DIMM,
* and two Chip Selects per DIMM.
*
* CS0 and CS1 -> MASK0 / DIMM0
* CS2 and CS3 -> MASK1 / DIMM1
*
* Family 19h Model 10h introduced systems with one mask per Chip Select,
* and two Chip Selects per DIMM.
*
* CS0 -> MASK0 -> DIMM0
* CS1 -> MASK1 -> DIMM0
* CS2 -> MASK2 -> DIMM1
* CS3 -> MASK3 -> DIMM1
*
* Keep the mask number equal to the Chip Select number for newer systems,
* and shift the mask number for older systems.
*/
dimm = csrow_nr >> 1;
if (!fam_type->flags.zn_regs_v2)
cs_mask_nr >>= 1;
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
/*
* The number of zero bits in the mask is equal to the number of bits
* in a full mask minus the number of bits in the current mask.
*
* The MSB is the number of bits in the full mask because BIT[0] is
* always 0.
*
* In the special 3 Rank interleaving case, a single bit is flipped
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
size = (addr_mask_deinterleaved >> 2) + 1;
/* Return size in MBs. */
return size >> 10;
}
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
@@ -2681,196 +2727,6 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
* debug routine to display the memory sizes of all logical DIMMs and their
* CSROWs
*/
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1;
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
if (pvt->fam == 0xf) {
/* K8 families < revF not supported yet */
if (pvt->ext_model < K8_REV_F)
return;
else
WARN_ON(ctrl != 0);
}
if (pvt->fam == 0x10) {
dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
: pvt->dbam0;
dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
pvt->csels[1].csbases :
pvt->csels[0].csbases;
} else if (ctrl) {
dbam = pvt->dbam0;
dcsb = pvt->csels[1].csbases;
}
edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
/* Dump memory sizes for DIMM and its CSROWs */
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
if (dcsb[dimm*2] & DCSB_CS_ENABLE)
/*
* For F15m60h, we need a multiplier for LRDIMM cs_size
* calculation. We pass dimm value to the dbam_to_cs
* mapper so we can find the multiplier from the
* corresponding DCSM.
*/
size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
DBAM_DIMM(dimm, dbam),
dimm);
size1 = 0;
if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
DBAM_DIMM(dimm, dbam),
dimm);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0,
dimm * 2 + 1, size1);
}
}
static struct amd64_family_type family_types[] = {
[K8_CPUS] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
.max_mcs = 2,
.ops = {
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
}
},
[F10_CPUS] = {
.ctl_name = "F10h",
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
.max_mcs = 2,
.ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
}
},
[F15_CPUS] = {
.ctl_name = "F15h",
.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
.max_mcs = 2,
.ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f15_dbam_to_chip_select,
}
},
[F15_M30H_CPUS] = {
.ctl_name = "F15h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
.max_mcs = 2,
.ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
},
[F15_M60H_CPUS] = {
.ctl_name = "F15h_M60h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
.max_mcs = 2,
.ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f15_m60h_dbam_to_chip_select,
}
},
[F16_CPUS] = {
.ctl_name = "F16h",
.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
.max_mcs = 2,
.ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
},
[F16_M30H_CPUS] = {
.ctl_name = "F16h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
.max_mcs = 2,
.ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
},
[F17_CPUS] = {
.ctl_name = "F17h",
.max_mcs = 2,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M10H_CPUS] = {
.ctl_name = "F17h_M10h",
.max_mcs = 2,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M30H_CPUS] = {
.ctl_name = "F17h_M30h",
.max_mcs = 8,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M60H_CPUS] = {
.ctl_name = "F17h_M60h",
.max_mcs = 2,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M70H_CPUS] = {
.ctl_name = "F17h_M70h",
.max_mcs = 2,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_CPUS] = {
.ctl_name = "F19h",
.max_mcs = 8,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M10H_CPUS] = {
.ctl_name = "F19h_M10h",
.max_mcs = 12,
.flags.zn_regs_v2 = 1,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M50H_CPUS] = {
.ctl_name = "F19h_M50h",
.max_mcs = 2,
.ops = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
};
/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
@@ -3118,10 +2974,14 @@ static inline void decode_bus_error(int node_id, struct mce *m)
* Currently, we can derive the channel number by looking at the 6th nibble in
* the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
* number.
*
* For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
* the MCA_SYND[ErrorInformation] field.
*/
static int find_umc_channel(struct mce *m)
static void umc_get_err_info(struct mce *m, struct err_info *err)
{
return (m->ipid & GENMASK(31, 0)) >> 20;
err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
err->csrow = m->synd & 0x7;
}
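/*
 * Illustration: if the low 32 bits of m->ipid are 0x00350000 (i.e.
 * instance_id = 0xYXXXXX with Y = 3), then err->channel = 3; a syndrome
 * value ending in 0x5 yields err->csrow = 0x5 & 0x7 = 5.
 */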
static void decode_umc_error(int node_id, struct mce *m)
@@ -3143,8 +3003,6 @@ static void decode_umc_error(int node_id, struct mce *m)
if (m->status & MCI_STATUS_DEFERRED)
ecc_type = 3;
err.channel = find_umc_channel(m);
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
@@ -3159,7 +3017,7 @@ static void decode_umc_error(int node_id, struct mce *m)
err.err_code = ERR_CHANNEL;
}
err.csrow = m->synd & 0x7;
pvt->ops->get_err_info(m, &err);
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
@@ -3179,9 +3037,6 @@ static void decode_umc_error(int node_id, struct mce *m)
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
if (pvt->umc)
return 0;
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
@@ -3209,36 +3064,11 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
return 0;
}
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
if (pvt->umc) {
return;
} else {
pci_dev_put(pvt->F1);
pci_dev_put(pvt->F2);
}
}
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
pvt->ecc_sym_sz = 4;
if (pvt->umc) {
u8 i;
for_each_umc(i) {
/* Check enabled channels only: */
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
if (pvt->umc[i].ecc_ctrl & BIT(9)) {
pvt->ecc_sym_sz = 16;
return;
} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
pvt->ecc_sym_sz = 8;
return;
}
}
}
} else if (pvt->fam >= 0x10) {
if (pvt->fam >= 0x10) {
u32 tmp;
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
@@ -3255,7 +3085,7 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
/*
* Retrieve the hardware registers of the memory controller.
*/
static void __read_mc_regs_df(struct amd64_pvt *pvt)
static void umc_read_mc_regs(struct amd64_pvt *pvt)
{
u8 nid = pvt->mc_node_id;
struct amd64_umc *umc;
@@ -3267,7 +3097,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
@@ -3279,7 +3109,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
* Retrieve the hardware registers of the memory controller (this includes the
* 'Address Map' and 'Misc' device regs)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
static void dct_read_mc_regs(struct amd64_pvt *pvt)
{
unsigned int range;
u64 msr_val;
@@ -3300,12 +3130,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
edac_dbg(0, " TOP_MEM2 disabled\n");
}
if (pvt->umc) {
__read_mc_regs_df(pvt);
goto skip;
}
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
read_dram_ctl_register(pvt);
@@ -3346,14 +3170,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
}
skip:
read_dct_base_mask(pvt);
determine_memory_type(pvt);
if (!pvt->umc)
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
determine_ecc_sym_sz(pvt);
}
@@ -3391,36 +3207,47 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
if (!pvt->umc) {
csrow_nr >>= 1;
cs_mode = DBAM_DIMM(csrow_nr, dbam);
} else {
cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
}
csrow_nr >>= 1;
cs_mode = DBAM_DIMM(csrow_nr, dbam);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
csrow_nr_orig, dct, cs_mode);
csrow_nr, dct, cs_mode);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
static int init_csrows_df(struct mem_ctl_info *mci)
static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
csrow_nr_orig, dct, cs_mode);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
static void umc_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
enum edac_type edac_mode = EDAC_NONE;
enum dev_type dev_type = DEV_UNKNOWN;
struct dimm_info *dimm;
int empty = 1;
u8 umc, cs;
if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
@@ -3441,40 +3268,34 @@ static int init_csrows_df(struct mem_ctl_info *mci)
if (!csrow_enabled(cs, umc, pvt))
continue;
empty = 0;
dimm = mci->csrows[cs]->channels[umc]->dimm;
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, cs);
dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
dimm->mtype = pvt->umc[umc].dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
dimm->grain = 64;
}
}
return empty;
}
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
*/
static int init_csrows(struct mem_ctl_info *mci)
static void dct_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
enum edac_type edac_mode = EDAC_NONE;
struct csrow_info *csrow;
struct dimm_info *dimm;
int i, j, empty = 1;
int nr_pages = 0;
int i, j;
u32 val;
if (pvt->umc)
return init_csrows_df(mci);
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
@@ -3497,19 +3318,18 @@ static int init_csrows(struct mem_ctl_info *mci)
continue;
csrow = mci->csrows[i];
empty = 0;
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, i);
if (row_dct0) {
nr_pages = get_csrow_nr_pages(pvt, 0, i);
nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
csrow->channels[0]->dimm->nr_pages = nr_pages;
}
/* K8 has only one DCT */
if (pvt->fam != 0xf && row_dct1) {
int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
nr_pages += row_dct1_pages;
@@ -3524,15 +3344,13 @@ static int init_csrows(struct mem_ctl_info *mci)
: EDAC_SECDED;
}
for (j = 0; j < fam_type->max_mcs; j++) {
for (j = 0; j < pvt->max_mcs; j++) {
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
dimm->grain = 64;
}
}
return empty;
}
/* get all cores on this DCT */
@@ -3695,59 +3513,66 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
amd64_warn("Error restoring NB MCGCTL settings!\n");
}
static bool ecc_enabled(struct amd64_pvt *pvt)
{
        u16 nid = pvt->mc_node_id;
        bool nb_mce_en = false;
        u8 ecc_en = 0, i;
        u32 value;

        if (boot_cpu_data.x86 >= 0x17) {
                u8 umc_en_mask = 0, ecc_en_mask = 0;
                struct amd64_umc *umc;

                for_each_umc(i) {
                        umc = &pvt->umc[i];

                        /* Only check enabled UMCs. */
                        if (!(umc->sdp_ctrl & UMC_SDP_INIT))
                                continue;

                        umc_en_mask |= BIT(i);

                        if (umc->umc_cap_hi & UMC_ECC_ENABLED)
                                ecc_en_mask |= BIT(i);
                }

                /* Check whether at least one UMC is enabled: */
                if (umc_en_mask)
                        ecc_en = umc_en_mask == ecc_en_mask;
                else
                        edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

                /* Assume UMC MCA banks are enabled. */
                nb_mce_en = true;
        } else {
                amd64_read_pci_cfg(pvt->F3, NBCFG, &value);

                ecc_en = !!(value & NBCFG_ECC_ENABLE);

                nb_mce_en = nb_mce_bank_enabled_on_node(nid);
                if (!nb_mce_en)
                        edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
                                 MSR_IA32_MCG_CTL, nid);
        }

        edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));

        if (!ecc_en || !nb_mce_en)
                return false;
        else
                return true;
}

static bool dct_ecc_enabled(struct amd64_pvt *pvt)
{
        u16 nid = pvt->mc_node_id;
        bool nb_mce_en = false;
        u8 ecc_en = 0;
        u32 value;

        amd64_read_pci_cfg(pvt->F3, NBCFG, &value);

        ecc_en = !!(value & NBCFG_ECC_ENABLE);

        nb_mce_en = nb_mce_bank_enabled_on_node(nid);
        if (!nb_mce_en)
                edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
                         MSR_IA32_MCG_CTL, nid);

        edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));

        if (!ecc_en || !nb_mce_en)
                return false;
        else
                return true;
}

static bool umc_ecc_enabled(struct amd64_pvt *pvt)
{
        u8 umc_en_mask = 0, ecc_en_mask = 0;
        u16 nid = pvt->mc_node_id;
        struct amd64_umc *umc;
        u8 ecc_en = 0, i;

        for_each_umc(i) {
                umc = &pvt->umc[i];

                /* Only check enabled UMCs. */
                if (!(umc->sdp_ctrl & UMC_SDP_INIT))
                        continue;

                umc_en_mask |= BIT(i);

                if (umc->umc_cap_hi & UMC_ECC_ENABLED)
                        ecc_en_mask |= BIT(i);
        }

        /* Check whether at least one UMC is enabled: */
        if (umc_en_mask)
                ecc_en = umc_en_mask == ecc_en_mask;
        else
                edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

        edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));

        if (!ecc_en)
                return false;
        else
                return true;
}
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
@@ -3777,145 +3602,234 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
}
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
        struct amd64_pvt *pvt = mci->pvt_info;

        mci->mtype_cap          = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
        mci->edac_ctl_cap       = EDAC_FLAG_NONE;

        if (pvt->umc) {
                f17h_determine_edac_ctl_cap(mci, pvt);
        } else {
                if (pvt->nbcap & NBCAP_SECDED)
                        mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

                if (pvt->nbcap & NBCAP_CHIPKILL)
                        mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
        }

        mci->edac_cap           = determine_edac_cap(pvt);
        mci->mod_name           = EDAC_MOD_STR;
        mci->ctl_name           = fam_type->ctl_name;
        mci->dev_name           = pci_name(pvt->F3);
        mci->ctl_page_to_phys   = NULL;

        if (pvt->fam >= 0x17)
                return;

        /* memory scrubber interface */
        mci->set_sdram_scrub_rate = set_scrub_rate;
        mci->get_sdram_scrub_rate = get_scrub_rate;
}

static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
        struct amd64_pvt *pvt = mci->pvt_info;

        mci->mtype_cap          = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
        mci->edac_ctl_cap       = EDAC_FLAG_NONE;

        if (pvt->nbcap & NBCAP_SECDED)
                mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

        if (pvt->nbcap & NBCAP_CHIPKILL)
                mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

        mci->edac_cap           = dct_determine_edac_cap(pvt);
        mci->mod_name           = EDAC_MOD_STR;
        mci->ctl_name           = pvt->ctl_name;
        mci->dev_name           = pci_name(pvt->F3);
        mci->ctl_page_to_phys   = NULL;

        /* memory scrubber interface */
        mci->set_sdram_scrub_rate = set_scrub_rate;
        mci->get_sdram_scrub_rate = get_scrub_rate;

        dct_init_csrows(mci);
}
/*
* returns a pointer to the family descriptor on success, NULL otherwise.
*/
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
umc_determine_edac_ctl_cap(mci, pvt);
mci->edac_cap = umc_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->ctl_name = pvt->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
umc_init_csrows(mci);
}
static int dct_hw_info_get(struct amd64_pvt *pvt)
{
int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
if (ret)
return ret;
dct_prep_chip_selects(pvt);
dct_read_base_mask(pvt);
dct_read_mc_regs(pvt);
dct_determine_memory_type(pvt);
return 0;
}
static int umc_hw_info_get(struct amd64_pvt *pvt)
{
pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc)
return -ENOMEM;
umc_prep_chip_selects(pvt);
umc_read_base_mask(pvt);
umc_read_mc_regs(pvt);
umc_determine_memory_type(pvt);
return 0;
}
static void hw_info_put(struct amd64_pvt *pvt)
{
pci_dev_put(pvt->F1);
pci_dev_put(pvt->F2);
kfree(pvt->umc);
}
static struct low_ops umc_ops = {
.hw_info_get = umc_hw_info_get,
.ecc_enabled = umc_ecc_enabled,
.setup_mci_misc_attrs = umc_setup_mci_misc_attrs,
.dump_misc_regs = umc_dump_misc_regs,
.get_err_info = umc_get_err_info,
};
/* Use Family 16h versions for defaults and adjust as needed below. */
static struct low_ops dct_ops = {
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
.hw_info_get = dct_hw_info_get,
.ecc_enabled = dct_ecc_enabled,
.setup_mci_misc_attrs = dct_setup_mci_misc_attrs,
.dump_misc_regs = dct_dump_misc_regs,
};
static int per_family_init(struct amd64_pvt *pvt)
{
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
pvt->max_mcs = 2;
/*
* Decide on which ops group to use here and do any family/model
* overrides below.
*/
if (pvt->fam >= 0x17)
pvt->ops = &umc_ops;
else
pvt->ops = &dct_ops;
switch (pvt->fam) {
case 0xf:
fam_type = &family_types[K8_CPUS];
pvt->ops = &family_types[K8_CPUS].ops;
pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
"K8 revF or later" : "K8 revE or earlier";
pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
break;
case 0x10:
fam_type = &family_types[F10_CPUS];
pvt->ops = &family_types[F10_CPUS].ops;
pvt->ctl_name = "F10h";
pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
break;
case 0x15:
if (pvt->model == 0x30) {
fam_type = &family_types[F15_M30H_CPUS];
pvt->ops = &family_types[F15_M30H_CPUS].ops;
switch (pvt->model) {
case 0x30:
pvt->ctl_name = "F15h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
break;
} else if (pvt->model == 0x60) {
fam_type = &family_types[F15_M60H_CPUS];
pvt->ops = &family_types[F15_M60H_CPUS].ops;
case 0x60:
pvt->ctl_name = "F15h_M60h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
break;
case 0x13:
/* Richland is only client */
return -ENODEV;
default:
pvt->ctl_name = "F15h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
break;
/* Richland is only client */
} else if (pvt->model == 0x13) {
return NULL;
} else {
fam_type = &family_types[F15_CPUS];
pvt->ops = &family_types[F15_CPUS].ops;
}
break;
case 0x16:
if (pvt->model == 0x30) {
fam_type = &family_types[F16_M30H_CPUS];
pvt->ops = &family_types[F16_M30H_CPUS].ops;
switch (pvt->model) {
case 0x30:
pvt->ctl_name = "F16h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
break;
default:
pvt->ctl_name = "F16h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
break;
}
fam_type = &family_types[F16_CPUS];
pvt->ops = &family_types[F16_CPUS].ops;
break;
case 0x17:
if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
fam_type = &family_types[F17_M10H_CPUS];
pvt->ops = &family_types[F17_M10H_CPUS].ops;
switch (pvt->model) {
case 0x10 ... 0x2f:
pvt->ctl_name = "F17h_M10h";
break;
} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
case 0x30 ... 0x3f:
pvt->ctl_name = "F17h_M30h";
pvt->max_mcs = 8;
break;
} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
fam_type = &family_types[F17_M60H_CPUS];
pvt->ops = &family_types[F17_M60H_CPUS].ops;
case 0x60 ... 0x6f:
pvt->ctl_name = "F17h_M60h";
break;
} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops;
case 0x70 ... 0x7f:
pvt->ctl_name = "F17h_M70h";
break;
default:
pvt->ctl_name = "F17h";
break;
}
fallthrough;
case 0x18:
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
break;
if (pvt->fam == 0x18)
family_types[F17_CPUS].ctl_name = "F18h";
case 0x18:
pvt->ctl_name = "F18h";
break;
case 0x19:
if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
fam_type = &family_types[F19_M10H_CPUS];
pvt->ops = &family_types[F19_M10H_CPUS].ops;
switch (pvt->model) {
case 0x00 ... 0x0f:
pvt->ctl_name = "F19h";
pvt->max_mcs = 8;
break;
} else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops;
fam_type->ctl_name = "F19h_M20h";
case 0x10 ... 0x1f:
pvt->ctl_name = "F19h_M10h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
} else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
fam_type = &family_types[F19_M50H_CPUS];
pvt->ops = &family_types[F19_M50H_CPUS].ops;
fam_type->ctl_name = "F19h_M50h";
case 0x20 ... 0x2f:
pvt->ctl_name = "F19h_M20h";
break;
} else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
fam_type = &family_types[F19_M10H_CPUS];
pvt->ops = &family_types[F19_M10H_CPUS].ops;
fam_type->ctl_name = "F19h_MA0h";
case 0x50 ... 0x5f:
pvt->ctl_name = "F19h_M50h";
break;
case 0xa0 ... 0xaf:
pvt->ctl_name = "F19h_MA0h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
}
fam_type = &family_types[F19_CPUS];
pvt->ops = &family_types[F19_CPUS].ops;
family_types[F19_CPUS].ctl_name = "F19h";
break;
default:
amd64_err("Unsupported family!\n");
return NULL;
return -ENODEV;
}
return fam_type;
return 0;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
@@ -3926,37 +3840,6 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
static int hw_info_get(struct amd64_pvt *pvt)
{
u16 pci_id1 = 0, pci_id2 = 0;
int ret;
if (pvt->fam >= 0x17) {
pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc)
return -ENOMEM;
} else {
pci_id1 = fam_type->f1_id;
pci_id2 = fam_type->f2_id;
}
ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
if (ret)
return ret;
read_mc_regs(pvt);
return 0;
}
static void hw_info_put(struct amd64_pvt *pvt)
{
if (pvt->F1)
free_mc_sibling_devs(pvt);
kfree(pvt->umc);
}
static int init_one_instance(struct amd64_pvt *pvt)
{
struct mem_ctl_info *mci = NULL;
@@ -3967,7 +3850,7 @@ static int init_one_instance(struct amd64_pvt *pvt)
layers[0].size = pvt->csels[0].b_cnt;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = fam_type->max_mcs;
layers[1].size = pvt->max_mcs;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
@@ -3977,10 +3860,7 @@ static int init_one_instance(struct amd64_pvt *pvt)
mci->pvt_info = pvt;
mci->pdev = &pvt->F3->dev;
setup_mci_misc_attrs(mci);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
pvt->ops->setup_mci_misc_attrs(mci);
ret = -ENODEV;
if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
@@ -3997,7 +3877,7 @@ static bool instance_has_memory(struct amd64_pvt *pvt)
bool cs_enabled = false;
int cs = 0, dct = 0;
for (dct = 0; dct < fam_type->max_mcs; dct++) {
for (dct = 0; dct < pvt->max_mcs; dct++) {
for_each_chip_select(cs, dct, pvt)
cs_enabled |= csrow_enabled(cs, dct, pvt);
}
@@ -4026,12 +3906,11 @@ static int probe_one_instance(unsigned int nid)
pvt->mc_node_id = nid;
pvt->F3 = F3;
ret = -ENODEV;
fam_type = per_family_init(pvt);
if (!fam_type)
ret = per_family_init(pvt);
if (ret < 0)
goto err_enable;
ret = hw_info_get(pvt);
ret = pvt->ops->hw_info_get(pvt);
if (ret < 0)
goto err_enable;
@@ -4041,7 +3920,7 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
if (!ecc_enabled(pvt)) {
if (!pvt->ops->ecc_enabled(pvt)) {
ret = -ENODEV;
if (!ecc_enable_override)
@@ -4067,13 +3946,10 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
(pvt->fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
: ""), pvt->mc_node_id);
amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
dump_misc_regs(pvt);
/* Display and decode various registers for debug purposes. */
pvt->ops->dump_misc_regs(pvt);
return ret;
@@ -4244,10 +4120,8 @@ module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
"Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
EDAC_AMD64_VERSION);
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " EDAC_AMD64_VERSION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
@@ -273,25 +273,6 @@
#define UMC_SDP_INIT BIT(31)
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
F15_CPUS,
F15_M30H_CPUS,
F15_M60H_CPUS,
F16_CPUS,
F16_M30H_CPUS,
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
F17_M60H_CPUS,
F17_M70H_CPUS,
F19_CPUS,
F19_M10H_CPUS,
F19_M50H_CPUS,
NUM_FAMILIES,
};
/* Error injection control structure */
struct error_injection {
u32 section;
@@ -334,6 +315,16 @@ struct amd64_umc {
enum mem_type dram_type;
};
struct amd64_family_flags {
/*
* Indicates that the system supports the new register offsets, etc.
* first introduced with Family 19h Model 10h.
*/
__u64 zn_regs_v2 : 1,
__reserved : 63;
};
struct amd64_pvt {
struct low_ops *ops;
@@ -375,6 +366,12 @@ struct amd64_pvt {
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
const char *ctl_name;
u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
struct amd64_family_flags flags;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
@@ -465,29 +462,15 @@ struct ecc_settings {
* functions and per device encoding/decoding logic.
*/
struct low_ops {
void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *);
int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode, int cs_mask_nr);
};
struct amd64_family_flags {
/*
* Indicates that the system supports the new register offsets, etc.
* first introduced with Family 19h Model 10h.
*/
__u64 zn_regs_v2 : 1,
__reserved : 63;
};
struct amd64_family_type {
const char *ctl_name;
u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
struct amd64_family_flags flags;
struct low_ops ops;
void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *err);
int (*dbam_to_cs)(struct amd64_pvt *pvt, u8 dct,
unsigned int cs_mode, int cs_mask_nr);
int (*hw_info_get)(struct amd64_pvt *pvt);
bool (*ecc_enabled)(struct amd64_pvt *pvt);
void (*setup_mci_misc_attrs)(struct mem_ctl_info *mci);
void (*dump_misc_regs)(struct amd64_pvt *pvt);
void (*get_err_info)(struct mce *m, struct err_info *err);
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
......
@@ -593,5 +593,5 @@ module_init(amd8111_edac_init);
module_exit(amd8111_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
@@ -354,5 +354,5 @@ module_init(amd8131_edac_init);
module_exit(amd8131_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>\n");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
@@ -1462,7 +1462,7 @@ module_init(e752x_init);
module_exit(e752x_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
module_param(force_function_unhide, int, 0444);
......
@@ -596,8 +596,7 @@ module_init(e7xxx_init);
module_exit(e7xxx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
"Based on.work by Dan Hollis et al");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
@@ -906,6 +906,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SIERRAFOREST_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
......
@@ -1573,13 +1573,10 @@ module_init(i5000_init);
module_exit(i5000_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR
("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
I5000_REVISION);
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " I5000_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
module_param(misc_messages, int, 0444);
MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
@@ -909,7 +909,7 @@ static void i5100_do_inject(struct mem_ctl_info *mci)
*
* The injection code doesn't work without setting this register.
* The register needs to be flipped off then on, else the hardware
* will only preform the first injection.
* will only perform the first injection.
*
* Stop condition bits 7:4
* 1010 - Stop after one injection
@@ -1220,6 +1220,5 @@ module_init(i5100_init);
module_exit(i5100_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR
("Arthur Jones <ajones@riverbed.com>");
MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");
@@ -355,8 +355,7 @@ module_init(i82860_init);
module_exit(i82860_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
"Ben Woodard <woodard@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) Ben Woodard <woodard@redhat.com>");
MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
module_param(edac_op_state, int, 0444);
......
@@ -72,5 +72,4 @@ module_exit(fsl_ddr_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NXP Semiconductor");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
"EDAC Error Reporting state: 0=Poll, 2=Interrupt");
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
@@ -711,5 +711,4 @@ module_exit(mpc85xx_mc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
"EDAC Error Reporting state: 0=Poll, 2=Interrupt");
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
@@ -415,8 +415,7 @@ module_init(r82600_init);
module_exit(r82600_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
"on behalf of EADS Astrium");
MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. on behalf of EADS Astrium");
MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
module_param(disable_hardware_scrub, bool, 0644);
......
@@ -510,7 +510,7 @@ static bool skx_rir_decode(struct decoded_addr *res)
}
static u8 skx_close_row[] = {
15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
};
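/*
 * skx_close_row above and skx_open_row below each gain an 18th entry
 * (physical address bit 34): a 32G DIMM rank has one more row bit than
 * the old 17-entry tables covered, so decoding its top row bit
 * previously overran the arrays.
 */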
static u8 skx_close_column[] = {
@@ -518,7 +518,7 @@ static u8 skx_close_column[] = {
};
static u8 skx_open_row[] = {
14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
};
static u8 skx_open_column[] = {
......