Commit c91662cb authored by Linus Torvalds

Merge tag 'edac_for_3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp

Pull EDAC fixes from Borislav Petkov:
 "Misc small fixes/improvements accumulated from the summer months,
  nothing earth-shattering.

  The mpc85xx_edac fix is tagged for stable but since it got confirmed
  only days before the final 3.17 release, I didn't rush a last minute
  pull request to you.  I hope that's ok as it will trickle down to
  stable anyway and we'll make sure it lands in 3.17-stable too"

* tag 'edac_for_3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp:
  mpc85xx_edac: Make L2 interrupt shared too
  amd64_edac: Modify usage of amd64_read_dct_pci_cfg()
  ppc4xx_edac: Fix build error caused by wrong member access
  EDAC: Fix mem_types strings type
parents d0cd8481 a18c3f16
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -86,62 +86,74 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
         return err;
 }
 
+/*
+ * Select DCT to which PCI cfg accesses are routed
+ */
+static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
+{
+        u32 reg = 0;
+
+        amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+        reg &= (pvt->model == 0x30) ? ~3 : ~1;
+        reg |= dct;
+        amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+}
+
 /*
  *
  * Depending on the family, F2 DCT reads need special handling:
  *
- * K8: has a single DCT only
+ * K8: has a single DCT only and no address offsets >= 0x100
  *
  * F10h: each DCT has its own set of regs
  *      DCT0 -> F2x040..
  *      DCT1 -> F2x140..
  *
- * F15h: we select which DCT we access using F1x10C[DctCfgSel]
- *
  * F16h: has only 1 DCT
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
  */
-static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
-                               const char *func)
+static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
+                                         int offset, u32 *val)
 {
-        if (addr >= 0x100)
-                return -EINVAL;
-
-        return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
-
-static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
-                                const char *func)
-{
-        return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
-}
-
-/*
- * Select DCT to which PCI cfg accesses are routed
- */
-static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
-{
-        u32 reg = 0;
-
-        amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
-        reg &= (pvt->model >= 0x30) ? ~3 : ~1;
-        reg |= dct;
-        amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
-}
-
-static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
-                                const char *func)
-{
-        u8 dct = 0;
-
-        /* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
-        if (addr >= 0x140 && addr <= 0x1a0) {
-                dct   = (pvt->model >= 0x30) ? 3 : 1;
-                addr -= 0x100;
-        }
-
-        f15h_select_dct(pvt, dct);
-
-        return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+        switch (pvt->fam) {
+        case 0xf:
+                if (dct || offset >= 0x100)
+                        return -EINVAL;
+                break;
+
+        case 0x10:
+                if (dct) {
+                        /*
+                         * Note: If ganging is enabled, barring the regs
+                         * F2x[1,0]98 and F2x[1,0]9C; reads reads to F2x1xx
+                         * return 0. (cf. Section 2.8.1 F10h BKDG)
+                         */
+                        if (dct_ganging_enabled(pvt))
+                                return 0;
+
+                        offset += 0x100;
+                }
+                break;
+
+        case 0x15:
+                /*
+                 * F15h: F2x1xx addresses do not map explicitly to DCT1.
+                 * We should select which DCT we access using F1x10C[DctCfgSel]
+                 */
+                dct = (dct && pvt->model == 0x30) ? 3 : dct;
+                f15h_select_dct(pvt, dct);
+                break;
+
+        case 0x16:
+                if (dct)
+                        return -EINVAL;
+                break;
+
+        default:
+                break;
+        }
+
+        return amd64_read_pci_cfg(pvt->F2, offset, val);
 }
 
 /*
@@ -768,16 +780,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
                 u32 *base0 = &pvt->csels[0].csbases[cs];
                 u32 *base1 = &pvt->csels[1].csbases[cs];
 
-                if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
+                if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
                         edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
                                  cs, *base0, reg0);
 
-                if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+                if (pvt->fam == 0xf)
                         continue;
 
-                if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+                if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
                         edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
-                                 cs, *base1, reg1);
+                                 cs, *base1, (pvt->fam == 0x10) ? reg1
+                                                                : reg0);
         }
 
         for_each_chip_select_mask(cs, 0, pvt) {
@@ -786,16 +799,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
                 u32 *mask0 = &pvt->csels[0].csmasks[cs];
                 u32 *mask1 = &pvt->csels[1].csmasks[cs];
 
-                if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
+                if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
                         edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
                                  cs, *mask0, reg0);
 
-                if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
+                if (pvt->fam == 0xf)
                         continue;
 
-                if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+                if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
                         edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
-                                 cs, *mask1, reg1);
+                                 cs, *mask1, (pvt->fam == 0x10) ? reg1
+                                                                : reg0);
         }
 }
@@ -1198,7 +1212,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
         if (pvt->fam == 0xf)
                 return;
 
-        if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+        if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
                 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
                          pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
@@ -1219,7 +1233,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
                          dct_sel_interleave_addr(pvt));
         }
 
-        amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
+        amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
 }
@@ -1430,7 +1444,7 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
                 return sys_addr;
         }
 
-        amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
+        amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
 
         if (!(swap_reg & 0x1))
                 return sys_addr;
@@ -1723,10 +1737,16 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
                 WARN_ON(ctrl != 0);
         }
 
-        dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
-        dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
-                                                   : pvt->csels[0].csbases;
+        if (pvt->fam == 0x10) {
+                dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
+                                                           : pvt->dbam0;
+                dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
+                                pvt->csels[1].csbases :
+                                pvt->csels[0].csbases;
+        } else if (ctrl) {
+                dbam = pvt->dbam0;
+                dcsb = pvt->csels[1].csbases;
+        }
 
         edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
                  ctrl, dbam);
@@ -1760,7 +1780,6 @@ static struct amd64_family_type family_types[] = {
                         .early_channel_count    = k8_early_channel_count,
                         .map_sysaddr_to_csrow   = k8_map_sysaddr_to_csrow,
                         .dbam_to_cs             = k8_dbam_to_chip_select,
-                        .read_dct_pci_cfg       = k8_read_dct_pci_cfg,
                 }
         },
         [F10_CPUS] = {
@@ -1771,7 +1790,6 @@ static struct amd64_family_type family_types[] = {
                         .early_channel_count    = f1x_early_channel_count,
                         .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
                         .dbam_to_cs             = f10_dbam_to_chip_select,
-                        .read_dct_pci_cfg       = f10_read_dct_pci_cfg,
                 }
         },
         [F15_CPUS] = {
@@ -1782,7 +1800,6 @@ static struct amd64_family_type family_types[] = {
                         .early_channel_count    = f1x_early_channel_count,
                         .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
                         .dbam_to_cs             = f15_dbam_to_chip_select,
-                        .read_dct_pci_cfg       = f15_read_dct_pci_cfg,
                 }
         },
         [F15_M30H_CPUS] = {
@@ -1793,7 +1810,6 @@ static struct amd64_family_type family_types[] = {
                         .early_channel_count    = f1x_early_channel_count,
                         .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
                         .dbam_to_cs             = f16_dbam_to_chip_select,
-                        .read_dct_pci_cfg       = f15_read_dct_pci_cfg,
                 }
         },
         [F16_CPUS] = {
@@ -1804,7 +1820,6 @@ static struct amd64_family_type family_types[] = {
                         .early_channel_count    = f1x_early_channel_count,
                         .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
                         .dbam_to_cs             = f16_dbam_to_chip_select,
-                        .read_dct_pci_cfg       = f10_read_dct_pci_cfg,
                 }
         },
         [F16_M30H_CPUS] = {
@@ -1815,7 +1830,6 @@ static struct amd64_family_type family_types[] = {
                         .early_channel_count    = f1x_early_channel_count,
                         .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
                         .dbam_to_cs             = f16_dbam_to_chip_select,
-                        .read_dct_pci_cfg       = f10_read_dct_pci_cfg,
                 }
         },
 };
@@ -2148,25 +2162,25 @@ static void read_mc_regs(struct amd64_pvt *pvt)
         read_dct_base_mask(pvt);
 
         amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
-        amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
+        amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
 
         amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
 
-        amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
-        amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
+        amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
+        amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
 
         if (!dct_ganging_enabled(pvt)) {
-                amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
-                amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
+                amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
+                amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
         }
 
         pvt->ecc_sym_sz = 4;
 
         if (pvt->fam >= 0x10) {
                 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+                /* F16h has only DCT0, so no need to read dbam1 */
                 if (pvt->fam != 0x16)
-                        /* F16h has only DCT0 */
-                        amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+                        amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
 
                 /* F10h, revD and later can do x8 ECC too */
                 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
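For readers skimming the amd64_edac change above: the per-family read_dct_pci_cfg function pointers are replaced by one accessor that takes the target DCT explicitly and dispatches on the CPU family. The stand-alone C sketch below mirrors only the shape of that refactor; the struct, register values and helper names are invented for illustration and are not the driver's.

    #include <stdio.h>

    /* Toy stand-in for the driver's private data; names are illustrative only. */
    struct toy_pvt {
            unsigned int fam;   /* CPU family, e.g. 0xf, 0x10, 0x15, 0x16 */
            unsigned int model;
    };

    /* Pretend PCI config read: just fabricates a value from the offset. */
    static int toy_read_pci_cfg(int offset, unsigned int *val)
    {
            *val = 0xed000000u | (unsigned int)offset;
            return 0;
    }

    /*
     * One accessor instead of per-family function pointers: the caller names
     * the DCT, and the family-specific quirks (offset bias, validity checks)
     * live in a single switch, echoing the structure of the new
     * amd64_read_dct_pci_cfg() in the diff above.
     */
    static int toy_read_dct_pci_cfg(struct toy_pvt *pvt, unsigned char dct,
                                    int offset, unsigned int *val)
    {
            switch (pvt->fam) {
            case 0xf:                       /* single DCT, low offsets only */
                    if (dct || offset >= 0x100)
                            return -1;
                    break;
            case 0x10:                      /* DCT1 registers live at +0x100 */
                    if (dct)
                            offset += 0x100;
                    break;
            default:                        /* other families: no offset games here */
                    break;
            }
            return toy_read_pci_cfg(offset, val);
    }

    int main(void)
    {
            struct toy_pvt pvt = { .fam = 0x10, .model = 0x2 };
            unsigned int v;

            if (!toy_read_dct_pci_cfg(&pvt, 1, 0x40, &v))   /* DCT1 -> offset 0x140 */
                    printf("DCT1 reg 0x40 = 0x%08x\n", v);
            return 0;
    }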
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -481,8 +481,6 @@ struct low_ops {
         void (*map_sysaddr_to_csrow)    (struct mem_ctl_info *mci, u64 sys_addr,
                                          struct err_info *);
         int (*dbam_to_cs)               (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
-        int (*read_dct_pci_cfg)         (struct amd64_pvt *pvt, int offset,
-                                         u32 *val, const char *func);
 };
 
 struct amd64_family_type {
@@ -502,9 +500,6 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
 #define amd64_write_pci_cfg(pdev, offset, val)  \
         __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
 
-#define amd64_read_dct_pci_cfg(pvt, offset, val) \
-        pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
-
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
                              u64 *hole_offset, u64 *hole_size);
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -66,7 +66,7 @@
 #define EDAC_PCI "PCI"
 #define EDAC_DEBUG "DEBUG"
 
-extern const char *edac_mem_types[];
+extern const char * const edac_mem_types[];
 
 #ifdef CONFIG_EDAC_DEBUG
 extern int edac_debug_level;
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -128,7 +128,7 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
 /*
  * keep those in sync with the enum mem_type
  */
-const char *edac_mem_types[] = {
+const char * const edac_mem_types[] = {
         "Empty csrow",
         "Reserved csrow type",
         "Unknown csrow type",
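On the mem_types fix: with plain const char *, the strings themselves are immutable but the table of pointers stays writable and lands in a writable data section; the extra const also protects the pointer slots, so the array can be placed in read-only data and an accidental reassignment fails to compile. A small userspace illustration (table contents borrowed from the driver, otherwise not kernel code):

    #include <stdio.h>

    /* Writable table of pointers to const strings: the slots can be reassigned. */
    const char *writable_table[] = { "Empty csrow", "Reserved csrow type" };

    /* Fully const table: both the strings and the pointer slots are read-only. */
    const char * const ro_table[] = { "Empty csrow", "Reserved csrow type" };

    int main(void)
    {
            writable_table[0] = "oops";     /* compiles fine, silently mutates the table */
            /* ro_table[0] = "oops"; */     /* error: assignment of read-only location */
            printf("%s / %s\n", writable_table[0], ro_table[0]);
            return 0;
    }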
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -633,7 +633,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
         if (edac_op_state == EDAC_OPSTATE_INT) {
                 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
                 res = devm_request_irq(&op->dev, pdata->irq,
-                                       mpc85xx_l2_isr, 0,
+                                       mpc85xx_l2_isr, IRQF_SHARED,
                                        "[EDAC] L2 err", edac_dev);
                 if (res < 0) {
                         printk(KERN_ERR
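On the mpc85xx fix: requesting the L2 error interrupt with IRQF_SHARED lets it coexist with another handler on the same line. The usual contract for a shared handler is to check whether its own device actually asserted the interrupt and return IRQ_NONE otherwise, and to pass a non-NULL dev_id so the handler can be told apart from the other sharers. A kernel-style sketch of that contract follows; the device structure, register offset and names are invented for illustration and are not from mpc85xx_edac.

    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    /* Hypothetical per-device state; field names are illustrative only. */
    struct foo_edac_dev {
            void __iomem *regs;
    };

    #define FOO_ERR_STATUS  0x10    /* made-up error status register offset */

    static irqreturn_t foo_edac_isr(int irq, void *dev_id)
    {
            struct foo_edac_dev *priv = dev_id;
            u32 status = readl(priv->regs + FOO_ERR_STATUS);

            if (!status)
                    return IRQ_NONE;        /* not ours: let the other sharer handle it */

            /* ... log and clear the error condition here ... */
            writel(status, priv->regs + FOO_ERR_STATUS);
            return IRQ_HANDLED;
    }

    /* At probe time: dev_id must be non-NULL when IRQF_SHARED is used. */
    static int foo_edac_request(struct device *dev, int irq, struct foo_edac_dev *priv)
    {
            return devm_request_irq(dev, irq, foo_edac_isr, IRQF_SHARED,
                                    "foo_edac", priv);
    }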
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -974,7 +974,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
          * page size (PAGE_SIZE) or the memory width (2 or 4).
          */
         for (j = 0; j < csi->nr_channels; j++) {
-                struct dimm_info *dimm = csi->channels[j].dimm;
+                struct dimm_info *dimm = csi->channels[j]->dimm;
 
                 dimm->nr_pages = nr_pages / csi->nr_channels;
                 dimm->grain    = 1;
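On the ppc4xx fix: the EDAC core's csrow channels became an array of pointers rather than an array of structs, so member access needs -> instead of ., which is all the one-character change above does. A toy userspace illustration of the difference (toy structs, not the EDAC ones):

    #include <stdio.h>

    struct toy_dimm { int nr_pages; };
    struct toy_channel { struct toy_dimm *dimm; };

    struct toy_csrow {
            int nr_channels;
            struct toy_channel **channels;  /* array of pointers, as in the newer EDAC core */
    };

    int main(void)
    {
            struct toy_dimm dimm = { 0 };
            struct toy_channel ch = { .dimm = &dimm };
            struct toy_channel *chp = &ch;
            struct toy_csrow csi = { .nr_channels = 1, .channels = &chp };

            /* channels[j] is a pointer now, so it is ->dimm, not .dimm */
            csi.channels[0]->dimm->nr_pages = 16;
            printf("%d\n", dimm.nr_pages);
            return 0;
    }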