Commit 442d1ba2 authored by Linus Torvalds

Merge branch 'edac-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp

* 'edac-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp:
  amd64_edac: Disable DRAM ECC injection on K8
  EDAC: Fixup scrubrate manipulation
  amd64_edac: Remove two-stage initialization
  amd64_edac: Check ECC capabilities initially
  amd64_edac: Carve out ECC-related hw settings
  amd64_edac: Remove PCI ECS enabling functions
  amd64_edac: Remove explicit Kconfig PCI dependency
  amd64_edac: Allocate driver instances dynamically
  amd64_edac: Rework printk macros
  amd64_edac: Rename CPU PCI devices
  amd64_edac: Concentrate per-family init even more
  amd64_edac: Cleanup the CPU PCI device reservation
  amd64_edac: Simplify CPU family detection
  amd64_edac: Add per-family init function
  amd64_edac: Use cached extended CPU model
  amd64_edac: Remove F11h support
parents fb5131e1 a135cef7
@@ -75,11 +75,11 @@ config EDAC_MCE
 	bool

 config EDAC_AMD64
-	tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
-	depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE
+	tristate "AMD64 (Opteron, Athlon64) K8, F10h"
+	depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
 	help
-	  Support for error detection and correction on the AMD 64
-	  Families of Memory Controllers (K8, F10h and F11h)
+	  Support for error detection and correction of DRAM ECC errors on
+	  the AMD64 families of memory controllers (K8 and F10h)

 config EDAC_AMD64_ERROR_INJECTION
 	bool "Sysfs HW Error injection facilities"
......
@@ -15,10 +15,14 @@ module_param(ecc_enable_override, int, 0644);

 static struct msr __percpu *msrs;

-/* Lookup table for all possible MC control instances */
-struct amd64_pvt;
-static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
-static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
+/*
+ * count successfully initialized driver instances for setup_pci_device()
+ */
+static atomic_t drv_instances = ATOMIC_INIT(0);
+
+/* Per-node driver instances */
+static struct mem_ctl_info **mcis;
+static struct ecc_settings **ecc_stngs;

 /*
  * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
@@ -73,7 +77,11 @@ static int ddr3_dbam[] = { [0] = -1,
  *FIXME: Produce a better mapping/linearisation.
  */
-struct scrubrate scrubrates[] = {
+
+struct scrubrate {
+	u32 scrubval;		/* bit pattern for scrub rate */
+	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
+} scrubrates[] = {
 	{ 0x01, 1600000000UL},
 	{ 0x02, 800000000UL},
 	{ 0x03, 400000000UL},
@@ -117,8 +125,7 @@ struct scrubrate scrubrates[] = {
  * scan the scrub rate mapping table for a close or matching bandwidth value to
  * issue. If requested is too big, then use last maximum value found.
  */
-static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
-				       u32 min_scrubrate)
+static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 {
 	u32 scrubval;
 	int i;
@@ -134,7 +141,7 @@ static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
 		 * skip scrub rates which aren't recommended
 		 * (see F10 BKDG, F3x58)
 		 */
-		if (scrubrates[i].scrubval < min_scrubrate)
+		if (scrubrates[i].scrubval < min_rate)
 			continue;

 		if (scrubrates[i].bandwidth <= new_bw)
@@ -148,64 +155,41 @@ static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
 	}

 	scrubval = scrubrates[i].scrubval;
-	if (scrubval)
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Setting scrub rate bandwidth: %u\n",
-			    scrubrates[i].bandwidth);
-	else
-		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");

 	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

-	if (scrubval)
-		return scrubrates[i].bandwidth;
-
 	return 0;
 }

-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
+static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u32 min_scrubrate = 0x0;

-	switch (boot_cpu_data.x86) {
-	case 0xf:
-		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
-		break;
-	case 0x10:
-		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
-		break;
-	case 0x11:
-		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
-		break;
-
-	default:
-		amd64_printk(KERN_ERR, "Unsupported family!\n");
-		return -EINVAL;
-	}
-	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, bandwidth,
-					   min_scrubrate);
+	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
 }

-static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
-	int status = -1, i;
+	int i, retval = -EINVAL;

-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
+	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);

 	scrubval = scrubval & 0x001F;

-	edac_printk(KERN_DEBUG, EDAC_MC,
-		    "pci-read, sdram scrub control value: %d \n", scrubval);
+	amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);

 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
 		if (scrubrates[i].scrubval == scrubval) {
-			*bw = scrubrates[i].bandwidth;
-			status = 0;
+			retval = scrubrates[i].bandwidth;
 			break;
 		}
 	}

-	return status;
+	return retval;
 }

 /* Map from a CSROW entry to the mask entry that operates on it */
@@ -314,9 +298,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	if (unlikely((intlv_en != 0x01) &&
 		     (intlv_en != 0x03) &&
 		     (intlv_en != 0x07))) {
-		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
-			     "IntlvEn field of DRAM Base Register for node 0: "
-			     "this probably indicates a BIOS bug.\n", intlv_en);
+		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
 		return NULL;
 	}
@@ -332,10 +314,8 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
 	/* sanity test for sys_addr */
 	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
-		amd64_printk(KERN_WARNING,
-			     "%s(): sys_addr 0x%llx falls outside base/limit "
-			     "address range for node %d with node interleaving "
-			     "enabled.\n",
-			     __func__, sys_addr, node_id);
+		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
+			   "range for node %d with node interleaving enabled.\n",
+			   __func__, sys_addr, node_id);
 		return NULL;
 	}
@@ -788,8 +768,7 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

 	if (csrow == -1)
-		amd64_mc_printk(mci, KERN_ERR,
-				"Failed to translate InputAddr to csrow for "
-				"address 0x%lx\n", (unsigned long)sys_addr);
+		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
+				  "address 0x%lx\n", (unsigned long)sys_addr);
 	return csrow;
 }
@@ -801,21 +780,6 @@ static u16 extract_syndrome(struct err_regs *err)
 	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
 }

-static void amd64_cpu_display_info(struct amd64_pvt *pvt)
-{
-	if (boot_cpu_data.x86 == 0x11)
-		edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
-	else if (boot_cpu_data.x86 == 0x10)
-		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
-	else if (boot_cpu_data.x86 == 0xf)
-		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
-			    (pvt->ext_model >= K8_REV_F) ?
-			    "Rev F or later" : "Rev E or earlier");
-	else
-		/* we'll hardly ever ever get here */
-		edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
-}
-
 /*
  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
  * are ECC capable.
@@ -893,8 +857,7 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 		return;
 	}

-	amd64_printk(KERN_INFO, "using %s syndromes.\n",
-		     ((pvt->syn_type == 8) ? "x8" : "x4"));
+	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));

 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
@@ -915,10 +878,10 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 /* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
+	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);

 	if (boot_cpu_data.x86 >= 0x10)
-		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
+		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
 }

 /*
@@ -965,15 +928,9 @@ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
 		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
-
-		if (boot_cpu_data.x86 == 0x11) {
-			pvt->cs_count = 4;
-			pvt->num_dcsm = 2;
-		} else {
-			pvt->cs_count = 8;
-			pvt->num_dcsm = 4;
-		}
+		pvt->cs_count = 8;
+		pvt->num_dcsm = 4;
 	}
 }

 /*
@@ -987,14 +944,14 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
+		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);

 		/* If DCT are NOT ganged, then read in DCT1's base */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+			if (!amd64_read_pci_cfg(pvt->F2, reg,
 						&pvt->dcsb1[cs]))
 				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
@@ -1005,14 +962,14 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
+		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
 			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);

 		/* If DCT are NOT ganged, then read in DCT1's mask */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+			if (!amd64_read_pci_cfg(pvt->F2, reg,
 						&pvt->dcsm1[cs]))
 				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
@@ -1022,7 +979,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 	}
 }

-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
+static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
 {
 	enum mem_type type;
@@ -1035,7 +992,7 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
 	}

-	debugf1("  Memory type is: %s\n", edac_mem_types[type]);
+	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

 	return type;
 }
@@ -1053,17 +1010,16 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 {
 	int flag, err = 0;

-	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
 	if (err)
 		return err;

-	if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
+	if (pvt->ext_model >= K8_REV_F)
 		/* RevF (NPT) and later */
 		flag = pvt->dclr0 & F10_WIDTH_128;
-	} else {
+	else
 		/* RevE and earlier */
 		flag = pvt->dclr0 & REVE_WIDTH_128;
-	}

 	/* not used */
 	pvt->dclr1 = 0;
@@ -1090,14 +1046,14 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	u32 low;
 	u32 off = dram << 3;	/* 8 bytes between DRAM entries */

-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
+	amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);

 	/* Extract parts into separate data entries */
 	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);

-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
+	amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);

 	/*
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
@@ -1127,8 +1083,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 		 * 2 DIMMs is in error. So we need to ID 'both' of them
 		 * as suspect.
 		 */
-		amd64_mc_printk(mci, KERN_WARNING,
-				"unknown syndrome 0x%04x - possible "
-				"error reporting race\n", syndrome);
+		amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
+				   "error reporting race\n", syndrome);
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
@@ -1151,8 +1106,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 	 */
 	src_mci = find_mc_by_sys_addr(mci, sys_addr);
 	if (!src_mci) {
-		amd64_mc_printk(mci, KERN_ERR,
-			     "failed to map error address 0x%lx to a node\n",
-			     (unsigned long)sys_addr);
+		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+			     (unsigned long)sys_addr);
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
@@ -1220,7 +1174,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
 	 * both controllers since DIMMs can be placed in either one.
 	 */
 	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
+		if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
 			goto err_reg;

 		for (j = 0; j < 4; j++) {
@@ -1234,7 +1188,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
 	if (channels > 2)
 		channels = 2;

-	debugf0("MCT channel count: %d\n", channels);
+	amd64_info("MCT channel count: %d\n", channels);

 	return channels;
@@ -1255,31 +1209,6 @@ static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 	return dbam_map[cs_mode];
 }

-/* Enable extended configuration access via 0xCF8 feature */
-static void amd64_setup(struct amd64_pvt *pvt)
-{
-	u32 reg;
-
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
-
-	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
-	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
-}
-
-/* Restore the extended configuration access via 0xCF8 feature */
-static void amd64_teardown(struct amd64_pvt *pvt)
-{
-	u32 reg;
-
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
-
-	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	if (pvt->flags.cf8_extcfg)
-		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
-}
-
 static u64 f10_get_error_address(struct mem_ctl_info *mci,
 			struct err_regs *info)
 {
@@ -1301,10 +1230,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);

 	/* read the 'raw' DRAM BASE Address register */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
-
-	/* Read from the ECS data register */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
+	amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
+	amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);

 	/* Extract parts into separate data entries */
 	pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1321,10 +1248,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);

 	/* read the 'raw' LIMIT registers */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
-
-	/* Read from the ECS data register for the HIGH portion */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
+	amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
+	amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);

 	pvt->dram_DstNode[dram] = (low_limit & 0x7);
 	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
@@ -1341,7 +1266,7 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
-	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+	if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
 				&pvt->dram_ctl_select_low)) {
 		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
 			"High range addresses at: 0x%x\n",
@@ -1367,7 +1292,7 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 			dct_sel_interleave_addr(pvt));
 	}

-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+	amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
 			   &pvt->dram_ctl_select_high);
 }
@@ -1496,7 +1421,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
 	int cs_found = -EINVAL;
 	int csrow;

-	mci = mci_lookup[nid];
+	mci = mcis[nid];
 	if (!mci)
 		return cs_found;
@@ -1738,28 +1663,17 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
 			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

-		edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
-			    dimm * 2, size0 << factor,
-			    dimm * 2 + 1, size1 << factor);
+		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
+			   dimm * 2, size0 << factor,
+			   dimm * 2 + 1, size1 << factor);
 	}
 }

-/*
- * There currently are 3 types type of MC devices for AMD Athlon/Opterons
- * (as per PCI DEVICE_IDs):
- *
- * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
- * DEVICE ID, even though there is differences between the different Revisions
- * (CG,D,E,F).
- *
- * Family F10h and F11h.
- *
- */
 static struct amd64_family_type amd64_family_types[] = {
 	[K8_CPUS] = {
-		.ctl_name = "RevF",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+		.ctl_name = "K8",
+		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
+		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
 		.ops = {
 			.early_channel_count	= k8_early_channel_count,
 			.get_error_address	= k8_get_error_address,
@@ -1769,22 +1683,9 @@ static struct amd64_family_type amd64_family_types[] = {
 		}
 	},
 	[F10_CPUS] = {
-		.ctl_name = "Family 10h",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
-		.ops = {
-			.early_channel_count	= f10_early_channel_count,
-			.get_error_address	= f10_get_error_address,
-			.read_dram_base_limit	= f10_read_dram_base_limit,
-			.read_dram_ctl_register	= f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
-			.dbam_to_cs		= f10_dbam_to_chip_select,
-		}
-	},
-	[F11_CPUS] = {
-		.ctl_name = "Family 11h",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
+		.ctl_name = "F10h",
+		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
+		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
 		.ops = {
 			.early_channel_count	= f10_early_channel_count,
 			.get_error_address	= f10_get_error_address,
@@ -1970,8 +1871,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
 					  ARRAY_SIZE(x4_vectors),
 					  pvt->syn_type);
 	else {
-		amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n",
-			     __func__, pvt->syn_type);
+		amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
 		return err_sym;
 	}
@@ -1989,17 +1889,15 @@ static void amd64_handle_ce(struct mem_ctl_info *mci,
 	u64 sys_addr;

 	/* Ensure that the Error Address is VALID */
-	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-		amd64_mc_printk(mci, KERN_ERR,
-			"HW has no ERROR_ADDRESS available\n");
+	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
 	}

 	sys_addr = pvt->ops->get_error_address(mci, info);

-	amd64_mc_printk(mci, KERN_ERR,
-		"CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
+	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

 	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
 }
@@ -2016,9 +1914,8 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
 	log_mci = mci;

-	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-		amd64_mc_printk(mci, KERN_CRIT,
-			"HW has no ERROR_ADDRESS available\n");
+	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 		return;
 	}
@@ -2031,8 +1928,7 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
 	 */
 	src_mci = find_mc_by_sys_addr(mci, sys_addr);
 	if (!src_mci) {
-		amd64_mc_printk(mci, KERN_CRIT,
-			"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
-			(unsigned long)sys_addr);
+		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
+				  (unsigned long)sys_addr);
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 		return;
@@ -2042,8 +1938,7 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
 	csrow = sys_addr_to_csrow(log_mci, sys_addr);
 	if (csrow < 0) {
-		amd64_mc_printk(mci, KERN_CRIT,
-			"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
-			(unsigned long)sys_addr);
+		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
+				  (unsigned long)sys_addr);
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 	} else {
@@ -2075,7 +1970,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
 {
-	struct mem_ctl_info *mci = mci_lookup[node_id];
+	struct mem_ctl_info *mci = mcis[node_id];
 	struct err_regs regs;

 	regs.nbsl = (u32) m->status;
@@ -2099,75 +1994,50 @@ void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
 }

 /*
- * Input:
- *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
- *	2) AMD Family index value
- *
- * Ouput:
- *	Upon return of 0, the following filled in:
- *
- *		struct pvt->addr_f1_ctl
- *		struct pvt->misc_f3_ctl
- *
- *	Filled in with related device funcitions of 'dram_f2_ctl'
- *	These devices are "reserved" via the pci_get_device()
- *
- *	Upon return of 1 (error status):
- *
- *		Nothing reserved
+ * Use pvt->F2 which contains the F2 CPU PCI device to get the related
+ * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
  */
-static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
+static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
 {
-	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
-
 	/* Reserve the ADDRESS MAP Device */
-	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-						    amd64_dev->addr_f1_ctl,
-						    pvt->dram_f2_ctl);
-
-	if (!pvt->addr_f1_ctl) {
-		amd64_printk(KERN_ERR, "error address map device not found: "
-			     "vendor %x device 0x%x (broken BIOS?)\n",
-			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
-		return 1;
+	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
+	if (!pvt->F1) {
+		amd64_err("error address map device not found: "
+			  "vendor %x device 0x%x (broken BIOS?)\n",
+			  PCI_VENDOR_ID_AMD, f1_id);
+		return -ENODEV;
 	}

 	/* Reserve the MISC Device */
-	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-						    amd64_dev->misc_f3_ctl,
-						    pvt->dram_f2_ctl);
-
-	if (!pvt->misc_f3_ctl) {
-		pci_dev_put(pvt->addr_f1_ctl);
-		pvt->addr_f1_ctl = NULL;
-
-		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
-			     "vendor %x device 0x%x (broken BIOS?)\n",
-			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
-		return 1;
-	}
-
-	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
-		pci_name(pvt->addr_f1_ctl));
-	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
-		pci_name(pvt->dram_f2_ctl));
-	debugf1("    Misc device PCI Bus ID:\t%s\n",
-		pci_name(pvt->misc_f3_ctl));
+	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
+	if (!pvt->F3) {
+		pci_dev_put(pvt->F1);
+		pvt->F1 = NULL;
+
+		amd64_err("error F3 device not found: "
+			  "vendor %x device 0x%x (broken BIOS?)\n",
+			  PCI_VENDOR_ID_AMD, f3_id);
+
+		return -ENODEV;
+	}
+	debugf1("F1: %s\n", pci_name(pvt->F1));
+	debugf1("F2: %s\n", pci_name(pvt->F2));
+	debugf1("F3: %s\n", pci_name(pvt->F3));

 	return 0;
 }

-static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
+static void free_mc_sibling_devs(struct amd64_pvt *pvt)
 {
-	pci_dev_put(pvt->addr_f1_ctl);
-	pci_dev_put(pvt->misc_f3_ctl);
+	pci_dev_put(pvt->F1);
+	pci_dev_put(pvt->F3);
 }

 /*
  * Retrieve the hardware registers of the memory controller (this includes the
  * 'Address Map' and 'Misc' device regs)
  */
-static void amd64_read_mc_registers(struct amd64_pvt *pvt)
+static void read_mc_regs(struct amd64_pvt *pvt)
 {
 	u64 msr_val;
 	u32 tmp;
@@ -2188,9 +2058,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 	} else
 		debugf0("  TOP_MEM2 disabled.\n");

-	amd64_cpu_display_info(pvt);
-
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);

 	if (pvt->ops->read_dram_ctl_register)
 		pvt->ops->read_dram_ctl_register(pvt);
@@ -2227,21 +2095,20 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 	amd64_read_dct_base_mask(pvt);

-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
+	amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
 	amd64_read_dbam_reg(pvt);

-	amd64_read_pci_cfg(pvt->misc_f3_ctl,
-			   F10_ONLINE_SPARE, &pvt->online_spare);
+	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
+	amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
+	amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);

 	if (boot_cpu_data.x86 >= 0x10) {
 		if (!dct_ganging_enabled(pvt)) {
-			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
-			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
+			amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
+			amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
 		}
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp);
+		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
 	}

 	if (boot_cpu_data.x86 == 0x10 &&
@@ -2321,21 +2188,22 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
  * Initialize the array of csrow attribute instances, based on the values
  * from pci config hardware registers.
  */
-static int amd64_init_csrows(struct mem_ctl_info *mci)
+static int init_csrows(struct mem_ctl_info *mci)
 {
 	struct csrow_info *csrow;
-	struct amd64_pvt *pvt;
+	struct amd64_pvt *pvt = mci->pvt_info;
 	u64 input_addr_min, input_addr_max, sys_addr;
+	u32 val;
 	int i, empty = 1;

-	pvt = mci->pvt_info;
+	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);

-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
+	pvt->nbcfg = val;
+	pvt->ctl_error_info.nbcfg = val;

-	debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
-		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
-		);
+	debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+		pvt->mc_node_id, val,
+		!!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));

 	for (i = 0; i < pvt->cs_count; i++) {
 		csrow = &mci->csrows[i];
@@ -2359,7 +2227,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
 		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
 		/* 8 bytes of resolution */

-		csrow->mtype = amd64_determine_memory_type(pvt);
+		csrow->mtype = amd64_determine_memory_type(pvt, i);

 		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
 		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
@@ -2404,8 +2272,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 	bool ret = false;

 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
-		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-			     __func__);
+		amd64_warn("%s: Error allocating mask\n", __func__);
 		return false;
 	}
@@ -2431,18 +2298,17 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 	return ret;
 }

-static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
 {
 	cpumask_var_t cmask;
 	int cpu;

 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
-		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-			     __func__);
+		amd64_warn("%s: error allocating mask\n", __func__);
 		return false;
 	}

-	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+	get_cpus_on_this_dct_cpumask(cmask, nid);

 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2452,14 +2318,14 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
 		if (on) {
 			if (reg->l & K8_MSR_MCGCTL_NBE)
-				pvt->flags.nb_mce_enable = 1;
+				s->flags.nb_mce_enable = 1;

 			reg->l |= K8_MSR_MCGCTL_NBE;
 		} else {
 			/*
 			 * Turn off NB MCE reporting only when it was off before
 			 */
-			if (!pvt->flags.nb_mce_enable)
+			if (!s->flags.nb_mce_enable)
 				reg->l &= ~K8_MSR_MCGCTL_NBE;
 		}
 	}
@@ -2470,92 +2336,92 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
 	return 0;
 }

-static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+				       struct pci_dev *F3)
 {
-	struct amd64_pvt *pvt = mci->pvt_info;
+	bool ret = true;
 	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	if (toggle_ecc_err_reporting(s, nid, ON)) {
+		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
+		return false;
+	}
+
+	amd64_read_pci_cfg(F3, K8_NBCTL, &value);

-	/* turn on UECCn and CECCEn bits */
-	pvt->old_nbctl = value & mask;
-	pvt->nbctl_mcgctl_saved = 1;
+	/* turn on UECCEn and CECCEn bits */
+	s->old_nbctl = value & mask;
+	s->nbctl_valid = true;

 	value |= mask;
-	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
-
-	if (amd64_toggle_ecc_err_reporting(pvt, ON))
-		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
-					   "MCGCTL!\n");
+	pci_write_config_dword(F3, K8_NBCTL, value);

-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

-	debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
-		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+	debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+		nid, value,
+		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

 	if (!(value & K8_NBCFG_ECC_ENABLE)) {
-		amd64_printk(KERN_WARNING,
-			"This node reports that DRAM ECC is "
-			"currently Disabled; ENABLING now\n");
+		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

-		pvt->flags.nb_ecc_prev = 0;
+		s->flags.nb_ecc_prev = 0;

 		/* Attempt to turn on DRAM ECC Enable */
 		value |= K8_NBCFG_ECC_ENABLE;
-		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+		pci_write_config_dword(F3, K8_NBCFG, value);

-		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+		amd64_read_pci_cfg(F3, K8_NBCFG, &value);

 		if (!(value & K8_NBCFG_ECC_ENABLE)) {
-			amd64_printk(KERN_WARNING,
-				"Hardware rejects Enabling DRAM ECC checking\n"
-				"Check memory DIMM configuration\n");
+			amd64_warn("Hardware rejected DRAM ECC enable,"
+				   "check memory DIMM configuration.\n");
+			ret = false;
 		} else {
-			amd64_printk(KERN_DEBUG,
-				"Hardware accepted DRAM ECC Enable\n");
+			amd64_info("Hardware accepted DRAM ECC Enable\n");
 		}
 	} else {
-		pvt->flags.nb_ecc_prev = 1;
+		s->flags.nb_ecc_prev = 1;
 	}

-	debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
-		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+	debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+		nid, value,
+		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

-	pvt->ctl_error_info.nbcfg = value;
+	return ret;
 }

-static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+					struct pci_dev *F3)
 {
 	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

-	if (!pvt->nbctl_mcgctl_saved)
+	if (!s->nbctl_valid)
 		return;

-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	amd64_read_pci_cfg(F3, K8_NBCTL, &value);
 	value &= ~mask;
-	value |= pvt->old_nbctl;
+	value |= s->old_nbctl;

-	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+	pci_write_config_dword(F3, K8_NBCTL, value);

-	/* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
-	if (!pvt->flags.nb_ecc_prev) {
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
+	if (!s->flags.nb_ecc_prev) {
+		amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 		value &= ~K8_NBCFG_ECC_ENABLE;
-		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+		pci_write_config_dword(F3, K8_NBCFG, value);
 	}

 	/* restore the NB Enable MCGCTL bit */
-	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
-		amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
+	if (toggle_ecc_err_reporting(s, nid, OFF))
+		amd64_warn("Error restoring NB MCGCTL settings!\n");
 }

 /*
- * EDAC requires that the BIOS have ECC enabled before taking over the
- * processing of ECC errors. This is because the BIOS can properly initialize
- * the memory system completely. A command line option allows to force-enable
- * hardware ECC later in amd64_enable_ecc_error_reporting().
+ * EDAC requires that the BIOS have ECC enabled before
+ * taking over the processing of ECC errors. A command line
+ * option allows to force-enable hardware ECC later in
+ * enable_ecc_error_reporting().
  */
 static const char *ecc_msg =
 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
@@ -2563,38 +2429,28 @@ static const char *ecc_msg =
 	"'ecc_enable_override'.\n"
 	" (Note that use of the override may cause unknown side effects.)\n";

-static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
+static bool ecc_enabled(struct pci_dev *F3, u8 nid)
 {
 	u32 value;
-	u8 ecc_enabled = 0;
+	u8 ecc_en = 0;
 	bool nb_mce_en = false;

-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

-	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
-	if (!ecc_enabled)
-		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
-			     "is currently disabled, set F3x%x[22] (%s).\n",
-			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
-	else
-		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
+	ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

-	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
+	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
 	if (!nb_mce_en)
-		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
-			     "0x%08x[4] on node %d to enable.\n",
-			     MSR_IA32_MCG_CTL, pvt->mc_node_id);
+		amd64_notice("NB MCE bank disabled, set MSR "
+			     "0x%08x[4] on node %d to enable.\n",
+			     MSR_IA32_MCG_CTL, nid);

-	if (!ecc_enabled || !nb_mce_en) {
-		if (!ecc_enable_override) {
-			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
-			return -ENODEV;
-		} else {
-			amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
-		}
+	if (!ecc_en || !nb_mce_en) {
+		amd64_notice("%s", ecc_msg);
+		return false;
 	}
-
-	return 0;
+	return true;
 }

 struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
@@ -2603,13 +2459,14 @@ struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
 struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

-static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
 {
 	unsigned int i = 0, j = 0;

 	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
 		sysfs_attrs[i] = amd64_dbg_attrs[i];

+	if (boot_cpu_data.x86 >= 0x10)
 		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
 			sysfs_attrs[i] = amd64_inj_attrs[j];
@@ -2618,7 +2475,7 @@ static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
 	mci->mc_driver_sysfs_attributes = sysfs_attrs;
 }

-static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
@@ -2634,8 +2491,8 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
 	mci->edac_cap = amd64_determine_edac_cap(pvt);
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = EDAC_AMD64_VERSION;
-	mci->ctl_name = get_amd_family_name(pvt->mc_type_index);
-	mci->dev_name = pci_name(pvt->dram_f2_ctl);
+	mci->ctl_name = pvt->ctl_name;
+	mci->dev_name = pci_name(pvt->F2);
 	mci->ctl_page_to_phys = NULL;

 	/* memory scrubber interface */
@@ -2644,111 +2501,94 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
 }

 /*
- * Init stuff for this DRAM Controller device.
- *
- * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
- * Space feature MUST be enabled on ALL Processors prior to actually reading
- * from the ECS registers. Since the loading of the module can occur on any
- * 'core', and cores don't 'see' all the other processors ECS data when the
- * others are NOT enabled. Our solution is to first enable ECS access in this
- * routine on all processors, gather some data in a amd64_pvt structure and
- * later come back in a finish-setup function to perform that final
- * initialization. See also amd64_init_2nd_stage() for that.
+ * returns a pointer to the family descriptor on success, NULL otherwise.
  */
-static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
-				    int mc_type_index)
+static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
+{
+	u8 fam = boot_cpu_data.x86;
+	struct amd64_family_type *fam_type = NULL;
+
+	switch (fam) {
+	case 0xf:
+		fam_type = &amd64_family_types[K8_CPUS];
+		pvt->ops = &amd64_family_types[K8_CPUS].ops;
+		pvt->ctl_name = fam_type->ctl_name;
+		pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
+		break;
+	case 0x10:
+		fam_type = &amd64_family_types[F10_CPUS];
+		pvt->ops = &amd64_family_types[F10_CPUS].ops;
+		pvt->ctl_name = fam_type->ctl_name;
+		pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+		break;
+
+	default:
+		amd64_err("Unsupported family!\n");
+		return NULL;
+	}
+
+	pvt->ext_model = boot_cpu_data.x86_model >> 4;
+
+	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+		   (fam == 0xf ?
+			(pvt->ext_model >= K8_REV_F ? "revF or later "
+						    : "revE or earlier ")
+			: ""), pvt->mc_node_id);
+
+	return fam_type;
+}
+
+static int amd64_init_one_instance(struct pci_dev *F2)
 {
 	struct amd64_pvt *pvt = NULL;
+	struct amd64_family_type *fam_type = NULL;
+	struct mem_ctl_info *mci = NULL;
 	int err = 0, ret;
+	u8 nid = get_node_id(F2);

 	ret = -ENOMEM;
 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
 	if (!pvt)
-		goto err_exit;
+		goto err_ret;

-	pvt->mc_node_id = get_node_id(dram_f2_ctl);
+	pvt->mc_node_id = nid;
+	pvt->F2 = F2;

-	pvt->dram_f2_ctl = dram_f2_ctl;
-	pvt->ext_model = boot_cpu_data.x86_model >> 4;
-	pvt->mc_type_index = mc_type_index;
-	pvt->ops = family_ops(mc_type_index);
+	ret = -EINVAL;
+	fam_type = amd64_per_family_init(pvt);
+	if (!fam_type)
+		goto err_free;

-	/*
-	 * We have the dram_f2_ctl device as an argument, now go reserve its
-	 * sibling devices from the PCI system.
-	 */
 	ret = -ENODEV;
-	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
+	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
 	if (err)
 		goto err_free;

-	ret = -EINVAL;
-	err = amd64_check_ecc_enabled(pvt);
-	if (err)
-		goto err_put;
-
-	/*
-	 * Key operation here: setup of HW prior to performing ops on it. Some
-	 * setup is required to access ECS data. After this is performed, the
-	 * 'teardown' function must be called upon error and normal exit paths.
-	 */
-	if (boot_cpu_data.x86 >= 0x10)
-		amd64_setup(pvt);
-
-	/*
-	 * Save the pointer to the private data for use in 2nd initialization
-	 * stage
-	 */
-	pvt_lookup[pvt->mc_node_id] = pvt;
-
-	return 0;
-
-err_put:
-	amd64_free_mc_sibling_devices(pvt);
-
-err_free:
-	kfree(pvt);
-
-err_exit:
-	return ret;
-}
-
-/*
- * This is the finishing stage of the init code. Needs to be performed after all
- * MCs' hardware have been prepped for accessing extended config space.
- */
-static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
-{
-	int node_id = pvt->mc_node_id;
-	struct mem_ctl_info *mci;
-	int ret = -ENODEV;
-
-	amd64_read_mc_registers(pvt);
+	read_mc_regs(pvt);

 	/*
 	 * We need to determine how many memory channels there are. Then use
 	 * that information for calculating the size of the dynamic instance
-	 * tables in the 'mci' structure
+	 * tables in the 'mci' structure.
 	 */
+	ret = -EINVAL;
 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
 	if (pvt->channel_count < 0)
-		goto err_exit;
+		goto err_siblings;

 	ret = -ENOMEM;
-	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
+	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
 	if (!mci)
-		goto err_exit;
+		goto err_siblings;

 	mci->pvt_info = pvt;
+	mci->dev = &pvt->F2->dev;

-	mci->dev = &pvt->dram_f2_ctl->dev;
-	amd64_setup_mci_misc_attributes(mci);
+	setup_mci_misc_attrs(mci);

-	if (amd64_init_csrows(mci))
+	if (init_csrows(mci))
 		mci->edac_cap = EDAC_FLAG_NONE;

-	amd64_enable_ecc_error_reporting(mci);
-	amd64_set_mc_sysfs_attributes(mci);
+	set_mc_sysfs_attrs(mci);

 	ret = -ENODEV;
 	if (edac_mc_add_mc(mci)) {
@@ -2756,54 +2596,77 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
 		debugf1("failed edac_mc_add_mc()\n");
 		goto err_add_mc;
 	}

-	mci_lookup[node_id] = mci;
-	pvt_lookup[node_id] = NULL;
-
 	/* register stuff with EDAC MCE */
 	if (report_gart_errors)
 		amd_report_gart_errors(true);

 	amd_register_ecc_decoder(amd64_decode_bus_error);

+	mcis[nid] = mci;
+
+	atomic_inc(&drv_instances);
+
 	return 0;

 err_add_mc:
 	edac_mc_free(mci);

-err_exit:
-	debugf0("failure to init 2nd stage: ret=%d\n", ret);
-
-	amd64_restore_ecc_error_reporting(pvt);
-
-	if (boot_cpu_data.x86 > 0xf)
-		amd64_teardown(pvt);
-
-	amd64_free_mc_sibling_devices(pvt);
+err_siblings:
+	free_mc_sibling_devs(pvt);

-	kfree(pvt_lookup[pvt->mc_node_id]);
-	pvt_lookup[node_id] = NULL;
+err_free:
+	kfree(pvt);

+err_ret:
 	return ret;
 }

-static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
-				 const struct pci_device_id *mc_type)
+static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
+					      const struct pci_device_id *mc_type)
 {
+	u8 nid = get_node_id(pdev);
+	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+	struct ecc_settings *s;
 	int ret = 0;

-	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
-		get_amd_family_name(mc_type->driver_data));
-
 	ret = pci_enable_device(pdev);
-	if (ret < 0)
-		ret = -EIO;
-	else
-		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
-
-	if (ret < 0)
+	if (ret < 0) {
 		debugf0("ret=%d\n", ret);
+		return -EIO;
+	}
+
+	ret = -ENOMEM;
+	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
+	if (!s)
+		goto err_out;
+
+	ecc_stngs[nid] = s;
+
+	if (!ecc_enabled(F3, nid)) {
+		ret = -ENODEV;
+
+		if (!ecc_enable_override)
+			goto err_enable;
+
+		amd64_warn("Forcing ECC on!\n");
+
+		if (!enable_ecc_error_reporting(s, nid, F3))
+			goto err_enable;
+	}
+
+	ret = amd64_init_one_instance(pdev);
+	if (ret < 0) {
+		amd64_err("Error probing instance: %d\n", nid);
+		restore_ecc_error_reporting(s, nid, F3);
+	}

 	return ret;
+
+err_enable:
+	kfree(s);
+	ecc_stngs[nid] = NULL;
+
+err_out:
+	return ret;
 }
@@ -2811,6 +2674,9 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
+	u8 nid = get_node_id(pdev);
+	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+	struct ecc_settings *s = ecc_stngs[nid];

 	/* Remove from EDAC CORE tracking list */
 	mci = edac_mc_del_mc(&pdev->dev);
@@ -2819,20 +2685,20 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
 	pvt = mci->pvt_info;

-	amd64_restore_ecc_error_reporting(pvt);
+	restore_ecc_error_reporting(s, nid, F3);

-	if (boot_cpu_data.x86 > 0xf)
-		amd64_teardown(pvt);
-
-	amd64_free_mc_sibling_devices(pvt);
+	free_mc_sibling_devs(pvt);

 	/* unregister from EDAC MCE */
 	amd_report_gart_errors(false);
 	amd_unregister_ecc_decoder(amd64_decode_bus_error);

+	kfree(ecc_stngs[nid]);
+	ecc_stngs[nid] = NULL;
+
 	/* Free the EDAC CORE resources */
 	mci->pvt_info = NULL;
-	mci_lookup[pvt->mc_node_id] = NULL;
+	mcis[nid] = NULL;

 	kfree(pvt);
 	edac_mc_free(mci);
@@ -2851,7 +2717,6 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
 		.subdevice = PCI_ANY_ID,
 		.class = 0,
 		.class_mask = 0,
-		.driver_data = K8_CPUS
 	},
 	{
 		.vendor = PCI_VENDOR_ID_AMD,
@@ -2860,16 +2725,6 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
 		.subdevice = PCI_ANY_ID,
 		.class = 0,
 		.class_mask = 0,
-		.driver_data = F10_CPUS
-	},
-	{
-		.vendor = PCI_VENDOR_ID_AMD,
-		.device = PCI_DEVICE_ID_AMD_11H_NB_DRAM,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.class = 0,
-		.class_mask = 0,
-		.driver_data = F11_CPUS
 	},
 	{0, }
 };
@@ -2877,12 +2732,12 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
 
 static struct pci_driver amd64_pci_driver = {
 	.name		= EDAC_MOD_STR,
-	.probe		= amd64_init_one_instance,
+	.probe		= amd64_probe_one_instance,
 	.remove		= __devexit_p(amd64_remove_one_instance),
 	.id_table	= amd64_pci_table,
 };
 
-static void amd64_setup_pci_device(void)
+static void setup_pci_device(void)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
@@ -2890,13 +2745,12 @@ static void amd64_setup_pci_device(void)
 
 	if (amd64_ctl_pci)
 		return;
 
-	mci = mci_lookup[0];
+	mci = mcis[0];
 	if (mci) {
 		pvt = mci->pvt_info;
 		amd64_ctl_pci =
-			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
-						    EDAC_MOD_STR);
+			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
 
 		if (!amd64_ctl_pci) {
 			pr_warning("%s(): Unable to create PCI control\n",
@@ -2910,8 +2764,7 @@ static void amd64_setup_pci_device(void)
 
 static int __init amd64_edac_init(void)
 {
-	int nb, err = -ENODEV;
-	bool load_ok = false;
+	int err = -ENODEV;
 
 	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
 
@@ -2920,41 +2773,41 @@ static int __init amd64_edac_init(void)
 	if (amd_cache_northbridges() < 0)
 		goto err_ret;
 
+	err = -ENOMEM;
+	mcis      = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
+	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
+	if (!(mcis && ecc_stngs))
+		goto err_ret;
+
 	msrs = msrs_alloc();
 	if (!msrs)
-		goto err_ret;
+		goto err_free;
 
 	err = pci_register_driver(&amd64_pci_driver);
 	if (err)
 		goto err_pci;
 
-	/*
-	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
-	 * amd64_pvt structs. These will be used in the 2nd stage init function
-	 * to finish initialization of the MC instances.
-	 */
 	err = -ENODEV;
-	for (nb = 0; nb < amd_nb_num(); nb++) {
-		if (!pvt_lookup[nb])
-			continue;
-
-		err = amd64_init_2nd_stage(pvt_lookup[nb]);
-		if (err)
-			goto err_2nd_stage;
-
-		load_ok = true;
-	}
-
-	if (load_ok) {
-		amd64_setup_pci_device();
-		return 0;
-	}
+	if (!atomic_read(&drv_instances))
+		goto err_no_instances;
+
+	setup_pci_device();
+	return 0;
 
-err_2nd_stage:
+err_no_instances:
 	pci_unregister_driver(&amd64_pci_driver);
 
 err_pci:
 	msrs_free(msrs);
 	msrs = NULL;
 
+err_free:
+	kfree(mcis);
+	mcis = NULL;
+
+	kfree(ecc_stngs);
+	ecc_stngs = NULL;
+
 err_ret:
 	return err;
 }
@@ -2966,6 +2819,12 @@ static void __exit amd64_edac_exit(void)
 
 	pci_unregister_driver(&amd64_pci_driver);
 
+	kfree(ecc_stngs);
+	ecc_stngs = NULL;
+
+	kfree(mcis);
+	mcis = NULL;
+
 	msrs_free(msrs);
 	msrs = NULL;
 }
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
@@ -74,11 +74,26 @@
 #include "edac_core.h"
 #include "mce_amd.h"
 
-#define amd64_printk(level, fmt, arg...) \
-	edac_printk(level, "amd64", fmt, ##arg)
+#define amd64_debug(fmt, arg...) \
+	edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)
 
-#define amd64_mc_printk(mci, level, fmt, arg...) \
-	edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+#define amd64_info(fmt, arg...) \
+	edac_printk(KERN_INFO, "amd64", fmt, ##arg)
+
+#define amd64_notice(fmt, arg...) \
+	edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
+
+#define amd64_warn(fmt, arg...) \
+	edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_err(fmt, arg...) \
+	edac_printk(KERN_ERR, "amd64", fmt, ##arg)
+
+#define amd64_mc_warn(mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_mc_err(mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)
 
 /*
  * Throughout the comments in this code, the following terms are used:
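[Editor's note: the practical effect of the macro rework is that callers no longer pass a printk level. A before/after usage sketch, not taken from the patch itself; the message text and nid variable are invented for illustration:]

	/* old style: the level is an argument on every call site */
	amd64_printk(KERN_WARNING, "ECC is disabled on node %d\n", nid);

	/* new style: the level is encoded in the macro name */
	amd64_warn("ECC is disabled on node %d\n", nid);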
@@ -129,11 +144,9 @@
  * sections 3.5.4 and 3.5.5 for more information.
  */
 
-#define EDAC_AMD64_VERSION	" Ver: 3.3.0 " __DATE__
+#define EDAC_AMD64_VERSION	"v3.3.0"
 #define EDAC_MOD_STR		"amd64_edac"
 
-#define EDAC_MAX_NUMNODES	8
-
 /* Extended Model from CPUID, for CPU Revision numbers */
 #define K8_REV_D		1
 #define K8_REV_E		2
@@ -322,9 +335,6 @@
 #define K8_SCRCTRL		0x58
 
 #define F10_NB_CFG_LOW		0x88
-#define F10_NB_CFG_LOW_ENABLE_EXT_CFG	BIT(14)
-
-#define F10_NB_CFG_HIGH		0x8C
 
 #define F10_ONLINE_SPARE	0xB0
 #define F10_ONLINE_SPARE_SWAPDONE0(x)	((x) & BIT(1))
@@ -373,7 +383,6 @@ static inline int get_node_id(struct pci_dev *pdev)
 enum amd64_chipset_families {
 	K8_CPUS = 0,
 	F10_CPUS,
-	F11_CPUS,
 };
 
 /* Error injection control structure */
@@ -384,16 +393,13 @@ struct error_injection {
 };
 
 struct amd64_pvt {
+	struct low_ops *ops;
+
 	/* pci_device handles which we utilize */
-	struct pci_dev *addr_f1_ctl;
-	struct pci_dev *dram_f2_ctl;
-	struct pci_dev *misc_f3_ctl;
+	struct pci_dev *F1, *F2, *F3;
 
 	int mc_node_id;		/* MC index of this MC node */
 	int ext_model;		/* extended model value of this node */
-	struct low_ops *ops;	/* pointer to per PCI Device ID func table */
 
 	int channel_count;
 
 	/* Raw registers */
@@ -455,27 +461,27 @@ struct amd64_pvt {
 	/* place to store error injection parameters prior to issue */
 	struct error_injection injection;
 
-	/* Save old hw registers' values before we modified them */
-	u32 nbctl_mcgctl_saved;	/* When true, following 2 are valid */
-	u32 old_nbctl;
+	/* DCT per-family scrubrate setting */
+	u32 min_scrubrate;
 
-	/* MC Type Index value: socket F vs Family 10h */
-	u32 mc_type_index;
+	/* family name this instance is running on */
+	const char *ctl_name;
+};
+
+/*
+ * per-node ECC settings descriptor
+ */
+struct ecc_settings {
+	u32 old_nbctl;
+	bool nbctl_valid;
 
+	/* misc settings */
 	struct flags {
-		unsigned long cf8_extcfg:1;
 		unsigned long nb_mce_enable:1;
 		unsigned long nb_ecc_prev:1;
 	} flags;
 };
 
-struct scrubrate {
-	u32 scrubval;		/* bit pattern for scrub rate */
-	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
-};
-
-extern struct scrubrate scrubrates[23];
-
 extern const char *tt_msgs[4];
 extern const char *ll_msgs[4];
 extern const char *rrrr_msgs[16];
@@ -517,23 +523,10 @@ struct low_ops {
 
 struct amd64_family_type {
 	const char *ctl_name;
-	u16 addr_f1_ctl;
-	u16 misc_f3_ctl;
+	u16 f1_id, f3_id;
 	struct low_ops ops;
 };
 
-static struct amd64_family_type amd64_family_types[];
-
-static inline const char *get_amd_family_name(int index)
-{
-	return amd64_family_types[index].ctl_name;
-}
-
-static inline struct low_ops *family_ops(int index)
-{
-	return &amd64_family_types[index].ops;
-}
-
 static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
					    u32 *val, const char *func)
 {
@@ -541,7 +534,7 @@ static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
 
 	err = pci_read_config_dword(pdev, offset, val);
 	if (err)
-		amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
+		amd64_warn("%s: error reading F%dx%x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);
 
 	return err;
@@ -556,7 +549,6 @@ static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
  */
 
 #define K8_MIN_SCRUB_RATE_BITS	0x0
 #define F10_MIN_SCRUB_RATE_BITS	0x5
-#define F11_MIN_SCRUB_RATE_BITS	0x6
 
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
@@ -23,9 +23,7 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
 	if (ret != -EINVAL) {
 
 		if (value > 3) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid section 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
 			return -EINVAL;
 		}
@@ -58,9 +56,7 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
 	if (ret != -EINVAL) {
 
 		if (value > 8) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid word 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
 			return -EINVAL;
 		}
@@ -92,8 +88,7 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
 	if (ret != -EINVAL) {
 
 		if (value & 0xFFFF0000) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid EccVector: 0x%lx\n",
+			amd64_warn("%s: invalid EccVector: 0x%lx\n",
				   __func__, value);
 			return -EINVAL;
 		}
@@ -122,15 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
 	/* Form value to choose 16-byte section of cacheline */
 	section = F10_NB_ARRAY_DRAM_ECC |
		  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_ADDR, section);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
					       pvt->injection.bit_map);
 
 	/* Issue 'word' and 'bit' along with the READ request */
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_DATA, word_bits);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 	debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
@@ -157,15 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
 	/* Form value to choose 16-byte section of cacheline */
 	section = F10_NB_ARRAY_DRAM_ECC |
		  SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_ADDR, section);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
						pvt->injection.bit_map);
 
 	/* Issue 'word' and 'bit' along with the READ request */
-	pci_write_config_dword(pvt->misc_f3_ctl,
-			       F10_NB_ARRAY_DATA, word_bits);
+	pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 	debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
@@ -818,9 +818,10 @@ static void cpc925_del_edac_devices(void)
 }
 
 /* Convert current back-ground scrub rate into byte/sec bandwith */
-static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	int bw;
 	u32 mscr;
 	u8 si;
@@ -832,11 +833,11 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 
 	if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
	    (si == 0)) {
 		cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
-		*bw = 0;
+		bw = 0;
 	} else
-		*bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
+		bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
 
-	return 0;
+	return bw;
 }
 
 /* Return 0 for single channel; 1 for dual channel */
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
@@ -983,11 +983,11 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
 
 	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
 
-	return 0;
+	return scrubrates[i].bandwidth;
 }
 
 /* Convert current scrub rate value into byte/sec bandwidth */
-static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	const struct scrubrate *scrubrates;
 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -1013,10 +1013,8 @@ static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 			"Invalid sdram scrub control value: 0x%x\n", scrubval);
 		return -1;
 	}
+	return scrubrates[i].bandwidth;
 
-	*bw = scrubrates[i].bandwidth;
-
-	return 0;
 }
 
 /* Return 1 if dual channel mode is active. Else return 0. */
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
@@ -68,9 +68,10 @@
 #define EDAC_PCI "PCI"
 #define EDAC_DEBUG "DEBUG"
 
+extern const char *edac_mem_types[];
+
 #ifdef CONFIG_EDAC_DEBUG
 extern int edac_debug_level;
-extern const char *edac_mem_types[];
 
 #define edac_debug_printk(level, fmt, arg...) \
	do { \
@@ -386,7 +387,7 @@ struct mem_ctl_info {
	   representation and converts it to the closest matching
	   bandwith in bytes/sec.
	 */
-	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
 
 	/* pointer to edac checking routine */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
@@ -76,6 +76,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
 	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
 }
 
+#endif				/* CONFIG_EDAC_DEBUG */
+
 /*
  * keep those in sync with the enum mem_type
  */
@@ -100,8 +102,6 @@ const char *edac_mem_types[] = {
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
-#endif				/* CONFIG_EDAC_DEBUG */
-
 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
  * Adjust 'ptr' so that its alignment is at least as stringent as what the
  * compiler would provide for X and return the aligned result.
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
@@ -436,56 +436,55 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
 	return count;
 }
 
-/* memory scrubbing */
+/* Memory scrubbing interface:
+ *
+ * A MC driver can limit the scrubbing bandwidth based on the CPU type.
+ * Therefore, ->set_sdram_scrub_rate should be made to return the actual
+ * bandwidth that is accepted or 0 when scrubbing is to be disabled.
+ *
+ * Negative value still means that an error has occurred while setting
+ * the scrub rate.
+ */
 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
					  const char *data, size_t count)
 {
 	unsigned long bandwidth = 0;
-	int err;
+	int new_bw = 0;
 
-	if (!mci->set_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate setting not implemented!\n");
+	if (!mci->set_sdram_scrub_rate)
 		return -EINVAL;
-	}
 
 	if (strict_strtoul(data, 10, &bandwidth) < 0)
 		return -EINVAL;
 
-	err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth);
-	if (err) {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Failed setting scrub rate to %lu\n", bandwidth);
-		return -EINVAL;
-	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Scrub rate set to: %lu\n", bandwidth);
+	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
+	if (new_bw >= 0) {
+		edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw);
 		return count;
 	}
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth);
+	return -EINVAL;
 }
 
+/*
+ * ->get_sdram_scrub_rate() return value semantics same as above.
+ */
 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
 {
-	u32 bandwidth = 0;
-	int err;
+	int bandwidth = 0;
 
-	if (!mci->get_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate reading not implemented\n");
+	if (!mci->get_sdram_scrub_rate)
 		return -EINVAL;
-	}
 
-	err = mci->get_sdram_scrub_rate(mci, &bandwidth);
-	if (err) {
+	bandwidth = mci->get_sdram_scrub_rate(mci);
+	if (bandwidth < 0) {
 		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
-		return err;
+		return bandwidth;
 	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Read scrub rate: %d\n", bandwidth);
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth);
 	return sprintf(data, "%d\n", bandwidth);
-	}
 }
 
 /* default attribute files for the MCI object */
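[Editor's note: the comment block above fixes the contract for both callbacks: return the bandwidth in bytes/sec that was actually programmed (0 when scrubbing is disabled), or a negative value on error. A minimal driver-side sketch of that contract follows; the foo_* names and register accessors are invented for illustration and are not part of this patch:]

	/* Hypothetical driver implementing the reworked scrubrate callbacks. */
	static int foo_set_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
	{
		struct foo_pvt *pvt = mci->pvt_info;
		u32 actual_bw = foo_pick_closest_rate(new_bw);	/* clamp to hw table */

		if (foo_write_scrub_reg(pvt, actual_bw))
			return -EIO;		/* error while programming the rate */

		return actual_bw;		/* bandwidth the hardware accepted */
	}

	static int foo_get_scrub_rate(struct mem_ctl_info *mci)
	{
		struct foo_pvt *pvt = mci->pvt_info;
		u32 val;

		if (foo_read_scrub_reg(pvt, &val))
			return -EIO;

		return foo_reg_to_bw(val);	/* 0 means scrubbing is disabled */
	}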
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
@@ -611,20 +611,17 @@ static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
 
 	bandwidth = 5900000 * i5100_mc_scrben(dw);
 
-	return 0;
+	return bandwidth;
 }
 
-static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
-				u32 *bandwidth)
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct i5100_priv *priv = mci->pvt_info;
 	u32 dw;
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
 
-	*bandwidth = 5900000 * i5100_mc_scrben(dw);
-
-	return 0;
+	return 5900000 * i5100_mc_scrben(dw);
 }
 
 static struct pci_dev *pci_get_device_func(unsigned vendor,