Commit 13189525 authored by Doug Thompson, committed by Linus Torvalds

[PATCH] EDAC: probe1 cleanup 1-of-2

- Add lower-level functions that handle various parts of the initialization
  done by the xxx_probe1() functions.  Some of the xxx_probe1() functions are
  much too long and complicated (see "Chapter 5: Functions" in
  Documentation/CodingStyle).

- Cleanup of probe1() functions in EDAC
Signed-off-by: Doug Thompson <norsk5@xmission.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 2d7bbb91
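
Every driver touched here follows the same shape: the csrow-initialization loop is lifted out of xxx_probe1() into a small xxx_init_csrows() helper, and probe1() keeps only allocation, common mci setup, and registration. A minimal schematic of that shape (xxx_, NR_CSROWS and NR_CHANS are placeholders, not code from any one of the drivers below):

static void xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
{
	int index;

	for (index = 0; index < mci->nr_csrows; index++) {
		/* read the chipset's row/boundary registers and fill in
		 * mci->csrows[index]: first_page, nr_pages, grain, mtype, ...
		 */
	}
}

static int xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
	struct mem_ctl_info *mci;

	mci = edac_mc_alloc(0, NR_CSROWS, NR_CHANS);	/* placeholder sizes */

	if (mci == NULL)
		return -ENOMEM;

	/* ... common mci field setup ... */
	xxx_init_csrows(mci, pdev);

	if (edac_mc_add_mc(mci, 0)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	return 0;
}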
@@ -182,6 +182,38 @@ static void amd76x_check(struct mem_ctl_info *mci)
 	amd76x_process_error_info(mci, &info, 1);
 }
 
+static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+		enum edac_type edac_mode)
+{
+	struct csrow_info *csrow;
+	u32 mba, mba_base, mba_mask, dms;
+	int index;
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		/* find the DRAM Chip Select Base address and mask */
+		pci_read_config_dword(pdev,
+				AMD76X_MEM_BASE_ADDR + (index * 4),
+				&mba);
+
+		if (!(mba & BIT(0)))
+			continue;
+
+		mba_base = mba & 0xff800000UL;
+		mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
+		pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
+		csrow->first_page = mba_base >> PAGE_SHIFT;
+		csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+		csrow->page_mask = mba_mask >> PAGE_SHIFT;
+		csrow->grain = csrow->nr_pages << PAGE_SHIFT;
+		csrow->mtype = MEM_RDDR;
+		csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+		csrow->edac_mode = edac_mode;
+	}
+}
+
 /**
  * amd76x_probe1 - Perform set up for detected device
  * @pdev; PCI device detected
@@ -193,15 +225,13 @@ static void amd76x_check(struct mem_ctl_info *mci)
  */
 static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
 {
-	int rc = -ENODEV;
-	int index;
-	struct mem_ctl_info *mci = NULL;
-	enum edac_type ems_modes[] = {
+	static const enum edac_type ems_modes[] = {
 		EDAC_NONE,
 		EDAC_EC,
 		EDAC_SECDED,
 		EDAC_SECDED
 	};
+	struct mem_ctl_info *mci = NULL;
 	u32 ems;
 	u32 ems_mode;
 	struct amd76x_error_info discard;
@@ -212,8 +242,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
 	mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
 
 	if (mci == NULL) {
-		rc = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 
 	debugf0("%s(): mci = %p\n", __func__, mci);
@@ -228,33 +257,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
 	mci->edac_check = amd76x_check;
 	mci->ctl_page_to_phys = NULL;
 
-	for (index = 0; index < mci->nr_csrows; index++) {
-		struct csrow_info *csrow = &mci->csrows[index];
-		u32 mba;
-		u32 mba_base;
-		u32 mba_mask;
-		u32 dms;
-
-		/* find the DRAM Chip Select Base address and mask */
-		pci_read_config_dword(pdev,
-			AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
-
-		if (!(mba & BIT(0)))
-			continue;
-
-		mba_base = mba & 0xff800000UL;
-		mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
-		pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
-		csrow->first_page = mba_base >> PAGE_SHIFT;
-		csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
-		csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
-		csrow->page_mask = mba_mask >> PAGE_SHIFT;
-		csrow->grain = csrow->nr_pages << PAGE_SHIFT;
-		csrow->mtype = MEM_RDDR;
-		csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
-		csrow->edac_mode = ems_modes[ems_mode];
-	}
-
+	amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
 	amd76x_get_error_info(mci, &discard); /* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
@@ -270,9 +273,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
 	return 0;
 
 fail:
-	if (mci != NULL)
-		edac_mc_free(mci);
-
-	return rc;
+	edac_mc_free(mci);
+	return -ENODEV;
 }
 
 /* returns count (>= 0), or negative on error */
...
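
As a side note on the arithmetic that moved into amd76x_init_csrows(): the chip-select base register is split into a base (top bits) and a size mask, and both are converted to pages. A small user-space check of that decode, with an assumed PAGE_SHIFT of 12 and a made-up register value (both are illustrative assumptions, not values read from hardware):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages, as on x86 */

int main(void)
{
	uint32_t mba = 0x10003f81;	/* hypothetical chip-select base register */
	uint32_t mba_base = mba & 0xff800000UL;
	uint32_t mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
	uint32_t first_page = mba_base >> PAGE_SHIFT;
	uint32_t nr_pages = (mba_mask + 1) >> PAGE_SHIFT;

	/* bit 0 set = row enabled; base 0x10000000 (256 MiB), mask bits span 1 GiB */
	printf("first_page=0x%x nr_pages=0x%x (%u MiB)\n",
	       first_page, nr_pages, nr_pages >> (20 - PAGE_SHIFT));
	return 0;
}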
(The diff for one additional file in this commit is collapsed and not shown.)
@@ -335,99 +335,61 @@ static void e7xxx_check(struct mem_ctl_info *mci)
 	e7xxx_process_error_info(mci, &info, 1);
 }
 
-static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
+/* Return 1 if dual channel mode is active.  Else return 0. */
+static inline int dual_channel_active(u32 drc, int dev_idx)
 {
-	int rc = -ENODEV;
-	int index;
-	u16 pci_data;
-	struct mem_ctl_info *mci = NULL;
-	struct e7xxx_pvt *pvt = NULL;
-	u32 drc;
-	int drc_chan = 1;	/* Number of channels 0=1chan,1=2chan */
-	int drc_drbg = 1;	/* DRB granularity 0=32mb,1=64mb */
-	int drc_ddim;		/* DRAM Data Integrity Mode 0=none,2=edac */
-	u32 dra;
-	unsigned long last_cumul_size;
-	struct e7xxx_error_info discard;
-
-	debugf0("%s(): mci\n", __func__);
-
-	/* need to find out the number of channels */
-	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+	return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
+}
 
+/* Return DRB granularity (0=32mb, 1=64mb). */
+static inline int drb_granularity(u32 drc, int dev_idx)
+{
 	/* only e7501 can be single channel */
-	if (dev_idx == E7501) {
-		drc_chan = ((drc >> 22) & 0x1);
-		drc_drbg = (drc >> 18) & 0x3;
-	}
+	return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
+}
 
-	drc_ddim = (drc >> 20) & 0x3;
-	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
-
-	if (mci == NULL) {
-		rc = -ENOMEM;
-		goto fail;
-	}
-
-	debugf3("%s(): init mci\n", __func__);
-	mci->mtype_cap = MEM_FLAG_RDDR;
-	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
-		EDAC_FLAG_S4ECD4ED;
-	/* FIXME - what if different memory types are in different csrows? */
-	mci->mod_name = EDAC_MOD_STR;
-	mci->mod_ver = E7XXX_REVISION;
-	mci->dev = &pdev->dev;
-
-	debugf3("%s(): init pvt\n", __func__);
-	pvt = (struct e7xxx_pvt *) mci->pvt_info;
-	pvt->dev_info = &e7xxx_devs[dev_idx];
-	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
-					pvt->dev_info->err_dev,
-					pvt->bridge_ck);
-
-	if (!pvt->bridge_ck) {
-		e7xxx_printk(KERN_ERR, "error reporting device not found:"
-			"vendor %x device 0x%x (broken BIOS?)\n",
-			PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
-		goto fail;
-	}
-
-	debugf3("%s(): more mci init\n", __func__);
-	mci->ctl_name = pvt->dev_info->ctl_name;
-	mci->edac_check = e7xxx_check;
-	mci->ctl_page_to_phys = ctl_page_to_phys;
+static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+		int dev_idx, u32 drc)
+{
+	unsigned long last_cumul_size;
+	int index;
+	u8 value;
+	u32 dra, cumul_size;
+	int drc_chan, drc_drbg, drc_ddim, mem_dev;
+	struct csrow_info *csrow;
 
-	/* find out the device types */
 	pci_read_config_dword(pdev, E7XXX_DRA, &dra);
+	drc_chan = dual_channel_active(drc, dev_idx);
+	drc_drbg = drb_granularity(drc, dev_idx);
+	drc_ddim = (drc >> 20) & 0x3;
+	last_cumul_size = 0;
 
-	/*
-	 * The dram row boundary (DRB) reg values are boundary address
+	/* The dram row boundary (DRB) reg values are boundary address
 	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
 	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
 	 * contain the total memory contained in all eight rows.
 	 */
-	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
-		u8 value;
-		u32 cumul_size;
+	for (index = 0; index < mci->nr_csrows; index++) {
 		/* mem_dev 0=x8, 1=x4 */
-		int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
-		struct csrow_info *csrow = &mci->csrows[index];
+		mem_dev = (dra >> (index * 4 + 3)) & 0x1;
+		csrow = &mci->csrows[index];
 
 		pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
 		/* convert a 64 or 32 MiB DRB to a page size. */
 		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
 		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
 			cumul_size);
 
 		if (cumul_size == last_cumul_size)
 			continue;	/* not populated */
 
 		csrow->first_page = last_cumul_size;
 		csrow->last_page = cumul_size - 1;
 		csrow->nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
 		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
 		csrow->mtype = MEM_RDDR;	/* only one type supported */
 		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
 
 		/*
@@ -445,9 +407,54 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
 		} else
 			csrow->edac_mode = EDAC_NONE;
 	}
+}
 
-	mci->edac_cap |= EDAC_FLAG_NONE;
+static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	u16 pci_data;
+	struct mem_ctl_info *mci = NULL;
+	struct e7xxx_pvt *pvt = NULL;
+	u32 drc;
+	int drc_chan;
+	struct e7xxx_error_info discard;
+
+	debugf0("%s(): mci\n", __func__);
+	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+	drc_chan = dual_channel_active(drc, dev_idx);
+	mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
+
+	if (mci == NULL)
+		return -ENOMEM;
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->mtype_cap = MEM_FLAG_RDDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
+		EDAC_FLAG_S4ECD4ED;
+	/* FIXME - what if different memory types are in different csrows? */
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = E7XXX_REVISION;
+	mci->dev = &pdev->dev;
+
+	debugf3("%s(): init pvt\n", __func__);
+	pvt = (struct e7xxx_pvt *) mci->pvt_info;
+	pvt->dev_info = &e7xxx_devs[dev_idx];
+	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+					pvt->dev_info->err_dev,
+					pvt->bridge_ck);
+
+	if (!pvt->bridge_ck) {
+		e7xxx_printk(KERN_ERR, "error reporting device not found:"
+			"vendor %x device 0x%x (broken BIOS?)\n",
+			PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+		goto fail0;
+	}
+
+	debugf3("%s(): more mci init\n", __func__);
+	mci->ctl_name = pvt->dev_info->ctl_name;
+	mci->edac_check = e7xxx_check;
+	mci->ctl_page_to_phys = ctl_page_to_phys;
+	e7xxx_init_csrows(mci, pdev, dev_idx, drc);
+	mci->edac_cap |= EDAC_FLAG_NONE;
 
 	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
 	/* load the top of low memory, remap base, and remap limit vars */
 	pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
@@ -468,21 +475,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
 	 */
 	if (edac_mc_add_mc(mci,0)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
-		goto fail;
+		goto fail1;
 	}
 
 	/* get this far and it's successful */
 	debugf3("%s(): success\n", __func__);
 	return 0;
 
-fail:
-	if (mci != NULL) {
-		if(pvt != NULL && pvt->bridge_ck)
-			pci_dev_put(pvt->bridge_ck);
-		edac_mc_free(mci);
-	}
-
-	return rc;
+fail1:
+	pci_dev_put(pvt->bridge_ck);
+
+fail0:
+	edac_mc_free(mci);
+
+	return -ENODEV;
 }
 
 /* returns count (>= 0), or negative on error */
...
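
The conversion e7xxx_init_csrows() inherits, cumul_size = value << (25 + drc_drbg - PAGE_SHIFT), turns a cumulative DRB byte into a page count: 2^25 is 32 MB, so drc_drbg = 1 bumps the unit to 64 MB. A quick host-side check with invented DRB values and an assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	/* hypothetical cumulative DRB bytes for four rows */
	unsigned char drb[] = { 2, 4, 4, 8 };
	int drc_drbg = 1;	/* 1 = 64 MB granularity, 0 = 32 MB */
	unsigned long last_cumul_size = 0;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long cumul_size =
			(unsigned long)drb[i] << (25 + drc_drbg - PAGE_SHIFT);

		if (cumul_size == last_cumul_size) {
			printf("row %d: not populated\n", i);
			continue;
		}
		printf("row %d: %lu pages (%lu MiB)\n", i,
		       cumul_size - last_cumul_size,
		       (cumul_size - last_cumul_size) >> (20 - PAGE_SHIFT));
		last_cumul_size = cumul_size;
	}
	return 0;
}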
@@ -133,15 +133,50 @@ static void i82860_check(struct mem_ctl_info *mci)
 	i82860_process_error_info(mci, &info, 1);
 }
 
-static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
+static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
 {
-	int rc = -ENODEV;
-	int index;
-	struct mem_ctl_info *mci = NULL;
 	unsigned long last_cumul_size;
-	struct i82860_error_info discard;
+	u16 mchcfg_ddim;	/* DRAM Data Integrity Mode 0=none, 2=edac */
+	u16 value;
+	u32 cumul_size;
+	struct csrow_info *csrow;
+	int index;
 
-	u16 mchcfg_ddim;	/* DRAM Data Integrity Mode 0=none,2=edac */
+	pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
+	mchcfg_ddim = mchcfg_ddim & 0x180;
+	last_cumul_size = 0;
+
+	/* The group row boundary (GRA) reg values are boundary address
+	 * for each DRAM row with a granularity of 16MB.  GRA regs are
+	 * cumulative; therefore GRA15 will contain the total memory contained
+	 * in all eight rows.
+	 */
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+		pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
+		cumul_size = (value & I82860_GBA_MASK) <<
+			(I82860_GBA_SHIFT - PAGE_SHIFT);
+		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+			cumul_size);
+
+		if (cumul_size == last_cumul_size)
+			continue;	/* not populated */
+
+		csrow->first_page = last_cumul_size;
+		csrow->last_page = cumul_size - 1;
+		csrow->nr_pages = cumul_size - last_cumul_size;
+		last_cumul_size = cumul_size;
+		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB reolution */
+		csrow->mtype = MEM_RMBS;
+		csrow->dtype = DEV_UNKNOWN;
+		csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+	}
+}
+
+static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
+	struct i82860_error_info discard;
 
 	/* RDRAM has channels but these don't map onto the abstractions that
 	   edac uses.
@@ -159,53 +194,15 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
 	debugf3("%s(): init mci\n", __func__);
 	mci->dev = &pdev->dev;
 	mci->mtype_cap = MEM_FLAG_DDR;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
 	/* I"m not sure about this but I think that all RDRAM is SECDED */
 	mci->edac_cap = EDAC_FLAG_SECDED;
-	/* adjust FLAGS */
-
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = I82860_REVISION;
 	mci->ctl_name = i82860_devs[dev_idx].ctl_name;
 	mci->edac_check = i82860_check;
 	mci->ctl_page_to_phys = NULL;
-
-	pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
-	mchcfg_ddim = mchcfg_ddim & 0x180;
-
-	/*
-	 * The group row boundary (GRA) reg values are boundary address
-	 * for each DRAM row with a granularity of 16MB.  GRA regs are
-	 * cumulative; therefore GRA15 will contain the total memory contained
-	 * in all eight rows.
-	 */
-	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
-		u16 value;
-		u32 cumul_size;
-		struct csrow_info *csrow = &mci->csrows[index];
-
-		pci_read_config_word(pdev, I82860_GBA + index * 2,
-				&value);
-		cumul_size = (value & I82860_GBA_MASK) <<
-			(I82860_GBA_SHIFT - PAGE_SHIFT);
-		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
-			cumul_size);
-
-		if (cumul_size == last_cumul_size)
-			continue;	/* not populated */
-
-		csrow->first_page = last_cumul_size;
-		csrow->last_page = cumul_size - 1;
-		csrow->nr_pages = cumul_size - last_cumul_size;
-		last_cumul_size = cumul_size;
-		csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB reolution */
-		csrow->mtype = MEM_RMBS;
-		csrow->dtype = DEV_UNKNOWN;
-		csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
-	}
-
+	i82860_init_csrows(mci, pdev);
 	i82860_get_error_info(mci, &discard);	/* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
@@ -213,14 +210,17 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
 	 */
 	if (edac_mc_add_mc(mci,0)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
-		edac_mc_free(mci);
-	} else {
-		/* get this far and it's successful */
-		debugf3("%s(): success\n", __func__);
-		rc = 0;
+		goto fail;
 	}
 
-	return rc;
+	/* get this far and it's successful */
+	debugf3("%s(): success\n", __func__);
+	return 0;
+
+fail:
+	edac_mc_free(mci);
+	return -ENODEV;
 }
 
 /* returns count (>= 0), or negative on error */
...
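
i82860_init_csrows() uses the same cumulative-boundary idea with the 16 MB granularity the comment describes; the actual I82860_GBA_MASK/I82860_GBA_SHIFT values are not visible in this diff, so the shift of 24 below is only inferred from that stated granularity. A tiny worked check under that assumption and 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */
#define GRA_SHIFT  24	/* inferred from the stated 16 MB granularity */

int main(void)
{
	unsigned int gra = 12;	/* hypothetical cumulative GRA value */
	unsigned long cumul_pages = (unsigned long)gra << (GRA_SHIFT - PAGE_SHIFT);

	/* 12 * 16 MiB = 192 MiB = 49152 pages of 4 KiB */
	printf("cumulative boundary: %lu pages (%lu MiB)\n",
	       cumul_pages, cumul_pages >> (20 - PAGE_SHIFT));
	return 0;
}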
@@ -265,116 +265,109 @@ static void i82875p_check(struct mem_ctl_info *mci)
 extern int pci_proc_attach_device(struct pci_dev *);
 #endif
 
-static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
+/* Return 0 on success or 1 on failure. */
+static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
+		struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window)
 {
-	int rc = -ENODEV;
-	int index;
-	struct mem_ctl_info *mci = NULL;
-	struct i82875p_pvt *pvt = NULL;
-	unsigned long last_cumul_size;
-	struct pci_dev *ovrfl_pdev;
-	void __iomem *ovrfl_window = NULL;
-	u32 drc;
-	u32 drc_chan;	/* Number of channels 0=1chan,1=2chan */
-	u32 nr_chans;
-	u32 drc_ddim;	/* DRAM Data Integrity Mode 0=none,2=edac */
-	struct i82875p_error_info discard;
+	struct pci_dev *dev;
+	void __iomem *window;
 
-	debugf0("%s()\n", __func__);
-	ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+	*ovrfl_pdev = NULL;
+	*ovrfl_window = NULL;
+	dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
 
-	if (!ovrfl_pdev) {
-		/*
-		 * Intel tells BIOS developers to hide device 6 which
+	if (dev == NULL) {
+		/* Intel tells BIOS developers to hide device 6 which
 		 * configures the overflow device access containing
 		 * the DRBs - this is where we expose device 6.
 		 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
 		 */
 		pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
-		ovrfl_pdev =
-			pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+		dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
 
-		if (!ovrfl_pdev)
-			return -ENODEV;
+		if (dev == NULL)
+			return 1;
 	}
 
+	*ovrfl_pdev = dev;
+
 #ifdef CONFIG_PROC_FS
-	if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
-		i82875p_printk(KERN_ERR,
-			"%s(): Failed to attach overflow device\n", __func__);
-		return -ENODEV;
+	if ((dev->procent == NULL) && pci_proc_attach_device(dev)) {
+		i82875p_printk(KERN_ERR, "%s(): Failed to attach overflow "
+			"device\n", __func__);
+		return 1;
 	}
-#endif
-				/* CONFIG_PROC_FS */
-	if (pci_enable_device(ovrfl_pdev)) {
-		i82875p_printk(KERN_ERR,
-			"%s(): Failed to enable overflow device\n", __func__);
-		return -ENODEV;
+#endif	/* CONFIG_PROC_FS */
+
+	if (pci_enable_device(dev)) {
+		i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
+			"device\n", __func__);
+		return 1;
 	}
 
-	if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
+	if (pci_request_regions(dev, pci_name(dev))) {
 #ifdef CORRECT_BIOS
 		goto fail0;
 #endif
 	}
+
 	/* cache is irrelevant for PCI bus reads/writes */
-	ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
-				pci_resource_len(ovrfl_pdev, 0));
+	window = ioremap_nocache(pci_resource_start(dev, 0),
+			pci_resource_len(dev, 0));
 
-	if (!ovrfl_window) {
+	if (window == NULL) {
 		i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
 			__func__);
 		goto fail1;
 	}
 
-	/* need to find out the number of channels */
-	drc = readl(ovrfl_window + I82875P_DRC);
-	drc_chan = ((drc >> 21) & 0x1);
-	nr_chans = drc_chan + 1;
+	*ovrfl_window = window;
+	return 0;
 
-	drc_ddim = (drc >> 18) & 0x1;
-	mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
-			nr_chans);
+fail1:
+	pci_release_regions(dev);
 
-	if (!mci) {
-		rc = -ENOMEM;
-		goto fail2;
-	}
+#ifdef CORRECT_BIOS
+fail0:
+	pci_disable_device(dev);
+#endif
+	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+	return 1;
+}
 
-	debugf3("%s(): init mci\n", __func__);
-	mci->dev = &pdev->dev;
-	mci->mtype_cap = MEM_FLAG_DDR;
-	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
-	mci->edac_cap = EDAC_FLAG_UNKNOWN;
-	/* adjust FLAGS */
-
-	mci->mod_name = EDAC_MOD_STR;
-	mci->mod_ver = I82875P_REVISION;
-	mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
-	mci->edac_check = i82875p_check;
-	mci->ctl_page_to_phys = NULL;
+/* Return 1 if dual channel mode is active.  Else return 0. */
+static inline int dual_channel_active(u32 drc)
+{
+	return (drc >> 21) & 0x1;
+}
 
-	debugf3("%s(): init pvt\n", __func__);
-	pvt = (struct i82875p_pvt *) mci->pvt_info;
-	pvt->ovrfl_pdev = ovrfl_pdev;
-	pvt->ovrfl_window = ovrfl_window;
+static void i82875p_init_csrows(struct mem_ctl_info *mci,
+		struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc)
+{
+	struct csrow_info *csrow;
+	unsigned long last_cumul_size;
+	u8 value;
+	u32 drc_ddim;	/* DRAM Data Integrity Mode 0=none,2=edac */
+	u32 cumul_size;
+	int index;
 
-	/*
-	 * The dram row boundary (DRB) reg values are boundary address
+	drc_ddim = (drc >> 18) & 0x1;
+	last_cumul_size = 0;
+
+	/* The dram row boundary (DRB) reg values are boundary address
 	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
 	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
 	 * contain the total memory contained in all eight rows.
 	 */
-	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
-		u8 value;
-		u32 cumul_size;
-		struct csrow_info *csrow = &mci->csrows[index];
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
 
 		value = readb(ovrfl_window + I82875P_DRB + index);
 		cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
 		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
 			cumul_size);
 
 		if (cumul_size == last_cumul_size)
 			continue;	/* not populated */
@@ -382,12 +375,54 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
 		csrow->last_page = cumul_size - 1;
 		csrow->nr_pages = cumul_size - last_cumul_size;
 		last_cumul_size = cumul_size;
 		csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB reolution */
 		csrow->mtype = MEM_DDR;
 		csrow->dtype = DEV_UNKNOWN;
 		csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
 	}
+}
+
+static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	int rc = -ENODEV;
+	struct mem_ctl_info *mci;
+	struct i82875p_pvt *pvt;
+	struct pci_dev *ovrfl_pdev;
+	void __iomem *ovrfl_window;
+	u32 drc;
+	u32 nr_chans;
+	struct i82875p_error_info discard;
+
+	debugf0("%s()\n", __func__);
+	ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+	if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
+		return -ENODEV;
+
+	drc = readl(ovrfl_window + I82875P_DRC);
+	nr_chans = dual_channel_active(drc) + 1;
+	mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
+			nr_chans);
+
+	if (!mci) {
+		rc = -ENOMEM;
+		goto fail0;
+	}
+
+	debugf3("%s(): init mci\n", __func__);
+	mci->dev = &pdev->dev;
+	mci->mtype_cap = MEM_FLAG_DDR;
+	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+	mci->edac_cap = EDAC_FLAG_UNKNOWN;
+	mci->mod_name = EDAC_MOD_STR;
+	mci->mod_ver = I82875P_REVISION;
+	mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+	mci->edac_check = i82875p_check;
+	mci->ctl_page_to_phys = NULL;
+
+	debugf3("%s(): init pvt\n", __func__);
+	pvt = (struct i82875p_pvt *) mci->pvt_info;
+	pvt->ovrfl_pdev = ovrfl_pdev;
+	pvt->ovrfl_window = ovrfl_window;
+	i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
 	i82875p_get_error_info(mci, &discard);	/* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
@@ -395,25 +430,20 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
 	 */
 	if (edac_mc_add_mc(mci,0)) {
 		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
-		goto fail3;
+		goto fail1;
 	}
 
 	/* get this far and it's successful */
 	debugf3("%s(): success\n", __func__);
 	return 0;
 
-fail3:
+fail1:
 	edac_mc_free(mci);
 
-fail2:
+fail0:
 	iounmap(ovrfl_window);
-
-fail1:
 	pci_release_regions(ovrfl_pdev);
-
-#ifdef CORRECT_BIOS
-fail0:
-#endif
 	pci_disable_device(ovrfl_pdev);
-
 	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
 	return rc;
...
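
The new i82875p_setup_overfl_dev() also straightens the unwind order: each failure label undoes only what succeeded before the failing step (fail1 releases the BAR regions, fail0 disables the device, and the proc entry/pci_dev are intentionally left). A minimal sketch of that ladder, with step_a/step_b/undo_a as hypothetical stand-ins rather than the driver's real calls:

/* Hypothetical stand-ins for the real setup/teardown calls. */
static int step_a(void) { return 0; }	/* e.g. request the BAR regions */
static int step_b(void) { return 0; }	/* e.g. map the BAR */
static void undo_a(void) { }		/* e.g. release the BAR regions */

static int setup_sketch(void)
{
	if (step_a())
		goto fail0;

	if (step_b())
		goto fail1;

	return 0;	/* success: resources stay acquired for the caller */

fail1:
	undo_a();	/* undo only what succeeded before the failure */
fail0:
	return 1;	/* mirror the driver's 0 = success, 1 = failure convention */
}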
@@ -205,25 +205,72 @@ static void r82600_check(struct mem_ctl_info *mci)
 	r82600_process_error_info(mci, &info, 1);
 }
 
-static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
+static inline int ecc_enabled(u8 dramcr)
 {
-	int rc = -ENODEV;
+	return dramcr & BIT(5);
+}
+
+static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
+		u8 dramcr)
+{
+	struct csrow_info *csrow;
 	int index;
-	struct mem_ctl_info *mci = NULL;
+	u8 drbar;	/* SDRAM Row Boundry Address Register */
+	u32 row_high_limit, row_high_limit_last;
+	u32 reg_sdram, ecc_on, row_base;
+
+	ecc_on = ecc_enabled(dramcr);
+	reg_sdram = dramcr & BIT(4);
+	row_high_limit_last = 0;
+
+	for (index = 0; index < mci->nr_csrows; index++) {
+		csrow = &mci->csrows[index];
+
+		/* find the DRAM Chip Select Base address and mask */
+		pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
+
+		debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
+
+		row_high_limit = ((u32) drbar << 24);
+/*		row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
+
+		debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n",
+			__func__, index, row_high_limit, row_high_limit_last);
+
+		/* Empty row [p.57] */
+		if (row_high_limit == row_high_limit_last)
+			continue;
+
+		row_base = row_high_limit_last;
+		csrow->first_page = row_base >> PAGE_SHIFT;
+		csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+		csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+		/* Error address is top 19 bits - so granularity is	*
+		 * 14 bits						*/
+		csrow->grain = 1 << 14;
+		csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+		/* FIXME - check that this is unknowable with this chipset */
+		csrow->dtype = DEV_UNKNOWN;
+
+		/* Mode is global on 82600 */
+		csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+		row_high_limit_last = row_high_limit;
+	}
+}
+
+static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
+{
+	struct mem_ctl_info *mci;
 	u8 dramcr;
-	u32 ecc_on;
-	u32 reg_sdram;
 	u32 eapr;
 	u32 scrub_disabled;
 	u32 sdram_refresh_rate;
-	u32 row_high_limit_last = 0;
 	struct r82600_error_info discard;
 
 	debugf0("%s()\n", __func__);
 	pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
 	pci_read_config_dword(pdev, R82600_EAP, &eapr);
-	ecc_on = dramcr & BIT(5);
-	reg_sdram = dramcr & BIT(4);
 	scrub_disabled = eapr & BIT(31);
 	sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
 	debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
@@ -231,10 +278,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
 	debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
 	mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
 
-	if (mci == NULL) {
-		rc = -ENOMEM;
-		goto fail;
-	}
+	if (mci == NULL)
+		return -ENOMEM;
 
 	debugf0("%s(): mci = %p\n", __func__, mci);
 	mci->dev = &pdev->dev;
@@ -250,7 +295,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
 	 * is possible.  */
 	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
 
-	if (ecc_on) {
+	if (ecc_enabled(dramcr)) {
 		if (scrub_disabled)
 			debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
 				"%#0x\n", __func__, mci, eapr);
@@ -262,46 +307,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
 	mci->ctl_name = "R82600";
 	mci->edac_check = r82600_check;
 	mci->ctl_page_to_phys = NULL;
-
-	for (index = 0; index < mci->nr_csrows; index++) {
-		struct csrow_info *csrow = &mci->csrows[index];
-		u8 drbar;	/* sDram Row Boundry Address Register */
-		u32 row_high_limit;
-		u32 row_base;
-
-		/* find the DRAM Chip Select Base address and mask */
-		pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
-		debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx,
-			__func__, index, drbar);
-		row_high_limit = ((u32) drbar << 24);
-/*		row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
-		debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = "
-			"%#0x \n", mci->mc_idx, __func__, index,
-			row_high_limit, row_high_limit_last);
-
-		/* Empty row [p.57] */
-		if (row_high_limit == row_high_limit_last)
-			continue;
-
-		row_base = row_high_limit_last;
-		csrow->first_page = row_base >> PAGE_SHIFT;
-		csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
-		csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
-		/* Error address is top 19 bits - so granularity is	*
-		 * 14 bits						*/
-		csrow->grain = 1 << 14;
-		csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
-		/* FIXME - check that this is unknowable with this chipset */
-		csrow->dtype = DEV_UNKNOWN;
-
-		/* Mode is global on 82600 */
-		csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
-		row_high_limit_last = row_high_limit;
-	}
-
+	r82600_init_csrows(mci, pdev, dramcr);
 	r82600_get_error_info(mci, &discard);	/* clear counters */
 
 	/* Here we assume that we will never see multiple instances of this
@@ -324,10 +330,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
 	return 0;
 
 fail:
-	if (mci)
-		edac_mc_free(mci);
-
-	return rc;
+	edac_mc_free(mci);
+	return -ENODEV;
 }
 
 /* returns count (>= 0), or negative on error */
...
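
Lastly, r82600_init_csrows() walks cumulative boundaries in 16 MiB units: each DRBA byte shifted left by 24 is a row's upper limit, and the previous limit is its base. A small worked check with made-up DRBA bytes and an assumed PAGE_SHIFT of 12:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	/* hypothetical DRBA bytes: boundaries at 64 MiB, 64 MiB, 128 MiB */
	uint8_t drba[] = { 0x04, 0x04, 0x08 };
	uint32_t row_high_limit, row_high_limit_last = 0;
	int i;

	for (i = 0; i < 3; i++) {
		row_high_limit = (uint32_t)drba[i] << 24;

		if (row_high_limit == row_high_limit_last) {
			printf("row %d: empty\n", i);
			continue;
		}
		printf("row %d: first_page=0x%x last_page=0x%x (%u MiB)\n", i,
		       row_high_limit_last >> PAGE_SHIFT,
		       (row_high_limit >> PAGE_SHIFT) - 1,
		       (row_high_limit - row_high_limit_last) >> 20);
		row_high_limit_last = row_high_limit;
	}
	return 0;
}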