Commit 0a227af5 authored by Yazen Ghannam, committed by Borislav Petkov

EDAC/amd64: Support more than two controllers for chip select handling

The struct chip_select array that's used for saving chip select bases
and masks is fixed at a length of two. There should be one struct
chip_select for each controller, so this array should be increased to
support systems that may have more than two controllers.

Increase the size of the struct chip_select array to eight, which is the
largest number of controllers per die currently supported on AMD
systems.

Also, carve out the Family 17h+ reading of the bases/masks into a
separate function. This effectively reverts the original bases/masks
reading code to before Family 17h support was added.
Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Kim Phillips <kim.phillips@amd.com>
Cc: James Morse <james.morse@arm.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: linux-edac <linux-edac@vger.kernel.org>
Link: https://lkml.kernel.org/r/20190228153558.127292-5-Yazen.Ghannam@amd.com
parent 7835961d
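As a quick orientation before the diff, here is a minimal, self-contained user-space sketch of the bookkeeping the message describes: one struct chip_select per controller, an array sized by a NUM_CONTROLLERS-style constant, and per-controller base/mask register offsets computed the way the new read_umc_base_mask() below computes them (bases at cs * 4, one shared mask per chip-select pair at (cs >> 1) * 4). The register offsets and the umc_base()/smn_read() helpers are made-up placeholders for the example, not the driver's definitions.

/*
 * Illustrative sketch only -- not the driver code. It mirrors the layout the
 * commit message describes; offsets and helpers below are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_CHIPSELECTS	8	/* chip-select rows per controller */
#define NUM_CONTROLLERS	8	/* was 2 before this patch */

/* Placeholder register offsets, for illustration only. */
#define UMCCH_BASE_ADDR	0x00u
#define UMCCH_ADDR_MASK	0x20u

struct chip_select {
	uint32_t csbases[NUM_CHIPSELECTS];
	uint32_t csmasks[NUM_CHIPSELECTS];
	int b_cnt, m_cnt;
};

static struct chip_select csels[NUM_CONTROLLERS];

/* Hypothetical stand-in for get_umc_base(): start of a UMC's register block. */
static uint32_t umc_base(int umc)
{
	return 0x1000u * (uint32_t)umc;
}

/* Hypothetical stand-in for amd_smn_read(): always "succeeds" with a dummy value. */
static int smn_read(uint32_t reg, uint32_t *val)
{
	*val = reg ^ 0xdeadbeefu;
	return 0;
}

int main(void)
{
	int umc, cs;

	for (umc = 0; umc < NUM_CONTROLLERS; umc++) {
		csels[umc].b_cnt = 8;	/* eight base registers per controller */
		csels[umc].m_cnt = 4;	/* two chip selects share one mask */

		for (cs = 0; cs < csels[umc].b_cnt; cs++) {
			/* Bases are contiguous; masks advance once per chip-select pair. */
			uint32_t base_reg = umc_base(umc) + UMCCH_BASE_ADDR + (uint32_t)(cs * 4);
			uint32_t mask_reg = umc_base(umc) + UMCCH_ADDR_MASK + (uint32_t)((cs >> 1) * 4);

			smn_read(base_reg, &csels[umc].csbases[cs]);
			smn_read(mask_reg, &csels[umc].csmasks[cs]);
		}
	}

	printf("controller 0, chip select 3: base reg 0x%x, mask reg 0x%x\n",
	       UMCCH_BASE_ADDR + 3 * 4, UMCCH_ADDR_MASK + (3 >> 1) * 4);
	return 0;
}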
drivers/edac/amd64_edac.c
@@ -914,48 +914,64 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
+	} else if (pvt->fam >= 0x17) {
+		int umc;
+
+		for_each_umc(umc) {
+			pvt->csels[umc].b_cnt = 8;
+			pvt->csels[umc].m_cnt = 4;
+		}
+
 	} else {
 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
 	}
 }
 
+static void read_umc_base_mask(struct amd64_pvt *pvt)
+{
+	int cs, umc;
+
+	for_each_umc(umc) {
+		u32 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
+		u32 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
+
+		for_each_chip_select(cs, 0, pvt) {
+			u32 *base = &pvt->csels[umc].csbases[cs];
+			u32 *mask = &pvt->csels[umc].csmasks[cs];
+
+			u32 base_reg = umc_base_reg + (cs * 4);
+			u32 mask_reg = umc_mask_reg + ((cs >> 1) * 4);
+
+			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
+				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
+					 umc, cs, *base, base_reg);
+
+			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
+				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
+					 umc, cs, *mask, mask_reg);
+		}
+	}
+}
+
 /*
  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
  */
 static void read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
+	int cs;
 
 	prep_chip_selects(pvt);
 
-	if (pvt->umc) {
-		base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
-		base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
-		mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
-		mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
-	} else {
-		base_reg0 = DCSB0;
-		base_reg1 = DCSB1;
-		mask_reg0 = DCSM0;
-		mask_reg1 = DCSM1;
-	}
+	if (pvt->umc)
+		return read_umc_base_mask(pvt);
 
 	for_each_chip_select(cs, 0, pvt) {
-		int reg0   = base_reg0 + (cs * 4);
-		int reg1   = base_reg1 + (cs * 4);
+		int reg0   = DCSB0 + (cs * 4);
+		int reg1   = DCSB1 + (cs * 4);
 		u32 *base0 = &pvt->csels[0].csbases[cs];
 		u32 *base1 = &pvt->csels[1].csbases[cs];
 
-		if (pvt->umc) {
-			if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
-				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: 0x%x\n",
-					 cs, *base0, reg0);
-
-			if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
-				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: 0x%x\n",
-					 cs, *base1, reg1);
-		} else {
 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				 cs, *base0, reg0);
@@ -968,23 +984,13 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 				 cs, *base1, (pvt->fam == 0x10) ? reg1
							: reg0);
 	}
-	}
 
 	for_each_chip_select_mask(cs, 0, pvt) {
-		int reg0   = mask_reg0 + (cs * 4);
-		int reg1   = mask_reg1 + (cs * 4);
+		int reg0   = DCSM0 + (cs * 4);
+		int reg1   = DCSM1 + (cs * 4);
 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
 
-		if (pvt->umc) {
-			if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
-				edac_dbg(0, "  DCSM0[%d]=0x%08x reg: 0x%x\n",
-					 cs, *mask0, reg0);
-
-			if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
-				edac_dbg(0, "  DCSM1[%d]=0x%08x reg: 0x%x\n",
-					 cs, *mask1, reg1);
-		} else {
 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
 			edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
 				 cs, *mask0, reg0);
@@ -997,7 +1003,6 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
							: reg0);
 	}
-	}
 }
 
 static void determine_memory_type(struct amd64_pvt *pvt)
drivers/edac/amd64_edac.h
@@ -96,6 +96,7 @@
 /* Hardware limit on ChipSelect rows per MC and processors per system */
 #define NUM_CHIPSELECTS			8
 #define DRAM_RANGES			8
+#define NUM_CONTROLLERS			8
 
 #define ON true
 #define OFF false
@@ -351,8 +352,8 @@ struct amd64_pvt {
 	u32 dbam0;		/* DRAM Base Address Mapping reg for DCT0 */
 	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */
 
-	/* one for each DCT */
-	struct chip_select csels[2];
+	/* one for each DCT/UMC */
+	struct chip_select csels[NUM_CONTROLLERS];
 
 	/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
 	struct dram_range ranges[DRAM_RANGES];