Commit 8028e290 authored by Linus Torvalds

Merge tag 'edac_updates_for_v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras

Pull EDAC updates from Borislav Petkov:

 - The AMD memory controllers' data fabric version 4.5 supports
   non-power-of-2 channel interleaving, which means certain bits of the
   system physical address cannot be reconstructed from the normalized
   address reported by the RAS hardware. Add support for denormalizing
   such addresses (see the sketch after this list)

 - Switch the EDAC drivers to the new Intel CPU model defines

 - The usual fixes and cleanups all over the place
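
As background for the NP2 item above: with a non-power-of-2 channel count,
normalization divides the upper address bits by 3 or 5, so the quotient alone
no longer pins down the system physical address. The sketch below is
illustrative only and all helper names are made up; the in-tree code works
with permutation bits and a rehash vector (see struct df4p5_denorm_ctx in the
atl/internal.h hunk further down) rather than this simplified remainder loop.

/*
 * Hedged, conceptual sketch of NP2 denormalization: try each possible
 * remainder of the divide-by-3/5 step, rebuild a candidate system
 * physical address, and keep the one whose hash selects the coherent
 * station that reported the error. Helpers are assumed, not real APIs.
 */
#include <stdint.h>

/* Assumed helpers, named for illustration only. */
uint64_t build_candidate_spa(uint64_t norm_addr, unsigned int remainder);
uint16_t hash_coh_st_fabric_id(uint64_t spa);

#define INVALID_SPA (~0ULL)

static uint64_t np2_denormalize(uint64_t norm_addr, unsigned int mod_value,
				uint16_t reported_coh_st_id)
{
	uint64_t spa, found = INVALID_SPA;
	unsigned int r;

	for (r = 0; r < mod_value; r++) {
		spa = build_candidate_spa(norm_addr, r);
		if (hash_coh_st_fabric_id(spa) != reported_coh_st_id)
			continue;
		if (found != INVALID_SPA)
			return INVALID_SPA;	/* ambiguous, give up */
		found = spa;
	}
	return found;
}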

* tag 'edac_updates_for_v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras:
  EDAC: Add missing MODULE_DESCRIPTION() macros
  EDAC/dmc520: Use devm_platform_ioremap_resource()
  EDAC/igen6: Add Intel Arrow Lake-U/H SoCs support
  RAS/AMD/FMPM: Use atl internal.h for INVALID_SPA
  RAS/AMD/ATL: Implement DF 4.5 NP2 denormalization
  RAS/AMD/ATL: Validate address map when information is gathered
  RAS/AMD/ATL: Expand helpers for adding and removing base and hole
  RAS/AMD/ATL: Read DRAM hole base early
  RAS/AMD/ATL: Add amd_atl pr_fmt() prefix
  RAS/AMD/ATL: Add a missing module description
  EDAC, i10nm: make skx_common.o a separate module
  EDAC/skx: Switch to new Intel CPU model defines
  EDAC/sb_edac: Switch to new Intel CPU model defines
  EDAC, pnd2: Switch to new Intel CPU model defines
  EDAC/i10nm: Switch to new Intel CPU model defines
  EDAC/ghes: Add missing newline to pr_info() statement
  RAS/AMD/ATL: Add missing newline to pr_info() statement
  EDAC/thunderx: Remove unused struct error_syndrome
parents e23dd95c 03a9b670
......@@ -54,11 +54,13 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o
layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o
obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o
skx_edac-y := skx_common.o skx_base.o
obj-$(CONFIG_EDAC_SKX) += skx_edac.o
skx_edac_common-y := skx_common.o
i10nm_edac-y := skx_common.o i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o
skx_edac-y := skx_base.o
obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
obj-$(CONFIG_EDAC_CELL) += cell_edac.o
obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
......
......@@ -480,7 +480,6 @@ static int dmc520_edac_probe(struct platform_device *pdev)
struct mem_ctl_info *mci;
void __iomem *reg_base;
u32 irq_mask_all = 0;
struct resource *res;
struct device *dev;
int ret, idx, irq;
u32 reg_val;
......@@ -505,8 +504,7 @@ static int dmc520_edac_probe(struct platform_device *pdev)
}
/* Initialize dmc520 edac */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg_base = devm_ioremap_resource(dev, res);
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
......
......@@ -547,7 +547,7 @@ static int __init ghes_edac_init(void)
return -ENODEV;
if (list_empty(ghes_devs)) {
pr_info("GHES probing device list is empty");
pr_info("GHES probing device list is empty\n");
return -ENODEV;
}
......
......@@ -942,16 +942,16 @@ static struct res_config gnr_cfg = {
};
static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
......
......@@ -258,6 +258,11 @@ static struct work_struct ecclog_work;
#define DID_MTL_P_SKU2 0x7d02
#define DID_MTL_P_SKU3 0x7d14
/* Compute die IDs for Arrow Lake-UH with IBECC */
#define DID_ARL_UH_SKU1 0x7d06
#define DID_ARL_UH_SKU2 0x7d20
#define DID_ARL_UH_SKU3 0x7d30
static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
{
union {
......@@ -597,6 +602,9 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU1), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU2), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_MTL_P_SKU3), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU1), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU2), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU3), (kernel_ulong_t)&mtl_p_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
......
......@@ -69,6 +69,7 @@ static void __exit fsl_ddr_mc_exit(void)
module_exit(fsl_ddr_mc_exit);
MODULE_DESCRIPTION("Freescale Layerscape EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("NXP Semiconductor");
module_param(edac_op_state, int, 0444);
......
......@@ -704,6 +704,7 @@ static void __exit mpc85xx_mc_exit(void)
module_exit(mpc85xx_mc_exit);
MODULE_DESCRIPTION("Freescale MPC85xx Memory Controller EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
......
......@@ -201,5 +201,6 @@ static struct platform_driver octeon_l2c_driver = {
};
module_platform_driver(octeon_l2c_driver);
MODULE_DESCRIPTION("Cavium Octeon Secondary Caches (L2C) EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
......@@ -319,5 +319,6 @@ static struct platform_driver octeon_lmc_edac_driver = {
};
module_platform_driver(octeon_lmc_edac_driver);
MODULE_DESCRIPTION("Cavium Octeon DRAM Memory Controller (LMC) EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
......@@ -137,5 +137,6 @@ static struct platform_driver co_cache_error_driver = {
};
module_platform_driver(co_cache_error_driver);
MODULE_DESCRIPTION("Cavium Octeon Primary Caches EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
......@@ -104,5 +104,6 @@ static struct platform_driver octeon_pci_driver = {
};
module_platform_driver(octeon_pci_driver);
MODULE_DESCRIPTION("Cavium Octeon PCI Controller EDAC driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
......@@ -1511,8 +1511,8 @@ static struct dunit_ops dnv_ops = {
};
static const struct x86_cpu_id pnd2_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &apl_ops),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &dnv_ops),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
......
......@@ -3546,13 +3546,13 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
}
static const struct x86_cpu_id sbridge_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table),
X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
X86_MATCH_VFM(INTEL_HASWELL_X, &pci_dev_descr_haswell_table),
X86_MATCH_VFM(INTEL_BROADWELL_X, &pci_dev_descr_broadwell_table),
X86_MATCH_VFM(INTEL_BROADWELL_D, &pci_dev_descr_broadwell_table),
X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &pci_dev_descr_knl_table),
X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
......
......@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
};
static const struct x86_cpu_id skx_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
......
......@@ -48,7 +48,7 @@ static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
static bool skx_mem_cfg_2lm;
int __init skx_adxl_get(void)
int skx_adxl_get(void)
{
const char * const *names;
int i, j;
......@@ -110,12 +110,14 @@ int __init skx_adxl_get(void)
return -ENODEV;
}
EXPORT_SYMBOL_GPL(skx_adxl_get);
void __exit skx_adxl_put(void)
void skx_adxl_put(void)
{
kfree(adxl_values);
kfree(adxl_msg);
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
{
......@@ -187,12 +189,14 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
{
skx_mem_cfg_2lm = mem_cfg_2lm;
}
EXPORT_SYMBOL_GPL(skx_set_mem_cfg);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
driver_decode = decode;
skx_show_retry_rd_err_log = show_retry_log;
}
EXPORT_SYMBOL_GPL(skx_set_decode);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
......@@ -206,6 +210,7 @@ int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
*id = GET_BITFIELD(reg, 12, 14);
return 0;
}
EXPORT_SYMBOL_GPL(skx_get_src_id);
int skx_get_node_id(struct skx_dev *d, u8 *id)
{
......@@ -219,6 +224,7 @@ int skx_get_node_id(struct skx_dev *d, u8 *id)
*id = GET_BITFIELD(reg, 0, 2);
return 0;
}
EXPORT_SYMBOL_GPL(skx_get_node_id);
static int get_width(u32 mtr)
{
......@@ -284,6 +290,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
*list = &dev_edac_list;
return ndev;
}
EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings);
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
{
......@@ -323,6 +330,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
pci_dev_put(pdev);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(skx_get_hi_lo);
static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
int minval, int maxval, const char *name)
......@@ -394,6 +402,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
return 1;
}
EXPORT_SYMBOL_GPL(skx_get_dimm_info);
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
int chan, int dimmno, const char *mod_str)
......@@ -442,6 +451,7 @@ int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
return (size == 0 || size == ~0ull) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(skx_get_nvdimm_info);
int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
const char *ctl_name, const char *mod_str,
......@@ -512,6 +522,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
imc->mci = NULL;
return rc;
}
EXPORT_SYMBOL_GPL(skx_register_mci);
static void skx_unregister_mci(struct skx_imc *imc)
{
......@@ -688,6 +699,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
mce->kflags |= MCE_HANDLED_EDAC;
return NOTIFY_DONE;
}
EXPORT_SYMBOL_GPL(skx_mce_check_error);
void skx_remove(void)
{
......@@ -725,3 +737,8 @@ void skx_remove(void)
kfree(d);
}
}
EXPORT_SYMBOL_GPL(skx_remove);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel server processors");
......@@ -231,8 +231,8 @@ typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
typedef bool (*skx_decode_f)(struct decoded_addr *res);
typedef void (*skx_show_retry_log_f)(struct decoded_addr *res, char *msg, int len, bool scrub_err);
int __init skx_adxl_get(void);
void __exit skx_adxl_put(void);
int skx_adxl_get(void);
void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
......
......@@ -35,12 +35,6 @@ enum {
ERR_UNKNOWN = 3,
};
#define MAX_SYNDROME_REGS 4
struct error_syndrome {
u64 reg[MAX_SYNDROME_REGS];
};
struct error_descr {
int type;
u64 mask;
......
......@@ -49,26 +49,26 @@ static bool legacy_hole_en(struct addr_ctx *ctx)
return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg);
}
static int add_legacy_hole(struct addr_ctx *ctx)
static u64 add_legacy_hole(struct addr_ctx *ctx, u64 addr)
{
u32 dram_hole_base;
u8 func = 0;
if (!legacy_hole_en(ctx))
return 0;
return addr;
if (df_cfg.rev >= DF4)
func = 7;
if (addr >= df_cfg.dram_hole_base)
addr += (BIT_ULL(32) - df_cfg.dram_hole_base);
if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base))
return -EINVAL;
return addr;
}
dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;
static u64 remove_legacy_hole(struct addr_ctx *ctx, u64 addr)
{
if (!legacy_hole_en(ctx))
return addr;
if (ctx->ret_addr >= dram_hole_base)
ctx->ret_addr += (BIT_ULL(32) - dram_hole_base);
if (addr >= df_cfg.dram_hole_base)
addr -= (BIT_ULL(32) - df_cfg.dram_hole_base);
return 0;
return addr;
}
static u64 get_base_addr(struct addr_ctx *ctx)
......@@ -83,14 +83,14 @@ static u64 get_base_addr(struct addr_ctx *ctx)
return base_addr << DF_DRAM_BASE_LIMIT_LSB;
}
static int add_base_and_hole(struct addr_ctx *ctx)
u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr)
{
ctx->ret_addr += get_base_addr(ctx);
if (add_legacy_hole(ctx))
return -EINVAL;
return add_legacy_hole(ctx, addr + get_base_addr(ctx));
}
return 0;
u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr)
{
return remove_legacy_hole(ctx, addr) - get_base_addr(ctx);
}
static bool late_hole_remove(struct addr_ctx *ctx)
......@@ -125,6 +125,9 @@ unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsig
ctx.inputs.die_id = die_id;
ctx.inputs.coh_st_inst_id = coh_st_inst_id;
if (legacy_hole_en(&ctx) && !df_cfg.dram_hole_base)
return -EINVAL;
if (determine_node_id(&ctx, socket_id, die_id))
return -EINVAL;
......@@ -134,14 +137,14 @@ unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsig
if (denormalize_address(&ctx))
return -EINVAL;
if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx))
return -EINVAL;
if (!late_hole_remove(&ctx))
ctx.ret_addr = add_base_and_hole(&ctx, ctx.ret_addr);
if (dehash_address(&ctx))
return -EINVAL;
if (late_hole_remove(&ctx) && add_base_and_hole(&ctx))
return -EINVAL;
if (late_hole_remove(&ctx))
ctx.ret_addr = add_base_and_hole(&ctx, ctx.ret_addr);
if (addr_over_limit(&ctx))
return -EINVAL;
......@@ -206,7 +209,7 @@ static int __init amd_atl_init(void)
__module_get(THIS_MODULE);
amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr);
pr_info("AMD Address Translation Library initialized");
pr_info("AMD Address Translation Library initialized\n");
return 0;
}
......@@ -222,4 +225,5 @@ static void __exit amd_atl_exit(void)
module_init(amd_atl_init);
module_exit(amd_atl_exit);
MODULE_DESCRIPTION("AMD Address Translation Library");
MODULE_LICENSE("GPL");
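
To make the base-and-hole arithmetic in core.c above concrete, here is a small
standalone rendition with made-up numbers; the real hole base comes from
df_cfg.dram_hole_base, which is now read early (see get_dram_hole_base() in
the system.c hunk further down).

/*
 * DRAM that would sit underneath the 32-bit legacy MMIO hole is remapped
 * above 4 GB, so DRAM-offset <-> system-physical-address conversion must
 * skip the hole. Values below are purely illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t dram_hole_base = 0xC0000000ULL;		   /* example */
	const uint64_t hole_size = (1ULL << 32) - dram_hole_base;  /* 1 GB */
	uint64_t addr = 0xD0000000ULL;	/* DRAM-relative address */

	/* Forward direction, cf. add_legacy_hole(): DRAM offset -> SPA. */
	uint64_t spa = (addr >= dram_hole_base) ? addr + hole_size : addr;

	/* Reverse direction, cf. remove_legacy_hole(): SPA -> DRAM offset. */
	uint64_t back = (spa >= dram_hole_base) ? spa - hole_size : spa;

	printf("addr=%#llx spa=%#llx back=%#llx\n",
	       (unsigned long long)addr, (unsigned long long)spa,
	       (unsigned long long)back);
	return 0;
}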
......@@ -12,41 +12,10 @@
#include "internal.h"
/*
* Verify the interleave bits are correct in the different interleaving
* settings.
*
* If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the
* respective interleaving is disabled.
*/
static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2,
u8 num_intlv_dies, u8 num_intlv_sockets)
{
if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
return false;
}
if (ctx->map.num_intlv_dies > num_intlv_dies) {
pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
return false;
}
if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
return false;
}
return true;
}
static int df2_dehash_addr(struct addr_ctx *ctx)
{
u8 hashed_bit, intlv_bit, intlv_bit_pos;
if (!map_bits_valid(ctx, 8, 9, 1, 1))
return -EINVAL;
intlv_bit_pos = ctx->map.intlv_bit_pos;
intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
......@@ -67,9 +36,6 @@ static int df3_dehash_addr(struct addr_ctx *ctx)
bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
u8 hashed_bit, intlv_bit, intlv_bit_pos;
if (!map_bits_valid(ctx, 8, 9, 1, 1))
return -EINVAL;
hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
......@@ -171,9 +137,6 @@ static int df4_dehash_addr(struct addr_ctx *ctx)
bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
u8 hashed_bit, intlv_bit;
if (!map_bits_valid(ctx, 8, 8, 1, 2))
return -EINVAL;
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
......@@ -247,9 +210,6 @@ static int df4p5_dehash_addr(struct addr_ctx *ctx)
u8 hashed_bit, intlv_bit;
u64 rehash_vector;
if (!map_bits_valid(ctx, 8, 8, 1, 2))
return -EINVAL;
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
......@@ -360,9 +320,6 @@ static int mi300_dehash_addr(struct addr_ctx *ctx)
bool hashed_bit, intlv_bit, test_bit;
u8 num_intlv_bits, base_bit, i;
if (!map_bits_valid(ctx, 8, 8, 4, 1))
return -EINVAL;
hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl);
hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
......
This diff is collapsed.
......@@ -21,6 +21,9 @@
#include "reg_fields.h"
#undef pr_fmt
#define pr_fmt(fmt) "amd_atl: " fmt
/* Maximum possible number of Coherent Stations within a single Data Fabric. */
#define MAX_COH_ST_CHANNELS 32
......@@ -34,6 +37,8 @@
#define DF_DRAM_BASE_LIMIT_LSB 28
#define MI300_DRAM_LIMIT_LSB 20
#define INVALID_SPA ~0ULL
enum df_revisions {
UNKNOWN,
DF2,
......@@ -90,6 +95,44 @@ enum intlv_modes {
DF4p5_NPS1_10CHAN_2K_HASH = 0x49,
};
struct df4p5_denorm_ctx {
/* Indicates the number of "lost" bits. This will be 1, 2, or 3. */
u8 perm_shift;
/* A mask indicating the bits that need to be rehashed. */
u16 rehash_vector;
/*
* Represents the value that the high bits of the normalized address
* are divided by during normalization. This value will be 3 for
* interleave modes with a number of channels divisible by 3 or the
* value will be 5 for interleave modes with a number of channels
* divisible by 5. Power-of-two interleave modes are handled
* separately.
*/
u8 mod_value;
/*
* Represents the bits that can be directly pulled from the normalized
* address. In each case, pass through bits [7:0] of the normalized
* address. The other bits depend on the interleave bit position which
* will be bit 10 for 1K interleave stripe cases and bit 11 for 2K
* interleave stripe cases.
*/
u64 base_denorm_addr;
/*
* Represents the high bits of the physical address that have been
* divided by the mod_value.
*/
u64 div_addr;
u64 current_spa;
u64 resolved_spa;
u16 coh_st_fabric_id;
};
struct df_flags {
__u8 legacy_ficaa : 1,
socket_id_shift_quirk : 1,
......@@ -132,6 +175,8 @@ struct df_config {
/* Number of DRAM Address maps visible in a Coherent Station. */
u8 num_coh_st_maps;
u32 dram_hole_base;
/* Global flags to handle special cases. */
struct df_flags flags;
};
......@@ -234,6 +279,9 @@ int dehash_address(struct addr_ctx *ctx);
unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr);
unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err);
u64 add_base_and_hole(struct addr_ctx *ctx, u64 addr);
u64 remove_base_and_hole(struct addr_ctx *ctx, u64 addr);
/*
* Make a gap in @data that is @num_bits long starting at @bit_num.
* e.g. data = 11111111'b
......
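
The "make a gap" comment just above describes the bit-manipulation helper the
library uses when reinserting interleave bits into a denormalized address. A
minimal userspace rendition of that operation, matching the 11111111'b example
in the comment (the function name here is made up, not the in-tree one):

#include <stdint.h>
#include <stdio.h>

/* Open a @num_bits-wide gap of zero bits in @data starting at @bit_num. */
static uint64_t make_gap(uint8_t bit_num, uint8_t num_bits, uint64_t data)
{
	uint64_t low_mask = (1ULL << bit_num) - 1;

	/* Keep bits below the gap, shift the rest up past the gap. */
	return (data & low_mask) | ((data & ~low_mask) << num_bits);
}

int main(void)
{
	/* 11111111'b with a 2-bit gap at bit 3 becomes 1111100111'b (0x3e7). */
	printf("%#llx\n", (unsigned long long)make_gap(3, 2, 0xff));
	return 0;
}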
......@@ -642,6 +642,99 @@ static int get_global_map_data(struct addr_ctx *ctx)
return 0;
}
/*
* Verify the interleave bits are correct in the different interleaving
* settings.
*
* If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the
* respective interleaving is disabled.
*/
static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2,
u8 num_intlv_dies, u8 num_intlv_sockets)
{
if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
return false;
}
if (ctx->map.num_intlv_dies > num_intlv_dies) {
pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
return false;
}
if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
return false;
}
return true;
}
static int validate_address_map(struct addr_ctx *ctx)
{
switch (ctx->map.intlv_mode) {
case DF2_2CHAN_HASH:
case DF3_COD4_2CHAN_HASH:
case DF3_COD2_4CHAN_HASH:
case DF3_COD1_8CHAN_HASH:
if (!map_bits_valid(ctx, 8, 9, 1, 1))
goto err;
break;
case DF4_NPS4_2CHAN_HASH:
case DF4_NPS2_4CHAN_HASH:
case DF4_NPS1_8CHAN_HASH:
case DF4p5_NPS4_2CHAN_1K_HASH:
case DF4p5_NPS4_2CHAN_2K_HASH:
case DF4p5_NPS2_4CHAN_1K_HASH:
case DF4p5_NPS2_4CHAN_2K_HASH:
case DF4p5_NPS1_8CHAN_1K_HASH:
case DF4p5_NPS1_8CHAN_2K_HASH:
case DF4p5_NPS1_16CHAN_1K_HASH:
case DF4p5_NPS1_16CHAN_2K_HASH:
if (!map_bits_valid(ctx, 8, 8, 1, 2))
goto err;
break;
case DF4p5_NPS4_3CHAN_1K_HASH:
case DF4p5_NPS4_3CHAN_2K_HASH:
case DF4p5_NPS2_5CHAN_1K_HASH:
case DF4p5_NPS2_5CHAN_2K_HASH:
case DF4p5_NPS2_6CHAN_1K_HASH:
case DF4p5_NPS2_6CHAN_2K_HASH:
case DF4p5_NPS1_10CHAN_1K_HASH:
case DF4p5_NPS1_10CHAN_2K_HASH:
case DF4p5_NPS1_12CHAN_1K_HASH:
case DF4p5_NPS1_12CHAN_2K_HASH:
if (ctx->map.num_intlv_sockets != 1 || !map_bits_valid(ctx, 8, 0, 1, 1))
goto err;
break;
case DF4p5_NPS0_24CHAN_1K_HASH:
case DF4p5_NPS0_24CHAN_2K_HASH:
if (ctx->map.num_intlv_sockets < 2 || !map_bits_valid(ctx, 8, 0, 1, 2))
goto err;
break;
case MI3_HASH_8CHAN:
case MI3_HASH_16CHAN:
case MI3_HASH_32CHAN:
if (!map_bits_valid(ctx, 8, 8, 4, 1))
goto err;
break;
/* Nothing to do for modes that don't need special validation checks. */
default:
break;
}
return 0;
err:
atl_debug(ctx, "Inconsistent address map");
return -EINVAL;
}
static void dump_address_map(struct dram_addr_map *map)
{
u8 i;
......@@ -678,5 +771,9 @@ int get_address_map(struct addr_ctx *ctx)
dump_address_map(&ctx->map);
ret = validate_address_map(ctx);
if (ret)
return ret;
return ret;
}
......@@ -223,6 +223,21 @@ static int determine_df_rev(void)
return -EINVAL;
}
static int get_dram_hole_base(void)
{
u8 func = 0;
if (df_cfg.rev >= DF4)
func = 7;
if (df_indirect_read_broadcast(0, func, 0x104, &df_cfg.dram_hole_base))
return -EINVAL;
df_cfg.dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;
return 0;
}
static void get_num_maps(void)
{
switch (df_cfg.rev) {
......@@ -266,6 +281,7 @@ static void dump_df_cfg(void)
pr_debug("num_coh_st_maps=%u", df_cfg.num_coh_st_maps);
pr_debug("dram_hole_base=0x%x", df_cfg.dram_hole_base);
pr_debug("flags.legacy_ficaa=%u", df_cfg.flags.legacy_ficaa);
pr_debug("flags.socket_id_shift_quirk=%u", df_cfg.flags.socket_id_shift_quirk);
}
......@@ -273,7 +289,7 @@ static void dump_df_cfg(void)
int get_df_system_info(void)
{
if (determine_df_rev()) {
pr_warn("amd_atl: Failed to determine DF Revision");
pr_warn("Failed to determine DF Revision");
df_cfg.rev = UNKNOWN;
return -EINVAL;
}
......@@ -282,6 +298,9 @@ int get_df_system_info(void)
get_num_maps();
if (get_dram_hole_base())
pr_warn("Failed to read DRAM hole base");
dump_df_cfg();
return 0;
......
......@@ -56,6 +56,8 @@
#include "../debugfs.h"
#include "atl/internal.h"
#define INVALID_CPU UINT_MAX
/* Validation Bits */
......@@ -116,8 +118,6 @@ static struct fru_rec **fru_records;
/* system physical addresses array */
static u64 *spa_entries;
#define INVALID_SPA ~0ULL
static struct dentry *fmpm_dfs_dir;
static struct dentry *fmpm_dfs_entries;
......