Commit 222dfb83 authored by Linus Torvalds

Merge tag 'x86_misc_for_v6.11_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 updates from Borislav Petkov:

 - Make error checking of AMD SMN accesses more robust in the callers as
   they're the only ones who can interpret the results properly

 - The usual cleanups and fixes, left and right

* tag 'x86_misc_for_v6.11_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/kmsan: Fix hook for unaligned accesses
  x86/platform/iosf_mbi: Convert PCIBIOS_* return codes to errnos
  x86/pci/xen: Fix PCIBIOS_* return code handling
  x86/pci/intel_mid_pci: Fix PCIBIOS_* return code handling
  x86/of: Return consistent error type from x86_of_pci_irq_enable()
  hwmon: (k10temp) Rename _data variable
  hwmon: (k10temp) Remove unused HAVE_TDIE() macro
  hwmon: (k10temp) Reduce k10temp_get_ccd_support() parameters
  hwmon: (k10temp) Define a helper function to read CCD temperature
  x86/amd_nb: Enhance SMN access error checking
  hwmon: (k10temp) Check return value of amd_smn_read()
  EDAC/amd64: Check return value of amd_smn_read()
  EDAC/amd64: Remove unused register accesses
  tools/x86/kcpuid: Add missing dir via Makefile
  x86, arm: Add missing license tag to syscall tables files
parents 1d86d352 bf6ab33d
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
# #
# Linux system call numbers and entry vectors # Linux system call numbers and entry vectors
# #
......
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
# #
# 32-bit system call numbers and entry vectors # 32-bit system call numbers and entry vectors
# #
......
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
# #
# 64-bit system call numbers and entry vectors # 64-bit system call numbers and entry vectors
# #
......
...@@ -21,8 +21,8 @@ extern int amd_numa_init(void); ...@@ -21,8 +21,8 @@ extern int amd_numa_init(void);
extern int amd_get_subcaches(int); extern int amd_get_subcaches(int);
extern int amd_set_subcaches(int, unsigned long); extern int amd_set_subcaches(int, unsigned long);
extern int amd_smn_read(u16 node, u32 address, u32 *value); int __must_check amd_smn_read(u16 node, u32 address, u32 *value);
extern int amd_smn_write(u16 node, u32 address, u32 value); int __must_check amd_smn_write(u16 node, u32 address, u32 value);
struct amd_l3_cache { struct amd_l3_cache {
unsigned indices; unsigned indices;
......
...@@ -180,6 +180,43 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev, ...@@ -180,6 +180,43 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev,
return dev; return dev;
} }
/*
* SMN accesses may fail in ways that are difficult to detect here in the called
* functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
* their own checking based on what behavior they expect.
*
* For SMN reads, the returned value may be zero if the register is Read-as-Zero.
* Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
* can be checked here, and a proper error code can be returned.
*
* But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this is correct for the
* register/fields they need.
*
 * For SMN writes, success can be determined through a "write and read back".
* However, this is not robust when done here.
*
* Possible issues:
*
* 1) Bits that are "Write-1-to-Clear". In this case, the read value should
* *not* match the write value.
*
* 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
* known here.
*
* 3) Bits that are "Reserved / Set to 1". Ditto above.
*
* Callers of amd_smn_write() should do the "write and read back" check
* themselves, if needed.
*
* For #1, they can see if their target bits got cleared.
*
* For #2 and #3, they can check if their target bits got set as intended.
*
* This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
* the operation is considered a success, and the caller does their own
* checking.
*/
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write) static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{ {
struct pci_dev *root; struct pci_dev *root;
...@@ -202,9 +239,6 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write) ...@@ -202,9 +239,6 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
err = (write ? pci_write_config_dword(root, 0x64, *value) err = (write ? pci_write_config_dword(root, 0x64, *value)
: pci_read_config_dword(root, 0x64, value)); : pci_read_config_dword(root, 0x64, value));
if (err)
pr_warn("Error %s SMN address 0x%x.\n",
(write ? "writing to" : "reading from"), address);
out_unlock: out_unlock:
mutex_unlock(&smn_mutex); mutex_unlock(&smn_mutex);
...@@ -213,7 +247,7 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write) ...@@ -213,7 +247,7 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
return err; return err;
} }
int amd_smn_read(u16 node, u32 address, u32 *value) int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{ {
int err = __amd_smn_rw(node, address, value, false); int err = __amd_smn_rw(node, address, value, false);
...@@ -226,7 +260,7 @@ int amd_smn_read(u16 node, u32 address, u32 *value) ...@@ -226,7 +260,7 @@ int amd_smn_read(u16 node, u32 address, u32 *value)
} }
EXPORT_SYMBOL_GPL(amd_smn_read); EXPORT_SYMBOL_GPL(amd_smn_read);
int amd_smn_write(u16 node, u32 address, u32 value) int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{ {
return __amd_smn_rw(node, address, &value, true); return __amd_smn_rw(node, address, &value, true);
} }
......
...@@ -83,7 +83,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev) ...@@ -83,7 +83,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); ret = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (ret) if (ret)
return ret; return pcibios_err_to_errno(ret);
if (!pin) if (!pin)
return 0; return 0;
......
...@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n) ...@@ -25,6 +25,9 @@ static __always_inline void rep_movs(void *to, const void *from, size_t n)
static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) static void string_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
{ {
const void *orig_to = to;
const size_t orig_n = n;
if (unlikely(!n)) if (unlikely(!n))
return; return;
...@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si ...@@ -39,7 +42,7 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
} }
rep_movs(to, (const void *)from, n); rep_movs(to, (const void *)from, n);
/* KMSAN must treat values read from devices as initialized. */ /* KMSAN must treat values read from devices as initialized. */
kmsan_unpoison_memory(to, n); kmsan_unpoison_memory(orig_to, orig_n);
} }
static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n) static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
......
...@@ -233,9 +233,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) ...@@ -233,9 +233,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
return 0; return 0;
ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (ret < 0) { if (ret) {
dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret); dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
return ret; return pcibios_err_to_errno(ret);
} }
id = x86_match_cpu(intel_mid_cpu_ids); id = x86_match_cpu(intel_mid_cpu_ids);
......
...@@ -38,10 +38,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev) ...@@ -38,10 +38,10 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
u8 gsi; u8 gsi;
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
if (rc < 0) { if (rc) {
dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
rc); rc);
return rc; return pcibios_err_to_errno(rc);
} }
/* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/ /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
pirq = gsi; pirq = gsi;
......
...@@ -62,7 +62,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr) ...@@ -62,7 +62,7 @@ static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
fail_read: fail_read:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
return result; return pcibios_err_to_errno(result);
} }
static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
...@@ -91,7 +91,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) ...@@ -91,7 +91,7 @@ static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
fail_write: fail_write:
dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
return result; return pcibios_err_to_errno(result);
} }
int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
......
...@@ -20,7 +20,6 @@ static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg) ...@@ -20,7 +20,6 @@ static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
return reg; return reg;
switch (reg) { switch (reg) {
case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5;
case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5; case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5; case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
} }
...@@ -1341,22 +1340,15 @@ static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) ...@@ -1341,22 +1340,15 @@ static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
static void umc_dump_misc_regs(struct amd64_pvt *pvt) static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{ {
struct amd64_umc *umc; struct amd64_umc *umc;
u32 i, tmp, umc_base; u32 i;
for_each_umc(i) { for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i]; umc = &pvt->umc[i];
edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg); edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl); edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl); edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi); edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n", edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
...@@ -1369,14 +1361,6 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt) ...@@ -1369,14 +1361,6 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n", edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
amd_smn_read(pvt->mc_node_id,
umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
&tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
}
umc_debug_display_dimm_sizes(pvt, i); umc_debug_display_dimm_sizes(pvt, i);
} }
} }
...@@ -1454,6 +1438,7 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) ...@@ -1454,6 +1438,7 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
u32 *base, *base_sec; u32 *base, *base_sec;
u32 *mask, *mask_sec; u32 *mask, *mask_sec;
int cs, umc; int cs, umc;
u32 tmp;
for_each_umc(umc) { for_each_umc(umc) {
umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
...@@ -1466,14 +1451,18 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) ...@@ -1466,14 +1451,18 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
base_reg = umc_base_reg + (cs * 4); base_reg = umc_base_reg + (cs * 4);
base_reg_sec = umc_base_reg_sec + (cs * 4); base_reg_sec = umc_base_reg_sec + (cs * 4);
if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) if (!amd_smn_read(pvt->mc_node_id, base_reg, &tmp)) {
*base = tmp;
edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n", edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *base, base_reg); umc, cs, *base, base_reg);
}
if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec)) if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, &tmp)) {
*base_sec = tmp;
edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n", edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *base_sec, base_reg_sec); umc, cs, *base_sec, base_reg_sec);
} }
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
...@@ -1485,15 +1474,19 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) ...@@ -1485,15 +1474,19 @@ static void umc_read_base_mask(struct amd64_pvt *pvt)
mask_reg = umc_mask_reg + (cs * 4); mask_reg = umc_mask_reg + (cs * 4);
mask_reg_sec = umc_mask_reg_sec + (cs * 4); mask_reg_sec = umc_mask_reg_sec + (cs * 4);
if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) if (!amd_smn_read(pvt->mc_node_id, mask_reg, &tmp)) {
*mask = tmp;
edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n", edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *mask, mask_reg); umc, cs, *mask, mask_reg);
}
if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec)) if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, &tmp)) {
*mask_sec = tmp;
edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n", edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
umc, cs, *mask_sec, mask_reg_sec); umc, cs, *mask_sec, mask_reg_sec);
} }
} }
}
} }
/* /*
...@@ -2910,7 +2903,7 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) ...@@ -2910,7 +2903,7 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt)
{ {
u8 nid = pvt->mc_node_id; u8 nid = pvt->mc_node_id;
struct amd64_umc *umc; struct amd64_umc *umc;
u32 i, umc_base; u32 i, tmp, umc_base;
/* Read registers from each UMC */ /* Read registers from each UMC */
for_each_umc(i) { for_each_umc(i) {
...@@ -2918,11 +2911,20 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) ...@@ -2918,11 +2911,20 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt)
umc_base = get_umc_base(i); umc_base = get_umc_base(i);
umc = &pvt->umc[i]; umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); if (!amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &tmp))
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); umc->dimm_cfg = tmp;
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi); umc->umc_cfg = tmp;
if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
umc->sdp_ctrl = tmp;
if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
umc->ecc_ctrl = tmp;
if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &tmp))
umc->umc_cap_hi = tmp;
} }
} }
...@@ -3651,16 +3653,21 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt) ...@@ -3651,16 +3653,21 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt)
{ {
u8 nid = pvt->mc_node_id; u8 nid = pvt->mc_node_id;
struct amd64_umc *umc; struct amd64_umc *umc;
u32 i, umc_base; u32 i, tmp, umc_base;
/* Read registers from each UMC */ /* Read registers from each UMC */
for_each_umc(i) { for_each_umc(i) {
umc_base = gpu_get_umc_base(pvt, i, 0); umc_base = gpu_get_umc_base(pvt, i, 0);
umc = &pvt->umc[i]; umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); umc->umc_cfg = tmp;
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
umc->sdp_ctrl = tmp;
if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
umc->ecc_ctrl = tmp;
} }
} }
......
...@@ -256,15 +256,11 @@ ...@@ -256,15 +256,11 @@
#define UMCCH_ADDR_MASK 0x20 #define UMCCH_ADDR_MASK 0x20
#define UMCCH_ADDR_MASK_SEC 0x28 #define UMCCH_ADDR_MASK_SEC 0x28
#define UMCCH_ADDR_MASK_SEC_DDR5 0x30 #define UMCCH_ADDR_MASK_SEC_DDR5 0x30
#define UMCCH_ADDR_CFG 0x30
#define UMCCH_ADDR_CFG_DDR5 0x40
#define UMCCH_DIMM_CFG 0x80 #define UMCCH_DIMM_CFG 0x80
#define UMCCH_DIMM_CFG_DDR5 0x90 #define UMCCH_DIMM_CFG_DDR5 0x90
#define UMCCH_UMC_CFG 0x100 #define UMCCH_UMC_CFG 0x100
#define UMCCH_SDP_CTRL 0x104 #define UMCCH_SDP_CTRL 0x104
#define UMCCH_ECC_CTRL 0x14C #define UMCCH_ECC_CTRL 0x14C
#define UMCCH_ECC_BAD_SYMBOL 0xD90
#define UMCCH_UMC_CAP 0xDF0
#define UMCCH_UMC_CAP_HI 0xDF4 #define UMCCH_UMC_CAP_HI 0xDF4
/* UMC CH bitfields */ /* UMC CH bitfields */
......
...@@ -101,7 +101,6 @@ struct k10temp_data { ...@@ -101,7 +101,6 @@ struct k10temp_data {
#define TCCD_BIT(x) ((x) + 2) #define TCCD_BIT(x) ((x) + 2)
#define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel)) #define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel))
#define HAVE_TDIE(d) HAVE_TEMP(d, TDIE_BIT)
struct tctl_offset { struct tctl_offset {
u8 model; u8 model;
...@@ -153,8 +152,16 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) ...@@ -153,8 +152,16 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval) static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{ {
amd_smn_read(amd_pci_dev_to_node_id(pdev), if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
ZEN_REPORTED_TEMP_CTRL_BASE, regval); ZEN_REPORTED_TEMP_CTRL_BASE, regval))
*regval = 0;
}
static int read_ccd_temp_reg(struct k10temp_data *data, int ccd, u32 *regval)
{
u16 node_id = amd_pci_dev_to_node_id(data->pdev);
return amd_smn_read(node_id, ZEN_CCD_TEMP(data->ccd_offset, ccd), regval);
} }
static long get_raw_temp(struct k10temp_data *data) static long get_raw_temp(struct k10temp_data *data)
...@@ -205,6 +212,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, ...@@ -205,6 +212,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
long *val) long *val)
{ {
struct k10temp_data *data = dev_get_drvdata(dev); struct k10temp_data *data = dev_get_drvdata(dev);
int ret = -EOPNOTSUPP;
u32 regval; u32 regval;
switch (attr) { switch (attr) {
...@@ -221,13 +229,15 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, ...@@ -221,13 +229,15 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
*val = 0; *val = 0;
break; break;
case 2 ... 13: /* Tccd{1-12} */ case 2 ... 13: /* Tccd{1-12} */
amd_smn_read(amd_pci_dev_to_node_id(data->pdev), ret = read_ccd_temp_reg(data, channel - 2, &regval);
ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
&regval); if (ret)
return ret;
*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
break; break;
default: default:
return -EOPNOTSUPP; return ret;
} }
break; break;
case hwmon_temp_max: case hwmon_temp_max:
...@@ -243,7 +253,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, ...@@ -243,7 +253,7 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
- ((regval >> 24) & 0xf)) * 500 + 52000; - ((regval >> 24) & 0xf)) * 500 + 52000;
break; break;
default: default:
return -EOPNOTSUPP; return ret;
} }
return 0; return 0;
} }
...@@ -259,11 +269,11 @@ static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, ...@@ -259,11 +269,11 @@ static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
} }
} }
static umode_t k10temp_is_visible(const void *_data, static umode_t k10temp_is_visible(const void *drvdata,
enum hwmon_sensor_types type, enum hwmon_sensor_types type,
u32 attr, int channel) u32 attr, int channel)
{ {
const struct k10temp_data *data = _data; const struct k10temp_data *data = drvdata;
struct pci_dev *pdev = data->pdev; struct pci_dev *pdev = data->pdev;
u32 reg; u32 reg;
...@@ -374,15 +384,25 @@ static const struct hwmon_chip_info k10temp_chip_info = { ...@@ -374,15 +384,25 @@ static const struct hwmon_chip_info k10temp_chip_info = {
.info = k10temp_info, .info = k10temp_info,
}; };
static void k10temp_get_ccd_support(struct pci_dev *pdev, static void k10temp_get_ccd_support(struct k10temp_data *data, int limit)
struct k10temp_data *data, int limit)
{ {
u32 regval; u32 regval;
int i; int i;
for (i = 0; i < limit; i++) { for (i = 0; i < limit; i++) {
amd_smn_read(amd_pci_dev_to_node_id(pdev), /*
ZEN_CCD_TEMP(data->ccd_offset, i), &regval); * Ignore inaccessible CCDs.
*
* Some systems will return a register value of 0, and the TEMP_VALID
* bit check below will naturally fail.
*
* Other systems will return a PCI_ERROR_RESPONSE (0xFFFFFFFF) for
* the register value. And this will incorrectly pass the TEMP_VALID
* bit check.
*/
if (read_ccd_temp_reg(data, i, &regval))
continue;
if (regval & ZEN_CCD_TEMP_VALID) if (regval & ZEN_CCD_TEMP_VALID)
data->show_temp |= BIT(TCCD_BIT(i)); data->show_temp |= BIT(TCCD_BIT(i));
} }
...@@ -434,18 +454,18 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -434,18 +454,18 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x11: /* Zen APU */ case 0x11: /* Zen APU */
case 0x18: /* Zen+ APU */ case 0x18: /* Zen+ APU */
data->ccd_offset = 0x154; data->ccd_offset = 0x154;
k10temp_get_ccd_support(pdev, data, 4); k10temp_get_ccd_support(data, 4);
break; break;
case 0x31: /* Zen2 Threadripper */ case 0x31: /* Zen2 Threadripper */
case 0x60: /* Renoir */ case 0x60: /* Renoir */
case 0x68: /* Lucienne */ case 0x68: /* Lucienne */
case 0x71: /* Zen2 */ case 0x71: /* Zen2 */
data->ccd_offset = 0x154; data->ccd_offset = 0x154;
k10temp_get_ccd_support(pdev, data, 8); k10temp_get_ccd_support(data, 8);
break; break;
case 0xa0 ... 0xaf: case 0xa0 ... 0xaf:
data->ccd_offset = 0x300; data->ccd_offset = 0x300;
k10temp_get_ccd_support(pdev, data, 8); k10temp_get_ccd_support(data, 8);
break; break;
} }
} else if (boot_cpu_data.x86 == 0x19) { } else if (boot_cpu_data.x86 == 0x19) {
...@@ -459,21 +479,21 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -459,21 +479,21 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x21: /* Zen3 Ryzen Desktop */ case 0x21: /* Zen3 Ryzen Desktop */
case 0x50 ... 0x5f: /* Green Sardine */ case 0x50 ... 0x5f: /* Green Sardine */
data->ccd_offset = 0x154; data->ccd_offset = 0x154;
k10temp_get_ccd_support(pdev, data, 8); k10temp_get_ccd_support(data, 8);
break; break;
case 0x40 ... 0x4f: /* Yellow Carp */ case 0x40 ... 0x4f: /* Yellow Carp */
data->ccd_offset = 0x300; data->ccd_offset = 0x300;
k10temp_get_ccd_support(pdev, data, 8); k10temp_get_ccd_support(data, 8);
break; break;
case 0x60 ... 0x6f: case 0x60 ... 0x6f:
case 0x70 ... 0x7f: case 0x70 ... 0x7f:
data->ccd_offset = 0x308; data->ccd_offset = 0x308;
k10temp_get_ccd_support(pdev, data, 8); k10temp_get_ccd_support(data, 8);
break; break;
case 0x10 ... 0x1f: case 0x10 ... 0x1f:
case 0xa0 ... 0xaf: case 0xa0 ... 0xaf:
data->ccd_offset = 0x300; data->ccd_offset = 0x300;
k10temp_get_ccd_support(pdev, data, 12); k10temp_get_ccd_support(data, 12);
break; break;
} }
} else if (boot_cpu_data.x86 == 0x1a) { } else if (boot_cpu_data.x86 == 0x1a) {
......
...@@ -19,6 +19,6 @@ clean : ...@@ -19,6 +19,6 @@ clean :
@rm -f kcpuid @rm -f kcpuid
install : kcpuid install : kcpuid
install -d $(DESTDIR)$(BINDIR) install -d $(DESTDIR)$(BINDIR) $(DESTDIR)$(HWDATADIR)
install -m 755 -p kcpuid $(DESTDIR)$(BINDIR)/kcpuid install -m 755 -p kcpuid $(DESTDIR)$(BINDIR)/kcpuid
install -m 444 -p cpuid.csv $(HWDATADIR)/cpuid.csv install -m 444 -p cpuid.csv $(DESTDIR)$(HWDATADIR)/cpuid.csv
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment