Commit f155f3b3 authored by Linus Torvalds

Merge tag 'x86_platform_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 platform updates from Borislav Petkov:

 - Make sure PCI function 4 IDs of AMD family 0x19, models 0x60-0x7f are
   actually used in the amd_nb.c enumeration

 - Add support for extracting NUMA information from devicetree for
   Hyper-V usages

 - Add PCI device IDs for the new AMD MI300 AI accelerators

 - Annotate an array in struct uv_rtc_timer_head with the new
   __counted_by attribute

 - Rework UV's NMI action parameter handling

* tag 'x86_platform_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/amd_nb: Use Family 19h Models 60h-7Fh Function 4 IDs
  x86/numa: Add Devicetree support
  x86/of: Move the x86_flattree_get_config() call out of x86_dtb_init()
  x86/amd_nb: Add AMD Family MI300 PCI IDs
  x86/platform/uv: Annotate struct uv_rtc_timer_head with __counted_by
  x86/platform/uv: Rework NMI "action" modparam handling
parents ca2e9c3b 2a565258
...@@ -1534,6 +1534,7 @@ config NUMA ...@@ -1534,6 +1534,7 @@ config NUMA
depends on X86_64 || (X86_32 && HIGHMEM64G && X86_BIGSMP) depends on X86_64 || (X86_32 && HIGHMEM64G && X86_BIGSMP)
default y if X86_BIGSMP default y if X86_BIGSMP
select USE_PERCPU_NUMA_NODE_ID select USE_PERCPU_NUMA_NODE_ID
select OF_NUMA if OF
help help
Enable NUMA (Non-Uniform Memory Access) support. Enable NUMA (Non-Uniform Memory Access) support.
......
...@@ -31,6 +31,11 @@ static inline void x86_dtb_init(void) { } ...@@ -31,6 +31,11 @@ static inline void x86_dtb_init(void) { }
#define of_ioapic 0 #define of_ioapic 0
#endif #endif
#ifdef CONFIG_OF_EARLY_FLATTREE
void x86_flattree_get_config(void);
#else
static inline void x86_flattree_get_config(void) { }
#endif
extern char cmd_line[COMMAND_LINE_SIZE]; extern char cmd_line[COMMAND_LINE_SIZE];
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT 0x153a #define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT 0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
#define PCI_DEVICE_ID_AMD_MI200_ROOT 0x14bb #define PCI_DEVICE_ID_AMD_MI200_ROOT 0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT 0x14f8
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
...@@ -43,6 +44,7 @@ ...@@ -43,6 +44,7 @@
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4
#define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4 0x152c
/* Protect the PCI config register pairs used for SMN. */ /* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex); static DEFINE_MUTEX(smn_mutex);
...@@ -62,6 +64,7 @@ static const struct pci_device_id amd_root_ids[] = { ...@@ -62,6 +64,7 @@ static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
{} {}
}; };
...@@ -93,6 +96,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = { ...@@ -93,6 +96,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
{} {}
}; };
...@@ -112,9 +116,13 @@ static const struct pci_device_id amd_nb_link_ids[] = { ...@@ -112,9 +116,13 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
{} {}
}; };
......
...@@ -278,7 +278,7 @@ static void __init dtb_apic_setup(void) ...@@ -278,7 +278,7 @@ static void __init dtb_apic_setup(void)
} }
#ifdef CONFIG_OF_EARLY_FLATTREE #ifdef CONFIG_OF_EARLY_FLATTREE
static void __init x86_flattree_get_config(void) void __init x86_flattree_get_config(void)
{ {
u32 size, map_len; u32 size, map_len;
void *dt; void *dt;
...@@ -300,14 +300,10 @@ static void __init x86_flattree_get_config(void) ...@@ -300,14 +300,10 @@ static void __init x86_flattree_get_config(void)
unflatten_and_copy_device_tree(); unflatten_and_copy_device_tree();
early_memunmap(dt, map_len); early_memunmap(dt, map_len);
} }
#else
static inline void x86_flattree_get_config(void) { }
#endif #endif
void __init x86_dtb_init(void) void __init x86_dtb_init(void)
{ {
x86_flattree_get_config();
if (!of_have_populated_dt()) if (!of_have_populated_dt())
return; return;
......
...@@ -1217,6 +1217,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -1217,6 +1217,8 @@ void __init setup_arch(char **cmdline_p)
early_acpi_boot_init(); early_acpi_boot_init();
x86_flattree_get_config();
initmem_init(); initmem_init();
dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/of.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/memblock.h> #include <linux/memblock.h>
...@@ -733,6 +734,8 @@ void __init x86_numa_init(void) ...@@ -733,6 +734,8 @@ void __init x86_numa_init(void)
if (!numa_init(amd_numa_init)) if (!numa_init(amd_numa_init))
return; return;
#endif #endif
if (acpi_disabled && !numa_init(of_numa_init))
return;
} }
numa_init(dummy_numa_init); numa_init(dummy_numa_init);
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/debug.h> #include <linux/sched/debug.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h>
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <asm/apic.h> #include <asm/apic.h>
...@@ -178,49 +179,56 @@ module_param_named(debug, uv_nmi_debug, int, 0644); ...@@ -178,49 +179,56 @@ module_param_named(debug, uv_nmi_debug, int, 0644);
} while (0) } while (0)
/* Valid NMI Actions */ /* Valid NMI Actions */
#define ACTION_LEN 16 enum action_t {
static struct nmi_action { nmi_act_kdump,
char *action; nmi_act_dump,
char *desc; nmi_act_ips,
} valid_acts[] = { nmi_act_kdb,
{ "kdump", "do kernel crash dump" }, nmi_act_kgdb,
{ "dump", "dump process stack for each cpu" }, nmi_act_health,
{ "ips", "dump Inst Ptr info for each cpu" }, nmi_act_max
{ "kdb", "enter KDB (needs kgdboc= assignment)" },
{ "kgdb", "enter KGDB (needs gdb target remote)" },
{ "health", "check if CPUs respond to NMI" },
}; };
typedef char action_t[ACTION_LEN];
static action_t uv_nmi_action = { "dump" }; static const char * const actions[nmi_act_max] = {
[nmi_act_kdump] = "kdump",
[nmi_act_dump] = "dump",
[nmi_act_ips] = "ips",
[nmi_act_kdb] = "kdb",
[nmi_act_kgdb] = "kgdb",
[nmi_act_health] = "health",
};
static const char * const actions_desc[nmi_act_max] = {
[nmi_act_kdump] = "do kernel crash dump",
[nmi_act_dump] = "dump process stack for each cpu",
[nmi_act_ips] = "dump Inst Ptr info for each cpu",
[nmi_act_kdb] = "enter KDB (needs kgdboc= assignment)",
[nmi_act_kgdb] = "enter KGDB (needs gdb target remote)",
[nmi_act_health] = "check if CPUs respond to NMI",
};
static enum action_t uv_nmi_action = nmi_act_dump;
static int param_get_action(char *buffer, const struct kernel_param *kp) static int param_get_action(char *buffer, const struct kernel_param *kp)
{ {
return sprintf(buffer, "%s\n", uv_nmi_action); return sprintf(buffer, "%s\n", actions[uv_nmi_action]);
} }
static int param_set_action(const char *val, const struct kernel_param *kp) static int param_set_action(const char *val, const struct kernel_param *kp)
{ {
int i; int i, n = ARRAY_SIZE(actions);
int n = ARRAY_SIZE(valid_acts);
char arg[ACTION_LEN];
/* (remove possible '\n') */
strscpy(arg, val, strnchrnul(val, sizeof(arg)-1, '\n') - val + 1);
for (i = 0; i < n; i++)
if (!strcmp(arg, valid_acts[i].action))
break;
if (i < n) { i = sysfs_match_string(actions, val);
strscpy(uv_nmi_action, arg, sizeof(uv_nmi_action)); if (i >= 0) {
pr_info("UV: New NMI action:%s\n", uv_nmi_action); uv_nmi_action = i;
pr_info("UV: New NMI action:%s\n", actions[i]);
return 0; return 0;
} }
pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg); pr_err("UV: Invalid NMI action. Valid actions are:\n");
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
pr_err("UV: %-8s - %s\n", pr_err("UV: %-8s - %s\n", actions[i], actions_desc[i]);
valid_acts[i].action, valid_acts[i].desc);
return -EINVAL; return -EINVAL;
} }
...@@ -228,15 +236,10 @@ static const struct kernel_param_ops param_ops_action = { ...@@ -228,15 +236,10 @@ static const struct kernel_param_ops param_ops_action = {
.get = param_get_action, .get = param_get_action,
.set = param_set_action, .set = param_set_action,
}; };
#define param_check_action(name, p) __param_check(name, p, action_t) #define param_check_action(name, p) __param_check(name, p, enum action_t)
module_param_named(action, uv_nmi_action, action, 0644); module_param_named(action, uv_nmi_action, action, 0644);
static inline bool uv_nmi_action_is(const char *action)
{
return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}
/* Setup which NMI support is present in system */ /* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void) static void uv_nmi_setup_mmrs(void)
{ {
...@@ -727,10 +730,10 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs) ...@@ -727,10 +730,10 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
if (cpu == 0) if (cpu == 0)
uv_nmi_dump_cpu_ip_hdr(); uv_nmi_dump_cpu_ip_hdr();
if (current->pid != 0 || !uv_nmi_action_is("ips")) if (current->pid != 0 || uv_nmi_action != nmi_act_ips)
uv_nmi_dump_cpu_ip(cpu, regs); uv_nmi_dump_cpu_ip(cpu, regs);
if (uv_nmi_action_is("dump")) { if (uv_nmi_action == nmi_act_dump) {
pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu); pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
show_regs(regs); show_regs(regs);
} }
...@@ -798,7 +801,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) ...@@ -798,7 +801,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
int saved_console_loglevel = console_loglevel; int saved_console_loglevel = console_loglevel;
pr_alert("UV: tracing %s for %d CPUs from CPU %d\n", pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
uv_nmi_action_is("ips") ? "IPs" : "processes", uv_nmi_action == nmi_act_ips ? "IPs" : "processes",
atomic_read(&uv_nmi_cpus_in_nmi), cpu); atomic_read(&uv_nmi_cpus_in_nmi), cpu);
console_loglevel = uv_nmi_loglevel; console_loglevel = uv_nmi_loglevel;
...@@ -874,7 +877,7 @@ static inline int uv_nmi_kdb_reason(void) ...@@ -874,7 +877,7 @@ static inline int uv_nmi_kdb_reason(void)
static inline int uv_nmi_kdb_reason(void) static inline int uv_nmi_kdb_reason(void)
{ {
/* Ensure user is expecting to attach gdb remote */ /* Ensure user is expecting to attach gdb remote */
if (uv_nmi_action_is("kgdb")) if (uv_nmi_action == nmi_act_kgdb)
return 0; return 0;
pr_err("UV: NMI error: KDB is not enabled in this kernel\n"); pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
...@@ -950,28 +953,35 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) ...@@ -950,28 +953,35 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
master = (atomic_read(&uv_nmi_cpu) == cpu); master = (atomic_read(&uv_nmi_cpu) == cpu);
/* If NMI action is "kdump", then attempt to do it */ /* If NMI action is "kdump", then attempt to do it */
if (uv_nmi_action_is("kdump")) { if (uv_nmi_action == nmi_act_kdump) {
uv_nmi_kdump(cpu, master, regs); uv_nmi_kdump(cpu, master, regs);
/* Unexpected return, revert action to "dump" */ /* Unexpected return, revert action to "dump" */
if (master) if (master)
strscpy(uv_nmi_action, "dump", sizeof(uv_nmi_action)); uv_nmi_action = nmi_act_dump;
} }
/* Pause as all CPU's enter the NMI handler */ /* Pause as all CPU's enter the NMI handler */
uv_nmi_wait(master); uv_nmi_wait(master);
/* Process actions other than "kdump": */ /* Process actions other than "kdump": */
if (uv_nmi_action_is("health")) { switch (uv_nmi_action) {
case nmi_act_health:
uv_nmi_action_health(cpu, regs, master); uv_nmi_action_health(cpu, regs, master);
} else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) { break;
case nmi_act_ips:
case nmi_act_dump:
uv_nmi_dump_state(cpu, regs, master); uv_nmi_dump_state(cpu, regs, master);
} else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) { break;
case nmi_act_kdb:
case nmi_act_kgdb:
uv_call_kgdb_kdb(cpu, regs, master); uv_call_kgdb_kdb(cpu, regs, master);
} else { break;
default:
if (master) if (master)
pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action); pr_alert("UV: unknown NMI action: %d\n", uv_nmi_action);
uv_nmi_sync_exit(master); uv_nmi_sync_exit(master);
break;
} }
/* Clear per_cpu "in_nmi" flag */ /* Clear per_cpu "in_nmi" flag */
......
...@@ -53,7 +53,7 @@ struct uv_rtc_timer_head { ...@@ -53,7 +53,7 @@ struct uv_rtc_timer_head {
struct { struct {
int lcpu; /* systemwide logical cpu number */ int lcpu; /* systemwide logical cpu number */
u64 expires; /* next timer expiration for this cpu */ u64 expires; /* next timer expiration for this cpu */
} cpu[]; } cpu[] __counted_by(ncpus);
}; };
/* /*
......
...@@ -579,6 +579,7 @@ ...@@ -579,6 +579,7 @@
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3 #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3 #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment