Commit 62773112 authored by Helge Deller

parisc: Switch from GENERIC_CPU_DEVICES to GENERIC_ARCH_TOPOLOGY

Switch away from the own cpu topology code to common code which is used
by ARM64 and RISCV. That will allow us to enable CPU hotplug later on.
Signed-off-by: Helge Deller <deller@gmx.de>
parent 1e93848a
...@@ -37,7 +37,7 @@ config PARISC ...@@ -37,7 +37,7 @@ config PARISC
select GENERIC_PCI_IOMAP select GENERIC_PCI_IOMAP
select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD select GENERIC_SMP_IDLE_THREAD
select GENERIC_CPU_DEVICES select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE select SYSCTL_EXCEPTION_TRACE
...@@ -279,16 +279,9 @@ config SMP ...@@ -279,16 +279,9 @@ config SMP
If you don't know what to do here, say N. If you don't know what to do here, say N.
config PARISC_CPU_TOPOLOGY
bool "Support cpu topology definition"
depends on SMP
default y
help
Support PARISC cpu topology definition.
config SCHED_MC config SCHED_MC
bool "Multi-core scheduler support" bool "Multi-core scheduler support"
depends on PARISC_CPU_TOPOLOGY && PA8X00 depends on GENERIC_ARCH_TOPOLOGY && PA8X00
help help
Multi-core scheduler support improves the CPU scheduler's decision Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly making when dealing with multi-core CPU chips at a cost of slightly
......
#ifndef _ASM_PARISC_TOPOLOGY_H #ifndef _ASM_PARISC_TOPOLOGY_H
#define _ASM_PARISC_TOPOLOGY_H #define _ASM_PARISC_TOPOLOGY_H
#ifdef CONFIG_PARISC_CPU_TOPOLOGY #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/arch_topology.h>
struct cputopo_parisc {
int thread_id;
int core_id;
int socket_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
};
extern struct cputopo_parisc cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
#else #else
static inline void init_cpu_topology(void) { } static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { } static inline void store_cpu_topology(unsigned int cpuid) { }
static inline void reset_cpu_topology(void) { }
#endif #endif
......
...@@ -31,7 +31,7 @@ obj-$(CONFIG_AUDIT) += audit.o ...@@ -31,7 +31,7 @@ obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o
# only supported for PCX-W/U in 64-bit mode at the moment # only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y) obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/random.h> #include <linux/random.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <asm/topology.h>
#include <asm/param.h> #include <asm/param.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/hardware.h> /* for register_parisc_driver() stuff */ #include <asm/hardware.h> /* for register_parisc_driver() stuff */
...@@ -390,7 +391,7 @@ show_cpuinfo (struct seq_file *m, void *v) ...@@ -390,7 +391,7 @@ show_cpuinfo (struct seq_file *m, void *v)
boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 ); boot_cpu_data.cpu_hz % 1000000 );
#ifdef CONFIG_PARISC_CPU_TOPOLOGY #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
seq_printf(m, "physical id\t: %d\n", seq_printf(m, "physical id\t: %d\n",
topology_physical_package_id(cpu)); topology_physical_package_id(cpu));
seq_printf(m, "siblings\t: %d\n", seq_printf(m, "siblings\t: %d\n",
...@@ -460,5 +461,6 @@ static struct parisc_driver cpu_driver __refdata = { ...@@ -460,5 +461,6 @@ static struct parisc_driver cpu_driver __refdata = {
*/ */
void __init processor_init(void) void __init processor_init(void)
{ {
reset_cpu_topology();
register_parisc_driver(&cpu_driver); register_parisc_driver(&cpu_driver);
} }
...@@ -13,45 +13,11 @@ ...@@ -13,45 +13,11 @@
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/sched/topology.h> #include <linux/sched/topology.h>
#include <linux/cpu.h>
#include <asm/topology.h> #include <asm/topology.h>
/* static DEFINE_PER_CPU(struct cpu, cpu_devices);
* cpu topology table
*/
struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
EXPORT_SYMBOL_GPL(cpu_topology);
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_sibling;
}
static void update_siblings_masks(unsigned int cpuid)
{
struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
int cpu;
/* update core and thread sibling masks */
for_each_possible_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
if (cpuid_topo->socket_id != cpu_topo->socket_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
if (cpu != cpuid)
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
if (cpuid_topo->core_id != cpu_topo->core_id)
continue;
cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
if (cpu != cpuid)
cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
}
smp_wmb();
}
static int dualcores_found __initdata; static int dualcores_found __initdata;
...@@ -62,7 +28,7 @@ static int dualcores_found __initdata; ...@@ -62,7 +28,7 @@ static int dualcores_found __initdata;
*/ */
void __init store_cpu_topology(unsigned int cpuid) void __init store_cpu_topology(unsigned int cpuid)
{ {
struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid]; struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
struct cpuinfo_parisc *p; struct cpuinfo_parisc *p;
int max_socket = -1; int max_socket = -1;
unsigned long cpu; unsigned long cpu;
...@@ -71,6 +37,12 @@ void __init store_cpu_topology(unsigned int cpuid) ...@@ -71,6 +37,12 @@ void __init store_cpu_topology(unsigned int cpuid)
if (cpuid_topo->core_id != -1) if (cpuid_topo->core_id != -1)
return; return;
#ifdef CONFIG_HOTPLUG_CPU
per_cpu(cpu_devices, cpuid).hotpluggable = 1;
#endif
if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
pr_warn("Failed to register CPU%d device", cpuid);
/* create cpu topology mapping */ /* create cpu topology mapping */
cpuid_topo->thread_id = -1; cpuid_topo->thread_id = -1;
cpuid_topo->core_id = 0; cpuid_topo->core_id = 0;
...@@ -86,25 +58,25 @@ void __init store_cpu_topology(unsigned int cpuid) ...@@ -86,25 +58,25 @@ void __init store_cpu_topology(unsigned int cpuid)
cpuid_topo->core_id = cpu_topology[cpu].core_id; cpuid_topo->core_id = cpu_topology[cpu].core_id;
if (p->cpu_loc) { if (p->cpu_loc) {
cpuid_topo->core_id++; cpuid_topo->core_id++;
cpuid_topo->socket_id = cpu_topology[cpu].socket_id; cpuid_topo->package_id = cpu_topology[cpu].package_id;
dualcores_found = 1; dualcores_found = 1;
continue; continue;
} }
} }
if (cpuid_topo->socket_id == -1) if (cpuid_topo->package_id == -1)
max_socket = max(max_socket, cpu_topology[cpu].socket_id); max_socket = max(max_socket, cpu_topology[cpu].package_id);
} }
if (cpuid_topo->socket_id == -1) if (cpuid_topo->package_id == -1)
cpuid_topo->socket_id = max_socket + 1; cpuid_topo->package_id = max_socket + 1;
update_siblings_masks(cpuid); update_siblings_masks(cpuid);
pr_info("CPU%u: cpu core %d of socket %d\n", pr_info("CPU%u: cpu core %d of socket %d\n",
cpuid, cpuid,
cpu_topology[cpuid].core_id, cpu_topology[cpuid].core_id,
cpu_topology[cpuid].socket_id); cpu_topology[cpuid].package_id);
} }
static struct sched_domain_topology_level parisc_mc_topology[] = { static struct sched_domain_topology_level parisc_mc_topology[] = {
...@@ -122,20 +94,6 @@ static struct sched_domain_topology_level parisc_mc_topology[] = { ...@@ -122,20 +94,6 @@ static struct sched_domain_topology_level parisc_mc_topology[] = {
*/ */
void __init init_cpu_topology(void) void __init init_cpu_topology(void)
{ {
unsigned int cpu;
/* init core mask and capacity */
for_each_possible_cpu(cpu) {
struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);
cpu_topo->thread_id = -1;
cpu_topo->core_id = -1;
cpu_topo->socket_id = -1;
cpumask_clear(&cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
}
smp_wmb();
/* Set scheduler topology descriptor */ /* Set scheduler topology descriptor */
if (dualcores_found) if (dualcores_found)
set_sched_topology(parisc_mc_topology); set_sched_topology(parisc_mc_topology);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment