Commit f78eae2e authored by David S. Miller, committed by David S. Miller

[SPARC64]: Proper multi-core scheduling support.

The scheduling domain hierarchy is:

   all cpus -->
      cpus that share an instruction cache -->
          cpus that share an integer execution unit
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d887ab3a
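
The two cpumask arrays introduced below encode the bottom two levels of that hierarchy: cpu_core_map groups cpus that share an instruction cache (the CONFIG_SCHED_MC level), and cpu_sibling_map groups cpus that share an integer execution unit (the CONFIG_SCHED_SMT level). The following is a minimal user-space sketch of the grouping rule applied by smp_fill_in_sib_core_maps() in this patch; the toy cpuinfo table and 8-cpu machine are hypothetical, and the kernel's fallback for unknown ids (core_id 0 / proc_id -1, which place a cpu alone in its map) is noted in a comment but not exercised:

#include <stdio.h>

#define NCPUS 8

/* Toy stand-in for cpuinfo_sparc: just the two ids this patch uses. */
struct cpuinfo { int core_id; int proc_id; };

/* Hypothetical chip: 2 cores (shared i-cache), 2 integer units each. */
static const struct cpuinfo cpu[NCPUS] = {
        { 1, 0 }, { 1, 0 }, { 1, 1 }, { 1, 1 },
        { 2, 2 }, { 2, 2 }, { 2, 3 }, { 2, 3 },
};

int main(void)
{
        for (int i = 0; i < NCPUS; i++) {
                unsigned int core_map = 0, sibling_map = 0;

                /* Same double loop as smp_fill_in_sib_core_maps():
                 * equal core_id => core siblings (shared i-cache),
                 * equal proc_id => thread siblings (shared int unit).
                 * The kernel instead maps a cpu alone when core_id == 0
                 * or proc_id == -1, i.e. when no information exists. */
                for (int j = 0; j < NCPUS; j++) {
                        if (cpu[i].core_id == cpu[j].core_id)
                                core_map |= 1u << j;
                        if (cpu[i].proc_id == cpu[j].proc_id)
                                sibling_map |= 1u << j;
                }
                printf("cpu%d: core_map=%02x sibling_map=%02x\n",
                       i, core_map, sibling_map);
        }
        return 0;
}

On the toy machine this prints, for cpu0, core_map=0f and sibling_map=03: four strands share its instruction cache, but only two share its integer pipeline.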
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -396,6 +396,15 @@ config SCHED_SMT
 	  when dealing with UltraSPARC cpus at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on SMP
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config CMDLINE_BOOL
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
 	}
 }
 
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+	int i;
+
+	for (i = 0; i < mp->num_arcs; i++) {
+		struct mdesc_node *t = mp->arcs[i].arc;
+		const u64 *id;
+
+		if (strcmp(mp->arcs[i].name, "back"))
+			continue;
+
+		if (strcmp(t->name, "cpu"))
+			continue;
+
+		id = md_get_property(t, "id", NULL);
+		if (*id < NR_CPUS)
+			cpu_data(*id).proc_id = proc_id;
+	}
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+	struct mdesc_node *mp;
+	int idx;
+
+	idx = 0;
+	md_for_each_node_by_name(mp, exec_unit_name) {
+		const char *type;
+		int len;
+
+		type = md_get_property(mp, "type", &len);
+		if (!find_in_proplist(type, "int", len) &&
+		    !find_in_proplist(type, "integer", len))
+			continue;
+
+		mark_proc_ids(mp, idx);
+
+		idx++;
+	}
+}
+
+static void __init set_proc_ids(void)
+{
+	__set_proc_ids("exec_unit");
+	__set_proc_ids("exec-unit");
+}
+
 static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
 {
 	u64 val;
@@ -574,9 +621,11 @@ static void __init mdesc_fill_in_cpu_data(void)
 #endif
 
 		c->core_id = 0;
+		c->proc_id = -1;
 	}
 
 	set_core_ids();
+	set_proc_ids();
 
 	smp_fill_in_sib_core_maps();
 }
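
To picture what the mdesc.c walk above does: the hypervisor's machine description is a graph, and every exec-unit node whose "type" property lists "int" or "integer" is assigned the next proc_id, which is then stamped on each cpu node reachable over a "back" arc. Below is a self-contained sketch of that numbering on a toy graph; the struct layout only loosely mirrors mdesc_node, and a plain strcmp() stands in for the property-list match done by find_in_proplist():

#include <stdio.h>
#include <string.h>

/* Toy node: loosely mirrors mdesc_node, not the real layout. */
struct node {
        const char *name;       /* "exec_unit" or "cpu" */
        const char *type;       /* "int", "integer", "fp", ... */
        int id;                 /* cpu id, valid when name == "cpu" */
        int nback;              /* number of "back" arcs */
        struct node *back[4];   /* targets of those arcs */
};

static int proc_id[8];

/* Mirrors mark_proc_ids(): stamp pid on every cpu behind this unit. */
static void mark(struct node *unit, int pid)
{
        for (int i = 0; i < unit->nback; i++)
                if (strcmp(unit->back[i]->name, "cpu") == 0)
                        proc_id[unit->back[i]->id] = pid;
}

int main(void)
{
        struct node cpu0 = { "cpu", NULL, 0 };
        struct node cpu1 = { "cpu", NULL, 1 };
        /* Two integer pipelines plus one shared FPU; only the integer
         * units may receive a proc_id, exactly as in __set_proc_ids(). */
        struct node iu0 = { "exec_unit", "int", -1, 1, { &cpu0 } };
        struct node iu1 = { "exec_unit", "int", -1, 1, { &cpu1 } };
        struct node fpu = { "exec_unit", "fp",  -1, 2, { &cpu0, &cpu1 } };
        struct node *units[] = { &iu0, &iu1, &fpu };
        int idx = 0;

        for (int i = 0; i < 3; i++) {
                if (strcmp(units[i]->type, "int") &&
                    strcmp(units[i]->type, "integer"))
                        continue;       /* skip non-integer units */
                mark(units[i], idx++);
        }
        printf("cpu0 proc_id=%d, cpu1 proc_id=%d\n",
               proc_id[0], proc_id[1]);
        return 0;
}

Running this prints proc_id 0 and 1 for the two cpus: the shared FPU is ignored, which is why strands that share only a floating-point unit still land in different sibling groups.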
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1800,6 +1800,7 @@ static void __init of_fill_in_cpu_data(void)
 			cpu_data(cpuid).core_id = 0;
 		}
+		cpu_data(cpuid).proc_id = -1;
 
 #ifdef CONFIG_SMP
 		cpu_set(cpuid, cpu_present_map);
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -51,6 +51,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
@@ -1217,13 +1219,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
 		unsigned int j;
 
 		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_sibling_map[i]);
+			cpu_set(i, cpu_core_map[i]);
 			continue;
 		}
 
 		for_each_possible_cpu(j) {
 			if (cpu_data(i).core_id ==
 			    cpu_data(j).core_id)
+				cpu_set(j, cpu_core_map[i]);
+		}
+	}
+
+	for_each_possible_cpu(i) {
+		unsigned int j;
+
+		if (cpu_data(i).proc_id == -1) {
+			cpu_set(i, cpu_sibling_map[i]);
+			continue;
+		}
+
+		for_each_possible_cpu(j) {
+			if (cpu_data(i).proc_id ==
+			    cpu_data(j).proc_id)
 				cpu_set(j, cpu_sibling_map[i]);
 		}
 	}
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -31,7 +31,7 @@ typedef struct {
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
 	int		core_id;
-	unsigned int	__pad3;
+	int		proc_id;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -33,6 +33,7 @@ extern cpumask_t phys_cpu_present_map;
 #define cpu_possible_map phys_cpu_present_map
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
 
 /*
  * General functions that each host system must provide.
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -1,12 +1,19 @@
 #ifndef _ASM_SPARC64_TOPOLOGY_H
 #define _ASM_SPARC64_TOPOLOGY_H
 
+#ifdef CONFIG_SMP
 #include <asm/spitfire.h>
-#define smt_capable()	(tlb_type == hypervisor)
 
-#include <asm-generic/topology.h>
-
+#define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
+#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
 #define topology_thread_siblings(cpu)		(cpu_sibling_map[cpu])
+#define mc_capable()	(tlb_type == hypervisor)
+#define smt_capable()	(tlb_type == hypervisor)
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/topology.h>
+
+#define cpu_coregroup_map(cpu)	(cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
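
These macros are what the generic drivers/base/topology.c code reads when it populates /sys/devices/system/cpu/cpuN/topology/, so the effect of the patch is visible from user space. A minimal reader, assuming a standard sysfs mount and, for brevity, a hard-coded four cpus:

#include <stdio.h>

static void show(int cpu, const char *leaf)
{
        char path[128], buf[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, leaf);
        f = fopen(path, "r");
        if (!f)
                return;         /* cpu absent or file not exported */
        if (fgets(buf, sizeof(buf), f))
                printf("cpu%d %s: %s", cpu, leaf, buf);
        fclose(f);
}

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++) {
                show(cpu, "physical_package_id");  /* proc_id */
                show(cpu, "core_id");              /* core_id */
                show(cpu, "core_siblings");        /* cpu_core_map */
                show(cpu, "thread_siblings");      /* cpu_sibling_map */
        }
        return 0;
}

Note the sparc64-specific twist visible in the diff above: topology_physical_package_id() reports proc_id, the integer-execution-unit grouping, rather than a socket number.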