Commit 3881024f authored by David S. Miller

[SPARC64]: Kill linux_cpus[]/linux_num_cpus, replace with cpu probe helpers.

parent 52c72abc
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/errno.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/oplib.h> #include <asm/oplib.h>
...@@ -24,68 +25,113 @@ ...@@ -24,68 +25,113 @@
*/ */
spinlock_t ns87303_lock = SPIN_LOCK_UNLOCKED; spinlock_t ns87303_lock = SPIN_LOCK_UNLOCKED;
struct prom_cpuinfo linux_cpus[NR_CPUS] __initdata = { { 0 } };
int linux_num_cpus = 0;
extern void cpu_probe(void); extern void cpu_probe(void);
extern void central_probe(void); extern void central_probe(void);
void __init device_scan(void) static char *cpu_mid_prop(void)
{
if (tlb_type == spitfire)
return "upa-portid";
return "portid";
}
/* Examine one device tree node.  If it is a cpu node and the caller's
 * predicate accepts it at the current instance count, report its prom
 * node handle and module ID.
 *
 * Returns 0 on a match, filling *prom_node and *mid when those
 * pointers are non-NULL; returns -ENODEV otherwise.  *cur_inst is
 * advanced once for every cpu node the predicate rejects.
 */
static int check_cpu_node(int nd, int *cur_inst,
			  int (*compare)(int, int, void *), void *compare_arg,
			  int *prom_node, int *mid)
{
	char node_str[128];

	prom_getstring(nd, "device_type", node_str, sizeof(node_str));
	if (strcmp(node_str, "cpu"))
		return -ENODEV;

	if (compare(nd, *cur_inst, compare_arg)) {
		/* A cpu node, but not the one wanted: count it and move on. */
		(*cur_inst)++;
		return -ENODEV;
	}

	if (prom_node)
		*prom_node = nd;
	if (mid)
		*mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
	return 0;
}
/* Walk the PROM device tree (root node, then the root's children)
 * applying @compare to each cpu node until it accepts one.
 *
 * Returns 0 and fills *prom_node/*mid (via check_cpu_node) on
 * success, -ENODEV when no cpu node satisfies the predicate.
 */
static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
			 int *prom_node, int *mid)
{
	int cur_inst = 0;
	int nd = prom_root_node;

	if (!check_cpu_node(nd, &cur_inst, compare, compare_arg,
			    prom_node, mid))
		return 0;

	/* NOTE(review): prom_getsibling() is taken before the first
	 * child is ever examined, so the root's first child is skipped.
	 * This mirrors the historical device_scan() loop -- confirm it
	 * is intentional.
	 */
	nd = prom_getchild(nd);
	while ((nd = prom_getsibling(nd)) != 0) {
		if (!check_cpu_node(nd, &cur_inst, compare, compare_arg,
				    prom_node, mid))
			return 0;
	}

	return -ENODEV;
}
/* Predicate for cpu_find_by_instance(): match the Nth cpu node seen.
 * The desired instance number travels through @_arg as a cast integer;
 * @nd is unused here but required by the predicate signature.
 */
static int cpu_instance_compare(int nd, int instance, void *_arg)
{
	int wanted = (int) (long) _arg;

	return (instance == wanted) ? 0 : -ENODEV;
}
/* Find the cpu device tree node with the given probe order
 * (instance 0 is the first cpu encountered).  Fills *prom_node and
 * *mid when non-NULL; returns 0 on success, -ENODEV otherwise.
 */
int cpu_find_by_instance(int instance, int *prom_node, int *mid)
{
	void *arg = (void *)(long) instance;

	return __cpu_find_by(cpu_instance_compare, arg, prom_node, mid);
}
/* Predicate for cpu_find_by_mid(): match on the node's module ID
 * property (see cpu_mid_prop()).  @instance is unused.
 */
static int cpu_mid_compare(int nd, int instance, void *_arg)
{
	int wanted = (int) (long) _arg;

	if (prom_getintdefault(nd, cpu_mid_prop(), 0) != wanted)
		return -ENODEV;
	return 0;
}
/* Find the cpu device tree node whose module ID equals @mid and
 * store its handle in *prom_node when non-NULL.  Returns 0 on
 * success, -ENODEV if no such cpu exists.
 */
int cpu_find_by_mid(int mid, int *prom_node)
{
	void *arg = (void *)(long) mid;

	return __cpu_find_by(cpu_mid_compare, arg, prom_node, NULL);
}
void __init device_scan(void)
{
/* FIX ME FAST... -DaveM */ /* FIX ME FAST... -DaveM */
ioport_resource.end = 0xffffffffffffffffUL; ioport_resource.end = 0xffffffffffffffffUL;
prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
prom_printf("Booting Linux...\n"); prom_printf("Booting Linux...\n");
if(strcmp(node_str, "cpu") == 0) {
cpu_nds[0] = prom_root_node;
linux_cpus[0].prom_node = prom_root_node;
linux_cpus[0].mid = 0;
cpu_ctr++;
} else {
int scan;
scan = prom_getchild(prom_root_node);
/* prom_printf("root child is %08x\n", (unsigned) scan); */
nd = 0;
while((scan = prom_getsibling(scan)) != 0) {
prom_getstring(scan, "device_type", node_str, sizeof(node_str));
if(strcmp(node_str, "cpu") == 0) {
cpu_nds[cpu_ctr] = scan;
linux_cpus[cpu_ctr].prom_node = scan;
thismid = 0;
if (tlb_type == spitfire) {
prom_getproperty(scan, "upa-portid",
(char *) &thismid, sizeof(thismid));
} else if (tlb_type == cheetah ||
tlb_type == cheetah_plus) {
prom_getproperty(scan, "portid",
(char *) &thismid, sizeof(thismid));
}
linux_cpus[cpu_ctr].mid = thismid;
printk("Found CPU %d (node=%08x,mid=%d)\n",
cpu_ctr, (unsigned) scan, thismid);
cpu_ctr++;
}
};
if(cpu_ctr == 0) {
prom_printf("No CPU nodes found, cannot continue.\n");
prom_halt();
}
printk("Found %d CPU prom device tree node(s).\n", cpu_ctr);
}
prom_node_cpu = cpu_nds[0];
linux_num_cpus = cpu_ctr;
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
{ {
up_clock_tick = prom_getintdefault(prom_node_cpu, int err, cpu_node;
err = cpu_find_by_instance(0, &cpu_node, NULL);
if (err) {
prom_printf("No cpu nodes, cannot continue\n");
prom_halt();
}
up_clock_tick = prom_getintdefault(cpu_node,
"clock-frequency", "clock-frequency",
0); 0);
} }
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <asm/iommu.h> #include <asm/iommu.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/oplib.h>
#include "pci_impl.h" #include "pci_impl.h"
#include "iommu_common.h" #include "iommu_common.h"
...@@ -1518,11 +1519,12 @@ void __init sabre_init(int pnode, char *model_name) ...@@ -1518,11 +1519,12 @@ void __init sabre_init(int pnode, char *model_name)
!strcmp(compat, "pci108e,a001")) { !strcmp(compat, "pci108e,a001")) {
hummingbird_p = 1; hummingbird_p = 1;
} else { } else {
int cpu_node = linux_cpus[0].prom_node; int cpu_node;
/* Of course, Sun has to encode things a thousand /* Of course, Sun has to encode things a thousand
* different ways, inconsistently. * different ways, inconsistently.
*/ */
cpu_find_by_instance(0, &cpu_node, NULL);
if (prom_getproperty(cpu_node, "name", if (prom_getproperty(cpu_node, "name",
compat, sizeof(compat)) > 0 && compat, sizeof(compat)) > 0 &&
!strcmp(compat, "SUNW,UltraSPARC-IIe")) !strcmp(compat, "SUNW,UltraSPARC-IIe"))
......
...@@ -618,7 +618,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) ...@@ -618,7 +618,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
prom_prev >> 16, prom_prev >> 16,
(prom_prev >> 8) & 0xff, (prom_prev >> 8) & 0xff,
prom_prev & 0xff, prom_prev & 0xff,
(long)linux_num_cpus, (long)num_possible_cpus(),
(long)num_online_cpus() (long)num_online_cpus()
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
, loops_per_jiffy/(500000/HZ), , loops_per_jiffy/(500000/HZ),
......
...@@ -80,17 +80,14 @@ void smp_bogo(struct seq_file *m) ...@@ -80,17 +80,14 @@ void smp_bogo(struct seq_file *m)
void __init smp_store_cpu_info(int id) void __init smp_store_cpu_info(int id)
{ {
int i, no; int i, cpu_node;
/* multiplier and counter set by /* multiplier and counter set by
smp_setup_percpu_timer() */ smp_setup_percpu_timer() */
cpu_data[id].udelay_val = loops_per_jiffy; cpu_data[id].udelay_val = loops_per_jiffy;
for (no = 0; no < linux_num_cpus; no++) cpu_find_by_mid(id, &cpu_node);
if (linux_cpus[no].mid == id) cpu_data[id].clock_tick = prom_getintdefault(cpu_node,
break;
cpu_data[id].clock_tick = prom_getintdefault(linux_cpus[no].prom_node,
"clock-frequency", 0); "clock-frequency", 0);
cpu_data[id].pgcache_size = 0; cpu_data[id].pgcache_size = 0;
...@@ -297,8 +294,6 @@ static void smp_synchronize_one_tick(int cpu) ...@@ -297,8 +294,6 @@ static void smp_synchronize_one_tick(int cpu)
spin_unlock_irqrestore(&itc_sync_lock, flags); spin_unlock_irqrestore(&itc_sync_lock, flags);
} }
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
extern unsigned long sparc64_cpu_startup; extern unsigned long sparc64_cpu_startup;
/* The OBP cpu startup callback truncates the 3rd arg cookie to /* The OBP cpu startup callback truncates the 3rd arg cookie to
...@@ -314,7 +309,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) ...@@ -314,7 +309,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
unsigned long cookie = unsigned long cookie =
(unsigned long)(&cpu_new_thread); (unsigned long)(&cpu_new_thread);
struct task_struct *p; struct task_struct *p;
int timeout, no, ret; int timeout, ret, cpu_node;
kernel_thread(NULL, NULL, CLONE_IDLETASK); kernel_thread(NULL, NULL, CLONE_IDLETASK);
...@@ -325,12 +320,12 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) ...@@ -325,12 +320,12 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
unhash_process(p); unhash_process(p);
callin_flag = 0; callin_flag = 0;
for (no = 0; no < linux_num_cpus; no++)
if (linux_cpus[no].mid == cpu)
break;
cpu_new_thread = p->thread_info; cpu_new_thread = p->thread_info;
cpu_set(cpu, cpu_callout_map); cpu_set(cpu, cpu_callout_map);
prom_startcpu(linux_cpus[no].prom_node, entry, cookie);
cpu_find_by_mid(cpu, &cpu_node);
prom_startcpu(cpu_node, entry, cookie);
for (timeout = 0; timeout < 5000000; timeout++) { for (timeout = 0; timeout < 5000000; timeout++) {
if (callin_flag) if (callin_flag)
break; break;
...@@ -1150,6 +1145,7 @@ static void __init smp_tune_scheduling(void) ...@@ -1150,6 +1145,7 @@ static void __init smp_tune_scheduling(void)
unsigned long orig_flush_base, flush_base, flags, *p; unsigned long orig_flush_base, flush_base, flags, *p;
unsigned int ecache_size, order; unsigned int ecache_size, order;
cycles_t tick1, tick2, raw; cycles_t tick1, tick2, raw;
int cpu_node;
/* Approximate heuristic for SMP scheduling. It is an /* Approximate heuristic for SMP scheduling. It is an
* estimation of the time it takes to flush the L2 cache * estimation of the time it takes to flush the L2 cache
...@@ -1167,7 +1163,8 @@ static void __init smp_tune_scheduling(void) ...@@ -1167,7 +1163,8 @@ static void __init smp_tune_scheduling(void)
goto report; goto report;
} }
ecache_size = prom_getintdefault(linux_cpus[0].prom_node, cpu_find_by_instance(0, &cpu_node, NULL);
ecache_size = prom_getintdefault(cpu_node,
"ecache-size", (512 * 1024)); "ecache-size", (512 * 1024));
if (ecache_size > (4 * 1024 * 1024)) if (ecache_size > (4 * 1024 * 1024))
ecache_size = (4 * 1024 * 1024); ecache_size = (4 * 1024 * 1024);
...@@ -1249,22 +1246,27 @@ int setup_profiling_timer(unsigned int multiplier) ...@@ -1249,22 +1246,27 @@ int setup_profiling_timer(unsigned int multiplier)
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
int i; int instance, mid;
for (i = 0; i < linux_num_cpus; i++) { instance = 0;
if (linux_cpus[i].mid < max_cpus) { while (!cpu_find_by_instance(instance, NULL, &mid)) {
cpu_set(linux_cpus[i].mid, phys_cpu_present_map); if (mid < max_cpus) {
cpu_set(mid, phys_cpu_present_map);
atomic_inc(&sparc64_num_cpus_possible); atomic_inc(&sparc64_num_cpus_possible);
} }
instance++;
} }
if (atomic_read(&sparc64_num_cpus_possible) > max_cpus) { if (atomic_read(&sparc64_num_cpus_possible) > max_cpus) {
for (i = linux_num_cpus - 1; i >= 0; i--) { instance = 0;
if (linux_cpus[i].mid != boot_cpu_id) { while (!cpu_find_by_instance(instance, NULL, &mid)) {
cpu_clear(linux_cpus[i].mid, phys_cpu_present_map); if (mid != boot_cpu_id) {
cpu_clear(mid, phys_cpu_present_map);
atomic_dec(&sparc64_num_cpus_possible); atomic_dec(&sparc64_num_cpus_possible);
if (atomic_read(&sparc64_num_cpus_possible) <= max_cpus) if (atomic_read(&sparc64_num_cpus_possible) <= max_cpus)
break; break;
} }
instance++;
} }
} }
......
...@@ -956,7 +956,7 @@ static unsigned long sparc64_init_timers(irqreturn_t (*cfunc)(int, void *, struc ...@@ -956,7 +956,7 @@ static unsigned long sparc64_init_timers(irqreturn_t (*cfunc)(int, void *, struc
clock = prom_getint(node, "stick-frequency"); clock = prom_getint(node, "stick-frequency");
} else { } else {
tick_ops = &tick_operations; tick_ops = &tick_operations;
node = linux_cpus[0].prom_node; cpu_find_by_instance(0, &node, NULL);
clock = prom_getint(node, "clock-frequency"); clock = prom_getint(node, "clock-frequency");
} }
} else { } else {
......
...@@ -613,8 +613,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector ...@@ -613,8 +613,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector
void __init cheetah_ecache_flush_init(void) void __init cheetah_ecache_flush_init(void)
{ {
unsigned long largest_size, smallest_linesize, order, ver; unsigned long largest_size, smallest_linesize, order, ver;
char type[16]; int node, i, instance;
int node, i;
/* Scan all cpu device tree nodes, note two values: /* Scan all cpu device tree nodes, note two values:
* 1) largest E-cache size * 1) largest E-cache size
...@@ -622,21 +621,21 @@ void __init cheetah_ecache_flush_init(void) ...@@ -622,21 +621,21 @@ void __init cheetah_ecache_flush_init(void)
*/ */
largest_size = 0UL; largest_size = 0UL;
smallest_linesize = ~0UL; smallest_linesize = ~0UL;
node = prom_getchild(prom_root_node);
while ((node = prom_getsibling(node)) != 0) { instance = 0;
prom_getstring(node, "device_type", type, sizeof(type)); while (!cpu_find_by_instance(instance, &node, NULL)) {
if (!strcmp(type, "cpu")) { unsigned long val;
unsigned long val;
val = prom_getintdefault(node, "ecache-size",
val = prom_getintdefault(node, "ecache-size", (2 * 1024 * 1024));
(2 * 1024 * 1024)); if (val > largest_size)
if (val > largest_size) largest_size = val;
largest_size = val; val = prom_getintdefault(node, "ecache-line-size", 64);
val = prom_getintdefault(node, "ecache-line-size", 64); if (val < smallest_linesize)
if (val < smallest_linesize) smallest_linesize = val;
smallest_linesize = val; instance++;
}
} }
if (largest_size == 0UL || smallest_linesize == ~0UL) { if (largest_size == 0UL || smallest_linesize == ~0UL) {
prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache " prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
"parameters.\n"); "parameters.\n");
......
...@@ -327,6 +327,10 @@ extern int prom_setprop(int node, char *prop_name, char *prop_value, ...@@ -327,6 +327,10 @@ extern int prom_setprop(int node, char *prop_name, char *prop_value,
extern int prom_pathtoinode(char *path); extern int prom_pathtoinode(char *path);
extern int prom_inst2pkg(int); extern int prom_inst2pkg(int);
/* CPU probing helpers. */
int cpu_find_by_instance(int instance, int *prom_node, int *mid);
int cpu_find_by_mid(int mid, int *prom_node);
/* Client interface level routines. */ /* Client interface level routines. */
extern void prom_set_trap_table(unsigned long tba); extern void prom_set_trap_table(unsigned long tba);
......
...@@ -17,18 +17,6 @@ ...@@ -17,18 +17,6 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/cache.h> #include <linux/cache.h>
/* PROM provided per-processor information we need
* to start them all up.
*/
struct prom_cpuinfo {
int prom_node;
int mid;
};
extern int linux_num_cpus; /* number of CPUs probed */
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
#endif /* !(__ASSEMBLY__) */ #endif /* !(__ASSEMBLY__) */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment