Commit 2fa182f2 authored by David S. Miller

Sparc64: Update for CPU hotplugging changes.

parent e170a233
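The substantive change in the hunks below is the retirement of cpu_present_map and the __cpu_number_map/__cpu_logical_map translation arrays in favor of a single cpu_online_map bitmask indexed directly by hardware cpu id, plus an atomic count of online cpus. As orientation, here is a minimal userspace sketch of that bookkeeping, assuming nothing beyond what the patch itself introduces; the names mirror the patch, plain C stands in for the kernel's atomic bitops, and bring_cpu_online()/take_cpu_offline() are hypothetical helpers, not functions from the patch:

    #include <stdio.h>

    #define NR_CPUS 64

    /* One bit per hw cpu id, as in the patch. */
    static unsigned long cpu_online_map;
    static int sparc64_num_cpus_online;  /* atomic_t in the kernel */

    #define cpu_online(cpu) ((cpu_online_map & (1UL << (cpu))) != 0)

    /* Hypothetical helpers; the patch does this inline with
     * set_bit()/clear_bit() and atomic_inc()/atomic_set(). */
    static void bring_cpu_online(int cpu)
    {
        cpu_online_map |= 1UL << cpu;
        sparc64_num_cpus_online++;
    }

    static void take_cpu_offline(int cpu)
    {
        cpu_online_map &= ~(1UL << cpu);
        sparc64_num_cpus_online--;
    }

    int main(void)
    {
        int i;

        bring_cpu_online(0);
        bring_cpu_online(5);  /* hw ids need not be contiguous */
        take_cpu_offline(0);

        for (i = 0; i < NR_CPUS; i++) {
            if (!cpu_online(i))
                continue;  /* the loop idiom used throughout the patch */
            printf("cpu %d is online\n", i);
        }
        printf("num_online_cpus() = %d\n", sparc64_num_cpus_online);
        return 0;
    }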
@@ -26,9 +26,6 @@ struct cpu_fp_info {
 	char* fp_name;
 };
 
-/* In order to get the fpu type correct, you need to take the IDPROM's
- * machine type value into consideration too. I will fix this.
- */
 struct cpu_fp_info linux_sparc_fpu[] = {
 	{ 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
 	{ 0x22, 0x10, 0, "UltraSparc II integrated FPU"},
@@ -51,13 +48,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
 #define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
 
-#ifdef CONFIG_SMP
-char *sparc_cpu_type[64] = { "cpu-oops", "cpu-oops1", "cpu-oops2", "cpu-oops3" };
-char *sparc_fpu_type[64] = { "fpu-oops", "fpu-oops1", "fpu-oops2", "fpu-oops3" };
-#else
-char *sparc_cpu_type[64] = { "cpu-oops", };
-char *sparc_fpu_type[64] = { "fpu-oops", };
-#endif
+char *sparc_cpu_type[NR_CPUS] = { "cpu-oops", };
+char *sparc_fpu_type[NR_CPUS] = { "fpu-oops", };
 
 unsigned int fsr_storage;
@@ -72,39 +64,47 @@ void __init cpu_probe(void)
 	fprs = fprs_read ();
 	fprs_write (FPRS_FEF);
-	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" : "=&r" (ver) : "r" (&fpu_vers));
+	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
+			      : "=&r" (ver)
+			      : "r" (&fpu_vers));
 	fprs_write (fprs);
 
-	manuf = ((ver >> 48)&0xffff);
-	impl = ((ver >> 32)&0xffff);
-	fpu_vers = ((fpu_vers>>17)&0x7);
+	manuf = ((ver >> 48) & 0xffff);
+	impl = ((ver >> 32) & 0xffff);
+	fpu_vers = ((fpu_vers >> 17) & 0x7);
 
-	for(i = 0; i<NSPARCCHIPS; i++) {
-		if(linux_sparc_chips[i].manuf == manuf)
-			if(linux_sparc_chips[i].impl == impl) {
-				sparc_cpu_type[cpuid] = linux_sparc_chips[i].cpu_name;
+	for (i = 0; i < NSPARCCHIPS; i++) {
+		if (linux_sparc_chips[i].manuf == manuf) {
+			if (linux_sparc_chips[i].impl == impl) {
+				sparc_cpu_type[cpuid] =
+					linux_sparc_chips[i].cpu_name;
 				break;
 			}
+		}
 	}
 
-	if(i==NSPARCCHIPS) {
-		printk("DEBUG: manuf = 0x%x impl = 0x%x\n", manuf,
-			    impl);
+	if (i == NSPARCCHIPS) {
+		printk("DEBUG: manuf = 0x%x impl = 0x%x\n",
+		       manuf, impl);
 		sparc_cpu_type[cpuid] = "Unknown CPU";
 	}
 
-	for(i = 0; i<NSPARCFPU; i++) {
-		if(linux_sparc_fpu[i].manuf == manuf && linux_sparc_fpu[i].impl == impl)
-			if(linux_sparc_fpu[i].fpu_vers == fpu_vers) {
-				sparc_fpu_type[cpuid] = linux_sparc_fpu[i].fp_name;
+	for (i = 0; i < NSPARCFPU; i++) {
+		if (linux_sparc_fpu[i].manuf == manuf &&
+		    linux_sparc_fpu[i].impl == impl) {
+			if (linux_sparc_fpu[i].fpu_vers == fpu_vers) {
+				sparc_fpu_type[cpuid] =
+					linux_sparc_fpu[i].fp_name;
 				break;
 			}
+		}
 	}
 
-	if(i == NSPARCFPU) {
-		printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n", manuf, impl,
-		       (unsigned)fpu_vers);
+	if (i == NSPARCFPU) {
+		printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n",
+		       manuf, impl,
+		       (unsigned int) fpu_vers);
 		sparc_fpu_type[cpuid] = "Unknown FPU";
 	}
 }
@@ -17,8 +17,8 @@
 #include <asm/smp.h>
 #include <asm/spitfire.h>
 
-struct prom_cpuinfo linux_cpus[64] __initdata = { { 0 } };
-unsigned prom_cpu_nodes[64];
+struct prom_cpuinfo linux_cpus[NR_CPUS] __initdata = { { 0 } };
+unsigned prom_cpu_nodes[NR_CPUS];
 int linux_num_cpus = 0;
 
 extern void cpu_probe(void);
...
@@ -122,9 +122,12 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < smp_num_cpus; j++)
+		for (j = 0; j < NR_CPUS; j++) {
+			if (!cpu_online(j))
+				continue;
 			seq_printf(p, "%10u ",
-				   kstat.irqs[cpu_logical_map(j)][i]);
+				   kstat.irqs[j][i]);
+		}
 #endif
 		seq_printf(p, " %s:%lx", action->name,
 			   get_ino_in_irqaction(action));
@@ -574,12 +577,18 @@ static void show(char * str)
 	printk("\n%s, CPU %d:\n", str, cpu);
 	printk("irq: %d [ ", irqs_running());
-	for (i = 0; i < smp_num_cpus; i++)
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_online(i))
+			continue;
 		printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
+	}
 	printk("]\nbh: %d [ ",
 	       (spin_is_locked(&global_bh_lock) ? 1 : 0));
-	for (i = 0; i < smp_num_cpus; i++)
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_online(i))
+			continue;
 		printk("%u ", local_bh_count(i));
+	}
 	printk("]\n");
 }
@@ -743,8 +752,9 @@ static inline void redirect_intr(int cpu, struct ino_bucket *bp)
 	unsigned long cpu_mask = get_smpaff_in_irqaction(ap);
 	unsigned int buddy, ticks;
 
+	cpu_mask &= cpu_online_map;
 	if (cpu_mask == 0)
-		cpu_mask = ~0UL;
+		cpu_mask = cpu_online_map;
 
 	if (this_is_starfire != 0 ||
 	    bp->pil >= 10 || current->pid == 0)
@@ -753,28 +763,23 @@ static inline void redirect_intr(int cpu, struct ino_bucket *bp)
 	/* 'cpu' is the MID (ie. UPAID), calculate the MID
 	 * of our buddy.
 	 */
-	buddy = cpu_number_map(cpu) + 1;
-	if (buddy >= NR_CPUS ||
-	    cpu_logical_map(buddy) == -1)
+	buddy = cpu + 1;
+	if (buddy >= NR_CPUS)
 		buddy = 0;
 
 	ticks = 0;
 	while ((cpu_mask & (1UL << buddy)) == 0) {
-		buddy++;
-		if (buddy >= NR_CPUS ||
-		    cpu_logical_map(buddy) == -1)
-			buddy = cpu_logical_map(0);
+		if (++buddy >= NR_CPUS)
+			buddy = 0;
 		if (++ticks > NR_CPUS) {
 			put_smpaff_in_irqaction(ap, 0);
 			goto out;
 		}
 	}
 
-	if (buddy == cpu_number_map(cpu))
+	if (buddy == cpu)
 		goto out;
 
-	buddy = cpu_logical_map(buddy);
-
 	/* Voo-doo programming. */
 	if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
 		goto out;
@@ -1140,22 +1145,28 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
 	unsigned long imap = bucket->imap;
 	unsigned int tid;
 
+	while (!cpu_online(goal_cpu)) {
+		if (++goal_cpu >= NR_CPUS)
+			goal_cpu = 0;
+	}
+
 	if (tlb_type == cheetah) {
-		tid = __cpu_logical_map[goal_cpu] << 26;
+		tid = goal_cpu << 26;
 		tid &= IMAP_AID_SAFARI;
 	} else if (this_is_starfire == 0) {
-		tid = __cpu_logical_map[goal_cpu] << 26;
+		tid = goal_cpu << 26;
 		tid &= IMAP_TID_UPA;
 	} else {
-		tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
+		tid = (starfire_translate(imap, goal_cpu) << 26);
 		tid &= IMAP_TID_UPA;
 	}
 
 	upa_writel(tid | IMAP_VALID, imap);
 
-	goal_cpu++;
-	if(goal_cpu >= NR_CPUS ||
-	   __cpu_logical_map[goal_cpu] == -1)
-		goal_cpu = 0;
+	while (!cpu_online(goal_cpu)) {
+		if (++goal_cpu >= NR_CPUS)
+			goal_cpu = 0;
+	}
 
 	return goal_cpu;
 }
@@ -1326,38 +1337,6 @@ static unsigned int parse_hex_value (const char *buffer,
 	return 0;
 }
 
-static unsigned long hw_to_logical(unsigned long mask)
-{
-	unsigned long new_mask = 0UL;
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++) {
-		if (mask & (1UL << i)) {
-			int logical = cpu_number_map(i);
-
-			new_mask |= (1UL << logical);
-		}
-	}
-
-	return new_mask;
-}
-
-static unsigned long logical_to_hw(unsigned long mask)
-{
-	unsigned long new_mask = 0UL;
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++) {
-		if (mask & (1UL << i)) {
-			int hw = cpu_logical_map(i);
-
-			new_mask |= (1UL << hw);
-		}
-	}
-
-	return new_mask;
-}
-
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
@@ -1365,8 +1344,6 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
 	struct irqaction *ap = bp->irq_info;
 	unsigned long mask = get_smpaff_in_irqaction(ap);
 
-	mask = logical_to_hw(mask);
-
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
 	return sprintf (page, "%016lx\n", mask == 0 ? ~0UL : mask);
@@ -1375,14 +1352,11 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
 static inline void set_intr_affinity(int irq, unsigned long hw_aff)
 {
 	struct ino_bucket *bp = ivector_table + irq;
-	unsigned long aff = hw_to_logical(hw_aff);
 
-	/*
-	 * Users specify affinity in terms of cpu ids, which is what
-	 * is displayed via /proc/cpuinfo. As soon as we do this,
-	 * handler_irq() might see and take action.
-	 */
-	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, aff);
+	/* Users specify affinity in terms of hw cpu ids.
+	 * As soon as we do this, handler_irq() might see and take action.
+	 */
+	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
 
 	/* Migration is simply done by the next cpu to service this
 	 * interrupt.
@@ -1393,7 +1367,7 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
 	int irq = (long) data, full_count = count, err;
-	unsigned long new_value;
+	unsigned long new_value, i;
 
 	err = parse_hex_value(buffer, count, &new_value);
@@ -1402,7 +1376,12 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	new_value &= cpu_online_map;
+	for (i = 0; i < NR_CPUS; i++) {
+		if ((new_value & (1UL << i)) != 0 &&
+		    !cpu_online(i))
+			new_value &= ~(1UL << i);
+	}
+
 	if (!new_value)
 		return -EINVAL;
...
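The redirect_intr() change above drops the logical-id translation and simply scans the affinity mask for the next online hardware cpu id, giving up after NR_CPUS fruitless steps. A standalone sketch of that scan follows; pick_buddy() is a hypothetical wrapper name, and it returns -1 where the patch instead clears the stored affinity and jumps to its out label:

    #include <stdio.h>

    #define NR_CPUS 64

    /* Next cpu after 'cpu' whose bit is set in 'cpu_mask', wrapping
     * around; -1 if nothing turns up within NR_CPUS steps or if the
     * scan comes back to 'cpu' itself. */
    static int pick_buddy(int cpu, unsigned long cpu_mask)
    {
        int buddy = cpu + 1, ticks = 0;

        if (buddy >= NR_CPUS)
            buddy = 0;

        while ((cpu_mask & (1UL << buddy)) == 0) {
            if (++buddy >= NR_CPUS)
                buddy = 0;
            if (++ticks > NR_CPUS)
                return -1;
        }
        return (buddy == cpu) ? -1 : buddy;
    }

    int main(void)
    {
        /* cpus 2 and 9 allowed: from cpu 2 the buddy is 9, and the
         * scan from cpu 60 wraps around and lands on 2. */
        printf("%d\n", pick_buddy(2, (1UL << 2) | (1UL << 9)));   /* 9 */
        printf("%d\n", pick_buddy(60, (1UL << 2) | (1UL << 9)));  /* 2 */
        printf("%d\n", pick_buddy(2, 1UL << 2));  /* -1: only self */
        return 0;
    }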
@@ -649,7 +649,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
 		   (prom_prev >> 8) & 0xff,
 		   prom_prev & 0xff,
 		   linux_num_cpus,
-		   smp_num_cpus
+		   num_online_cpus()
 #ifndef CONFIG_SMP
 		   , loops_per_jiffy/(500000/HZ),
 		   (loops_per_jiffy/(5000/HZ)) % 100,
...
@@ -40,13 +40,9 @@
 extern int linux_num_cpus;
 extern void calibrate_delay(void);
 
-extern unsigned prom_cpu_nodes[];
-
 cpuinfo_sparc cpu_data[NR_CPUS];
 
-volatile int __cpu_number_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
-volatile int __cpu_logical_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
-
 /* Please don't make this stuff initdata!!! --DaveM */
 static unsigned char boot_cpu_id;
 static int smp_activated;
@@ -55,8 +51,8 @@ static int smp_activated;
 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 
 volatile int smp_processors_ready = 0;
-unsigned long cpu_present_map = 0;
-int smp_num_cpus = 1;
+atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0);
+unsigned long cpu_online_map = 0;
 int smp_threads_ready = 0;
 
 void __init smp_setup(char *str, int *ints)
@@ -79,7 +75,7 @@ void smp_info(struct seq_file *m)
 	seq_printf(m, "State:\n");
 	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_present_map & (1UL << i))
+		if (cpu_online(i))
 			seq_printf(m,
 				   "CPU%d:\t\tonline\n", i);
 	}
@@ -90,7 +86,7 @@ void smp_bogo(struct seq_file *m)
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_present_map & (1UL << i))
+		if (cpu_online(i))
 			seq_printf(m,
 				   "Cpu%dBogo\t: %lu.%02lu\n"
 				   "Cpu%dClkTck\t: %016lx\n",
@@ -230,7 +226,7 @@ void cpu_panic(void)
 	panic("SMP bolixed\n");
 }
 
-extern struct prom_cpuinfo linux_cpus[64];
+extern struct prom_cpuinfo linux_cpus[NR_CPUS];
 
 extern unsigned long sparc64_cpu_startup;
 
@@ -261,9 +257,11 @@ void __init smp_boot_cpus(void)
 		if ((cpucount + 1) == max_cpus)
 			goto ignorecpu;
-		if (cpu_present_map & (1UL << i)) {
-			unsigned long entry = (unsigned long)(&sparc64_cpu_startup);
-			unsigned long cookie = (unsigned long)(&cpu_new_thread);
+		if (cpu_online(i)) {
+			unsigned long entry =
+				(unsigned long)(&sparc64_cpu_startup);
+			unsigned long cookie =
+				(unsigned long)(&cpu_new_thread);
 			struct task_struct *p;
 			int timeout;
 			int no;
@@ -291,8 +289,7 @@ void __init smp_boot_cpus(void)
 				udelay(100);
 			}
 			if (callin_flag) {
-				__cpu_number_map[i] = cpucount;
-				__cpu_logical_map[cpucount] = i;
+				atomic_inc(&sparc64_num_cpus_online);
 				prom_cpu_nodes[i] = linux_cpus[no].prom_node;
 				prom_printf("OK\n");
 			} else {
@@ -300,31 +297,33 @@ void __init smp_boot_cpus(void)
 				printk("Processor %d is stuck.\n", i);
 				prom_printf("FAILED\n");
 			}
-		}
-		if (!callin_flag) {
+			if (!callin_flag) {
ignorecpu:
-			cpu_present_map &= ~(1UL << i);
-			__cpu_number_map[i] = -1;
+				clear_bit(i, &cpu_online_map);
+			}
 		}
 	}
 	cpu_new_thread = NULL;
 	if (cpucount == 0) {
 		if (max_cpus != 1)
 			printk("Error: only one processor found.\n");
-		cpu_present_map = (1UL << smp_processor_id());
+		memset(&cpu_online_map, 0, sizeof(cpu_online_map));
+		set_bit(smp_processor_id(), &cpu_online_map);
+		atomic_set(&sparc64_num_cpus_online, 1);
 	} else {
 		unsigned long bogosum = 0;
 		for (i = 0; i < NR_CPUS; i++) {
-			if (cpu_present_map & (1UL << i))
+			if (cpu_online(i))
 				bogosum += cpu_data[i].udelay_val;
 		}
-		printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+		printk("Total of %d processors activated "
+		       "(%lu.%02lu BogoMIPS).\n",
 		       cpucount + 1,
 		       bogosum/(500000/HZ),
 		       (bogosum/(5000/HZ))%100);
 		smp_activated = 1;
-		smp_num_cpus = cpucount + 1;
 	}
 
 	/* We want to run this with all the other cpus spinning
@@ -372,8 +371,9 @@ static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, u
 	membar #Sync"
 	: "=r" (tmp)
 	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
-	  "r" (data0), "r" (data1), "r" (data2), "r" (target), "r" (0x10), "0" (tmp)
+	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
+	  "r" (0x10), "0" (tmp)
 	: "g1");
 
 	/* NOTE: PSTATE_IE is still clear. */
 	stuck = 100000;
@@ -403,15 +403,16 @@ static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, u
 
 static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long mask)
 {
-	int ncpus = smp_num_cpus - 1;
-	int i;
 	u64 pstate;
+	int i;
 
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-	for (i = 0; (i < NR_CPUS) && ncpus; i++) {
+	for (i = 0; i < NR_CPUS; i++) {
 		if (mask & (1UL << i)) {
 			spitfire_xcall_helper(data0, data1, data2, pstate, i);
-			ncpus--;
+			mask &= ~(1UL << i);
+			if (!mask)
+				break;
 		}
 	}
 }
@@ -449,18 +450,22 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long
 	nack_busy_id = 0;
 	{
-		int i, ncpus = smp_num_cpus - 1;
+		unsigned long work_mask = mask;
+		int i;
 
-		for (i = 0; (i < NR_CPUS) && ncpus; i++) {
-			if (mask & (1UL << i)) {
+		for (i = 0; i < NR_CPUS; i++) {
+			if (work_mask & (1UL << i)) {
 				u64 target = (i << 14) | 0x70;
 
 				target |= (nack_busy_id++ << 24);
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync\n\t"
-						     : /* no outputs */
-						     : "r" (target), "i" (ASI_INTR_W));
-				ncpus--;
+				__asm__ __volatile__(
+					"stxa %%g0, [%0] %1\n\t"
+					"membar #Sync\n\t"
+					: /* no outputs */
+					: "r" (target), "i" (ASI_INTR_W));
+				work_mask &= ~(1UL << i);
+				if (!work_mask)
+					break;
 			}
 		}
 	}
@@ -494,6 +499,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long
 			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
 			       smp_processor_id(), dispatch_stat);
 		} else {
+			unsigned long work_mask = mask;
 			int i, this_busy_nack = 0;
 
 			/* Delay some random time with interrupts enabled
@@ -505,10 +511,14 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long
 			 * NACK us.
 			 */
 			for (i = 0; i < NR_CPUS; i++) {
-				if (mask & (1UL << i)) {
-					if ((dispatch_stat & (0x2 << this_busy_nack)) == 0)
+				if (work_mask & (1UL << i)) {
+					if ((dispatch_stat &
+					     (0x2 << this_busy_nack)) == 0)
 						mask &= ~(1UL << i);
 					this_busy_nack += 2;
+					work_mask &= ~(1UL << i);
+					if (!work_mask)
+						break;
 				}
 			}
@@ -525,6 +535,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 	if (smp_processors_ready) {
 		u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 
+		mask &= cpu_online_map;
 		mask &= ~(1UL<<smp_processor_id());
 
 		if (tlb_type == spitfire)
@@ -538,7 +549,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 
 /* Send cross call to all processors except self. */
 #define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, cpu_present_map)
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
 
 struct call_data_struct {
 	void (*func) (void *info);
@@ -560,7 +571,7 @@ int smp_call_function(void (*func)(void *info), void *info,
 		      int nonatomic, int wait)
 {
 	struct call_data_struct data;
-	int cpus = smp_num_cpus - 1;
+	int cpus = num_online_cpus() - 1;
 	long timeout;
 
 	if (!cpus)
@@ -596,7 +607,7 @@ int smp_call_function(void (*func)(void *info), void *info,
out_timeout:
 	spin_unlock(&call_lock);
 	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
-	       smp_num_cpus - 1, atomic_read(&data.finished));
+	       num_online_cpus() - 1, atomic_read(&data.finished));
 	return 0;
 }
@@ -657,11 +668,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 #endif
 	if (cpu == smp_processor_id()) {
 		__local_flush_dcache_page(page);
-	} else if ((cpu_present_map & mask) != 0) {
+	} else if ((cpu_online_map & mask) != 0) {
 		u64 data0;
 
 		if (tlb_type == spitfire) {
-			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
+			data0 =
+				((u64)&xcall_flush_dcache_page_spitfire);
 			if (page->mapping != NULL)
 				data0 |= ((u64)1 << 32);
 			spitfire_xcall_deliver(data0,
@@ -669,7 +681,8 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 					       (u64) page->virtual,
 					       mask);
 		} else {
-			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
+			data0 =
+				((u64)&xcall_flush_dcache_page_cheetah);
 			cheetah_xcall_deliver(data0,
 					      __pa(page->virtual),
 					      0, mask);
@@ -684,7 +697,8 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
 	if (smp_processors_ready) {
-		unsigned long mask = cpu_present_map & ~(1UL << smp_processor_id());
+		unsigned long mask =
+			cpu_online_map & ~(1UL << smp_processor_id());
 		u64 data0;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -719,8 +733,9 @@ void smp_receive_signal(int cpu)
 	if (smp_processors_ready) {
 		unsigned long mask = 1UL << cpu;
 
-		if ((cpu_present_map & mask) != 0) {
-			u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
+		if ((cpu_online_map & mask) != 0) {
+			u64 data0 =
+				(((u64)&xcall_receive_signal) & 0xffffffff);
 
 			if (tlb_type == spitfire)
 				spitfire_xcall_deliver(data0, 0, 0, mask);
@@ -848,7 +863,8 @@ void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
 			       mm->cpu_vm_mask);
 
local_flush_and_out:
-	__flush_tlb_range(ctx, start, SECONDARY_CONTEXT, end, PAGE_SIZE, (end-start));
+	__flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
+			  end, PAGE_SIZE, (end-start));
 }
 
 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -870,31 +886,32 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
 	int cpu = smp_processor_id();
 
 	page &= PAGE_MASK;
-	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+	if (mm == current->active_mm &&
+	    atomic_read(&mm->mm_users) == 1) {
 		/* By virtue of being the current address space, and
-		 * having the only reference to it, the following operation
-		 * is safe.
+		 * having the only reference to it, the following
+		 * operation is safe.
 		 *
-		 * It would not be a win to perform the xcall tlb flush in
-		 * this case, because even if we switch back to one of the
-		 * other processors in cpu_vm_mask it is almost certain that
-		 * all TLB entries for this context will be replaced by the
-		 * time that happens.
+		 * It would not be a win to perform the xcall tlb
+		 * flush in this case, because even if we switch back
+		 * to one of the other processors in cpu_vm_mask it
+		 * is almost certain that all TLB entries for this
+		 * context will be replaced by the time that happens.
 		 */
 		mm->cpu_vm_mask = (1UL << cpu);
 		goto local_flush_and_out;
 	} else {
 		/* By virtue of running under the mm->page_table_lock,
-		 * and mmu_context.h:switch_mm doing the same, the following
-		 * operation is safe.
+		 * and mmu_context.h:switch_mm doing the same, the
+		 * following operation is safe.
 		 */
 		if (mm->cpu_vm_mask == (1UL << cpu))
 			goto local_flush_and_out;
 	}
 
-	/* OK, we have to actually perform the cross call.  Most likely
-	 * this is a cloned mm or kswapd is kicking out pages for a task
-	 * which has run recently on another cpu.
+	/* OK, we have to actually perform the cross call.  Most
+	 * likely this is a cloned mm or kswapd is kicking out pages
+	 * for a task which has run recently on another cpu.
 	 */
 	smp_cross_call_masked(&xcall_flush_tlb_page,
 			      ctx, page, 0,
@@ -922,7 +939,7 @@ void smp_capture(void)
 		membar("#StoreStore | #LoadStore");
 		if (result == 1) {
-			int ncpus = smp_num_cpus;
+			int ncpus = num_online_cpus();
 
 #ifdef CAPTURE_DEBUG
 			printk("CPU[%d]: Sending penguins to jail...",
@@ -946,7 +963,8 @@ void smp_release(void)
 	if (smp_processors_ready) {
 		if (atomic_dec_and_test(&smp_capture_depth)) {
 #ifdef CAPTURE_DEBUG
-			printk("CPU[%d]: Giving pardon to imprisoned penguins\n",
+			printk("CPU[%d]: Giving pardon to "
+			       "imprisoned penguins\n",
 			       smp_processor_id());
 #endif
 			penguins_are_doing_time = 0;
@@ -1027,7 +1045,8 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
 	do {
 		if (!user)
-			sparc64_do_profile(regs->tpc, regs->u_regs[UREG_RETPC]);
+			sparc64_do_profile(regs->tpc,
+					   regs->u_regs[UREG_RETPC]);
 		if (!--prof_counter(cpu)) {
 			if (cpu == boot_cpu_id) {
 				irq_enter(cpu, 0);
@@ -1151,16 +1170,20 @@ void __init smp_tick_init(void)
 	boot_cpu_id = hard_smp_processor_id();
 	current_tick_offset = timer_tick_offset;
 
-	cpu_present_map = 0;
-	for (i = 0; i < linux_num_cpus; i++)
-		cpu_present_map |= (1UL << linux_cpus[i].mid);
-	for (i = 0; i < NR_CPUS; i++) {
-		__cpu_number_map[i] = -1;
-		__cpu_logical_map[i] = -1;
+	if (boot_cpu_id >= NR_CPUS) {
+		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
+		prom_halt();
+	}
+
+	atomic_set(&sparc64_num_cpus_online, 1);
+	memset(&cpu_online_map, 0, sizeof(cpu_online_map));
+	for (i = 0; i < linux_num_cpus; i++) {
+		if (linux_cpus[i].mid < NR_CPUS)
+			set_bit(linux_cpus[i].mid, &cpu_online_map);
 	}
-	__cpu_number_map[boot_cpu_id] = 0;
+
 	prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
-	__cpu_logical_map[0] = boot_cpu_id;
 	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
@@ -1223,8 +1246,10 @@ static void __init smp_tune_scheduling(void)
				     "bne,pt %%xcc, 1b\n\t"
				     " nop\n\t"
				     "rd %%tick, %1\n\t"
-				     : "=&r" (tick1), "=&r" (tick2), "=&r" (flush_base)
-				     : "2" (flush_base), "r" (flush_base + ecache_size)
+				     : "=&r" (tick1), "=&r" (tick2),
+				       "=&r" (flush_base)
+				     : "2" (flush_base),
+				       "r" (flush_base + ecache_size)
				     : "g1", "g2", "g3", "g5");
	} else {
		__asm__ __volatile__("b,pt %%xcc, 1f\n\t"
@@ -1239,8 +1264,10 @@ static void __init smp_tune_scheduling(void)
				     "bne,pt %%xcc, 1b\n\t"
				     " nop\n\t"
				     "rd %%asr24, %1\n\t"
-				     : "=&r" (tick1), "=&r" (tick2), "=&r" (flush_base)
-				     : "2" (flush_base), "r" (flush_base + ecache_size)
+				     : "=&r" (tick1), "=&r" (tick2),
+				       "=&r" (flush_base)
+				     : "2" (flush_base),
+				       "r" (flush_base + ecache_size)
				     : "g1", "g2", "g3", "g5");
	}
@@ -1276,10 +1303,8 @@ int setup_profiling_timer(unsigned int multiplier)
		return -EINVAL;

	save_and_cli(flags);
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_present_map & (1UL << i))
-			prof_multiplier(i) = multiplier;
-	}
+	for (i = 0; i < NR_CPUS; i++)
+		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	restore_flags(flags);
...
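A recurring idiom in the smp.c hunks above: both xcall deliver paths stop counting a cached ncpus down and instead walk a private copy of the target mask, clearing each bit as it is serviced and exiting as soon as the copy empties. Extracted into a sketch; deliver_to_mask() and service() are hypothetical names, where the real code issues the per-cpu mondo write inline:

    #include <stdio.h>

    #define NR_CPUS 64

    /* Hypothetical wrapper; in the patch the body of service() is the
     * inline mondo dispatch (spitfire_xcall_helper() or the cheetah
     * stxa sequence). */
    static void deliver_to_mask(unsigned long mask, void (*service)(int))
    {
        unsigned long work_mask = mask;  /* private copy, as in the patch */
        int i;

        for (i = 0; i < NR_CPUS; i++) {
            if (work_mask & (1UL << i)) {
                service(i);
                work_mask &= ~(1UL << i);
                if (!work_mask)
                    break;  /* all targets serviced: stop early */
            }
        }
    }

    static void service_one(int cpu)
    {
        printf("xcall to cpu %d\n", cpu);
    }

    int main(void)
    {
        deliver_to_mask((1UL << 1) | (1UL << 5), service_one);
        return 0;
    }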
@@ -59,7 +59,6 @@ struct poll {
 	short revents;
 };
 
-extern unsigned prom_cpu_nodes[64];
 extern void die_if_kernel(char *str, struct pt_regs *regs);
 extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 void _sigpause_common (unsigned int set, struct pt_regs *);
@@ -103,7 +102,6 @@ extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
 
 #ifdef CONFIG_SMP
 extern spinlock_t kernel_flag;
-extern int smp_num_cpus;
 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void _do_spin_lock (spinlock_t *lock, char *str);
 extern void _do_spin_unlock (spinlock_t *lock);
@@ -149,12 +147,9 @@ EXPORT_SYMBOL_NOVERS(mcount);
 /* Per-CPU information table */
 EXPORT_SYMBOL(cpu_data);
 
-/* Misc SMP information */
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(smp_num_cpus);
-#endif
-EXPORT_SYMBOL(__cpu_number_map);
-EXPORT_SYMBOL(__cpu_logical_map);
+/* CPU online map and active count. */
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(sparc64_num_cpus_online);
 
 /* Spinlock debugging library, optional. */
 #ifdef CONFIG_DEBUG_SPINLOCK
...
@@ -24,37 +24,13 @@ int this_is_starfire = 0;
 void check_if_starfire(void)
 {
 	int ssnode = prom_finddevice("/ssp-serial");
-	if(ssnode != 0 && ssnode != -1)
+	if (ssnode != 0 && ssnode != -1)
 		this_is_starfire = 1;
 }
 
 void starfire_cpu_setup(void)
 {
-	if (this_is_starfire) {
-/*
- * We do this in starfire_translate and xcall_deliver. When we fix our cpu
- * arrays to support > 64 processors we can use the real upaid instead
- * of the logical cpuid in __cpu_number_map etc, then we can get rid of
- * the translations everywhere. - Anton
- */
-#if 0
-		int i;
-
-		/*
-		 * Now must fixup cpu MIDs.  OBP gave us a logical
-		 * linear cpuid number, not the real upaid.
-		 */
-		for(i = 0; i < linux_num_cpus; i++) {
-			unsigned int mid = linux_cpus[i].mid;
-
-			mid = (((mid & 0x3c) << 1) |
-			       ((mid & 0x40) >> 4) |
-			       (mid & 0x3));
-
-			linux_cpus[i].mid = mid;
-		}
-#endif
-	}
+	/* Currently, nothing to do. */
 }
 
 int starfire_hard_smp_processor_id(void)
@@ -84,7 +60,7 @@ void *starfire_hookup(int upaid)
 	unsigned long treg_base, hwmid, i;
 
 	p = kmalloc(sizeof(*p), GFP_KERNEL);
-	if(!p) {
+	if (!p) {
 		prom_printf("starfire_hookup: No memory, this is insane.\n");
 		prom_halt();
 	}
@@ -95,7 +71,7 @@ void *starfire_hookup(int upaid)
 	p->hwmid = hwmid;
 	treg_base += (hwmid << 33UL);
 	treg_base += 0x200UL;
-	for(i = 0; i < 32; i++) {
+	for (i = 0; i < 32; i++) {
 		p->imap_slots[i] = 0UL;
 		p->tregs[i] = treg_base + (i * 0x10UL);
 		/* Lets play it safe and not overwrite existing mappings */
@@ -117,20 +93,20 @@ unsigned int starfire_translate(unsigned long imap,
 	unsigned int i;
 
 	bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
-	for(p = sflist; p != NULL; p = p->next)
-		if(p->hwmid == bus_hwmid)
+	for (p = sflist; p != NULL; p = p->next)
+		if (p->hwmid == bus_hwmid)
 			break;
-	if(p == NULL) {
+	if (p == NULL) {
 		prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
 			    ((unsigned long)imap));
 		prom_halt();
 	}
-	for(i = 0; i < 32; i++) {
-		if(p->imap_slots[i] == imap ||
+	for (i = 0; i < 32; i++) {
+		if (p->imap_slots[i] == imap ||
 		    p->imap_slots[i] == 0UL)
 			break;
 	}
-	if(i == 32) {
+	if (i == 32) {
 		printk("starfire_translate: Are you kidding me?\n");
 		panic("Lucy in the sky....");
 	}
@@ -138,8 +114,8 @@ unsigned int starfire_translate(unsigned long imap,
 	/* map to real upaid */
 	upaid = (((upaid & 0x3c) << 1) |
		 ((upaid & 0x40) >> 4) |
		 (upaid & 0x3));
 
 	upa_writel(upaid, p->tregs[i]);
...
@@ -402,7 +402,7 @@ void __init cheetah_ecache_flush_init(void)
 {
 	unsigned long largest_size, smallest_linesize, order;
 	char type[16];
-	int node, highest_cpu, i;
+	int node, i;
 
 	/* Scan all cpu device tree nodes, note two values:
 	 * 1) largest E-cache size
@@ -458,15 +458,7 @@ void __init cheetah_ecache_flush_init(void)
 	}
 
 	/* Now allocate error trap reporting scoreboard. */
-	highest_cpu = 0;
-#ifdef CONFIG_SMP
-	for (i = 0; i < NR_CPUS; i++) {
-		if ((1UL << i) & cpu_present_map)
-			highest_cpu = i;
-	}
-#endif
-	highest_cpu++;
-	node = highest_cpu * (2 * sizeof(struct cheetah_err_info));
+	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
 	for (order = 0; order < MAX_ORDER; order++) {
 		if ((PAGE_SIZE << order) >= node)
 			break;
@@ -483,7 +475,7 @@ void __init cheetah_ecache_flush_init(void)
 	/* Mark all AFSRs as invalid so that the trap handler will
 	 * log new new information there.
 	 */
-	for (i = 0; i < 2 * highest_cpu; i++)
+	for (i = 0; i < 2 * NR_CPUS; i++)
 		cheetah_error_log[i].afsr = CHAFSR_INVALID;
 
 	/* Now patch trap tables. */
...
@@ -21,6 +21,7 @@
 #include <asm/string.h>
 #include <asm/oplib.h>
 #include <asm/idprom.h>
+#include <asm/smp.h>
 
 #include "conv.h"
@@ -336,8 +337,6 @@ asmlinkage int solaris_sysinfo(int cmd, u32 buf, s32 count)
 #define SOLARIS_CONFIG_PHYS_PAGES 26
 #define SOLARIS_CONFIG_AVPHYS_PAGES 27
 
-extern unsigned prom_cpu_nodes[NR_CPUS];
-
 asmlinkage int solaris_sysconf(int id)
 {
 	switch (id) {
@@ -353,7 +352,7 @@ asmlinkage int solaris_sysconf(int id)
 						     "clock-frequency", 167000000);
 #ifdef CONFIG_SMP
 	case SOLARIS_CONFIG_NPROC_CONF: return NR_CPUS;
-	case SOLARIS_CONFIG_NPROC_ONLN: return smp_num_cpus;
+	case SOLARIS_CONFIG_NPROC_ONLN: return num_online_cpus();
 #else
 	case SOLARIS_CONFIG_NPROC_CONF: return 1;
 	case SOLARIS_CONFIG_NPROC_ONLN: return 1;
...
@@ -64,9 +64,12 @@ static __inline__ int irqs_running(void)
 {
 	int i;
 
-	for (i = 0; i < smp_num_cpus; i++)
-		if (local_irq_count(cpu_logical_map(i)))
+	for (i = 0; i < NR_CPUS; i++) {
+		if (!cpu_online(i))
+			continue;
+		if (local_irq_count(i))
 			return 1;
+	}
 	return 0;
 }
...
@@ -24,7 +24,8 @@ struct prom_cpuinfo {
 };
 
 extern int linux_num_cpus;	/* number of CPUs probed */
-extern struct prom_cpuinfo linux_cpus[64];
+extern struct prom_cpuinfo linux_cpus[NR_CPUS];
+extern unsigned int prom_cpu_nodes[NR_CPUS];
 
 #endif /* !(__ASSEMBLY__) */
@@ -60,9 +61,21 @@ extern cpuinfo_sparc cpu_data[NR_CPUS];
  * Private routines/data
  */
 
+#include <asm/bitops.h>
+#include <asm/atomic.h>
+
 extern unsigned char boot_cpu_id;
-extern unsigned long cpu_present_map;
-#define cpu_online_map cpu_present_map
+extern unsigned long cpu_online_map;
+#define cpu_online(cpu)	(cpu_online_map & (1UL << (cpu)))
+
+extern atomic_t sparc64_num_cpus_online;
+#define num_online_cpus()	(atomic_read(&sparc64_num_cpus_online))
+
+static inline int any_online_cpu(unsigned long mask)
+{
+	if ((mask &= cpu_online_map) != 0UL)
+		return __ffs(mask);
+	return -1;
+}
 
 /*
  * General functions that each host system must provide.
@@ -72,18 +85,6 @@ extern void smp_callin(void);
 extern void smp_boot_cpus(void);
 extern void smp_store_cpu_info(int id);
 
-extern __volatile__ int __cpu_number_map[NR_CPUS];
-extern __volatile__ int __cpu_logical_map[NR_CPUS];
-
-extern __inline__ int cpu_logical_map(int cpu)
-{
-	return __cpu_logical_map[cpu];
-}
-extern __inline__ int cpu_number_map(int cpu)
-{
-	return __cpu_number_map[cpu];
-}
-
 extern __inline__ int hard_smp_processor_id(void)
 {
 	if (tlb_type == cheetah) {
...
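For reference, the new smp.h primitives compose as in this sketch. It mirrors the header's own any_online_cpu(), but __ffs() is open-coded with a loop since the kernel helper isn't available in userspace, and the sample map value is made up:

    #include <stdio.h>

    #define NR_CPUS 64

    static unsigned long cpu_online_map = 0x22UL;  /* made-up: cpus 1 and 5 */

    /* Mirrors the header's any_online_cpu(): index of the lowest set
     * bit of (mask & cpu_online_map), or -1 if none is online. */
    static int any_online_cpu(unsigned long mask)
    {
        int i;

        mask &= cpu_online_map;
        for (i = 0; i < NR_CPUS; i++)
            if (mask & (1UL << i))
                return i;
        return -1;
    }

    int main(void)
    {
        printf("%d\n", any_online_cpu(~0UL));      /* 1 */
        printf("%d\n", any_online_cpu(1UL << 5));  /* 5 */
        printf("%d\n", any_online_cpu(1UL << 0));  /* -1: cpu 0 offline */
        return 0;
    }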