Commit e22aa6d3 authored by Anton Blanchard

ppc64: support for > 32 CPUs (24 way RS64 with HMT shows up as 48 way)

parent 8d93229f
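Most of the change is mechanical: every per-CPU bitmask that was an unsigned int (32 bits) becomes an unsigned long (64 bits on ppc64), and every 1 << cpu becomes 1UL << cpu. A minimal user-space sketch, not part of the commit and assuming a 64-bit unsigned long as on ppc64, of why this matters once CPU numbers reach 32:

/* Illustrative only: with a 32-bit int, (1 << cpu) cannot represent
 * CPUs 32..63 (the shift is undefined); with a 64-bit unsigned long,
 * (1UL << cpu) covers all 48 threads of a 24-way RS64 box with HMT. */
#include <stdio.h>

int main(void)
{
	unsigned long mask = 0;
	int cpu;

	for (cpu = 0; cpu < 48; cpu++)
		mask |= 1UL << cpu;	/* would overflow a 32-bit mask */

	printf("%016lx\n", mask);	/* prints 0000ffffffffffff */
	return 0;
}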
@@ -13,7 +13,7 @@ struct mm_struct init_mm = INIT_MM(init_mm);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* We need to make sure that this is 16384-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
......
@@ -395,7 +395,7 @@ handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
}
#ifdef CONFIG_SMP
extern unsigned int irq_affinity [NR_IRQS];
extern unsigned long irq_affinity [NR_IRQS];
typedef struct {
unsigned long cpu;
@@ -409,7 +409,7 @@ static irq_balance_t irq_balance[NR_IRQS] __cacheline_aligned
(idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1))
#define IRQ_ALLOWED(cpu,allowed_mask) \
((1 << cpu) & (allowed_mask))
((1UL << cpu) & (allowed_mask))
#define IRQ_BALANCE_INTERVAL (HZ/50)
@@ -461,7 +461,7 @@ static inline void balance_irq(int irq)
new_cpu = move(entry->cpu, allowed_mask, now, random_number);
if (entry->cpu != new_cpu) {
entry->cpu = new_cpu;
irq_desc[irq].handler->set_affinity(irq, 1 << new_cpu);
irq_desc[irq].handler->set_affinity(irq, 1UL << new_cpu);
}
}
}
@@ -649,19 +649,19 @@ static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
#ifdef CONFIG_IRQ_ALL_CPUS
unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = -1UL};
#else /* CONFIG_IRQ_ALL_CPUS */
unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x00000000};
unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x0};
#endif /* CONFIG_IRQ_ALL_CPUS */
#define HEX_DIGITS 8
#define HEX_DIGITS 16
static int irq_affinity_read_proc (char *page, char **start, off_t off,
int count, int *eof, void *data)
{
if (count < HEX_DIGITS+1)
return -EINVAL;
return sprintf (page, "%08x\n", irq_affinity[(int)(long)data]);
return sprintf(page, "%16lx\n", irq_affinity[(long)data]);
}
static unsigned int parse_hex_value (const char *buffer,
@@ -679,7 +679,7 @@ static unsigned int parse_hex_value (const char *buffer,
return -EFAULT;
/*
* Parse the first 8 characters as a hex string, any non-hex char
* Parse the first 16 characters as a hex string, any non-hex char
* is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
*/
value = 0;
@@ -704,7 +704,7 @@ static unsigned int parse_hex_value (const char *buffer,
static int irq_affinity_write_proc (struct file *file, const char *buffer,
unsigned long count, void *data)
{
int irq = (int)(long) data, full_count = count, err;
int irq = (long)data, full_count = count, err;
unsigned long new_value;
if (!irq_desc[irq].handler->set_affinity)
@@ -712,8 +712,6 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
err = parse_hex_value(buffer, count, &new_value);
/* Why is this disabled ? --BenH */
#if 0/*CONFIG_SMP*/
/*
* Do not allow disabling IRQs completely - it's a too easy
* way to make the system unusable accidentally :-) At least
@@ -721,7 +719,6 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
*/
if (!(new_value & cpu_online_map))
return -EINVAL;
#endif
irq_affinity[irq] = new_value;
irq_desc[irq].handler->set_affinity(irq, new_value);
......
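With the mask widened to 64 bits, /proc/irq/N/smp_affinity now prints and parses up to 16 hex digits instead of 8. A rough user-space sketch of the parsing rule described in the comment above; parse_hex16 is a made-up name and this is not the kernel's parse_hex_value:

#include <stdio.h>
#include <ctype.h>

/* Parse at most 16 hex digits into a 64-bit mask; as in the kernel
 * comment, any non-hex character is treated as end-of-string, so
 * "00e1", "e1", "00E1" and "E1" all yield the same value. */
static unsigned long parse_hex16(const char *s)
{
	unsigned long value = 0;
	int i;

	for (i = 0; i < 16 && s[i]; i++) {
		int c = tolower((unsigned char)s[i]);

		if (c >= '0' && c <= '9')
			value = (value << 4) | (c - '0');
		else if (c >= 'a' && c <= 'f')
			value = (value << 4) | (c - 'a' + 10);
		else
			break;		/* non-hex char ends the string */
	}
	return value;
}

int main(void)
{
	/* 48 bits set: every thread of a 48-way box */
	printf("%016lx\n", parse_hex16("ffffffffffff"));
	return 0;
}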
@@ -56,7 +56,7 @@ unsigned long cpu_online_map = 0;
static struct smp_ops_t *smp_ops;
volatile unsigned long cpu_callin_map[NR_CPUS];
volatile unsigned int cpu_callin_map[NR_CPUS];
extern unsigned char stab_array[];
@@ -564,26 +564,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* Fixup boot cpu */
smp_store_cpu_info(smp_processor_id());
cpu_callin_map[smp_processor_id()] = 1;
for (i = 0; i < NR_CPUS; i++) {
paca[i].prof_counter = 1;
paca[i].prof_multiplier = 1;
if (i != boot_cpuid) {
void *tmp;
/*
* the boot cpu segment table is statically
* initialized to real address 0x5000. The
* Other processor's tables are created and
* initialized here.
*/
tmp = &stab_array[PAGE_SIZE * (i-1)];
memset(tmp, 0, PAGE_SIZE);
paca[i].xStab_data.virt = (unsigned long)tmp;
paca[i].xStab_data.real = (unsigned long)__v2a(tmp);
paca[i].default_decr = tb_ticks_per_jiffy /
decr_overclock;
}
}
paca[smp_processor_id()].prof_counter = 1;
paca[smp_processor_id()].prof_multiplier = 1;
/*
* XXX very rough.
@@ -611,6 +593,23 @@ int __devinit __cpu_up(unsigned int cpu)
struct task_struct *p;
int c;
paca[cpu].prof_counter = 1;
paca[cpu].prof_multiplier = 1;
paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
if (!cpu_has_slb()) {
void *tmp;
/* maximum of 48 CPUs on machines with a segment table */
if (cpu >= 48)
BUG();
tmp = &stab_array[PAGE_SIZE * cpu];
memset(tmp, 0, PAGE_SIZE);
paca[cpu].xStab_data.virt = (unsigned long)tmp;
paca[cpu].xStab_data.real = (unsigned long)__v2a(tmp);
}
/* create a process for the processor */
/* only regs.msr is actually used, and 0 is OK for it */
memset(&regs, 0, sizeof(struct pt_regs));
......
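The per-CPU segment-table setup also moves out of smp_prepare_cpus() and into __cpu_up(), with stab_array now indexed by the CPU number directly instead of by (i - 1). A stand-alone sketch of that carve-out; PAGE_SIZE here is an illustrative value and stab_for_cpu a made-up helper name, while the 48-slot cap mirrors the check in the diff:

#include <string.h>
#include <stddef.h>

#define PAGE_SIZE	4096		/* illustrative; the real value comes from the kernel */
#define MAX_STAB_CPUS	48		/* space reserved in stab_array */

static unsigned char stab_array[PAGE_SIZE * MAX_STAB_CPUS];

/* Hand out one zeroed page of stab_array per CPU, the way __cpu_up()
 * now does for machines without an SLB. */
static void *stab_for_cpu(unsigned int cpu)
{
	void *tmp;

	if (cpu >= MAX_STAB_CPUS)
		return NULL;		/* the kernel BUG()s here instead */

	tmp = &stab_array[PAGE_SIZE * cpu];
	memset(tmp, 0, PAGE_SIZE);
	return tmp;
}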
@@ -437,7 +437,7 @@ void xics_set_affinity(unsigned int virq, unsigned long cpumask)
unsigned long flags;
long status;
unsigned long xics_status[2];
u32 newmask;
unsigned long newmask;
virq -= XICS_IRQ_OFFSET;
irq = virt_irq_to_real(virq);
@@ -455,12 +455,12 @@ void xics_set_affinity(unsigned int virq, unsigned long cpumask)
}
/* For the moment only implement delivery to all cpus or one cpu */
if (cpumask == 0xffffffff) {
if (cpumask == -1UL) {
newmask = default_distrib_server;
} else {
if (!(cpumask & cpu_online_map))
goto out;
newmask = find_first_bit(&cpumask, 32);
newmask = find_first_bit(&cpumask, 8*sizeof(unsigned long));
}
status = rtas_call(ibm_set_xive, 3, 1, NULL,
......
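On the XICS side, single-CPU delivery now scans the full width of an unsigned long, so CPUs 32 and above can be targeted. An illustrative stand-alone version of that scan, assuming a 64-bit unsigned long; first_cpu is a made-up helper, not the kernel's find_first_bit:

#include <stdio.h>

static int first_cpu(unsigned long mask)
{
	int cpu;

	/* scan all 8 * sizeof(unsigned long) = 64 bit positions */
	for (cpu = 0; cpu < 8 * (int)sizeof(unsigned long); cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return -1;			/* empty mask */
}

int main(void)
{
	/* CPU 40 would be unreachable with a 32-bit scan */
	printf("%d\n", first_cpu(1UL << 40));
	return 0;
}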
@@ -52,7 +52,7 @@ static inline int num_online_cpus(void)
return nr;
}
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern volatile unsigned int cpu_callin_map[NR_CPUS];
#define smp_processor_id() (get_paca()->xPacaIndex)
......
@@ -62,7 +62,7 @@ static inline void __tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
if (i == PPC64_TLB_BATCH_NR) {
int local = 0;
if (tlb->mm->cpu_vm_mask == (1 << cpu))
if (tlb->mm->cpu_vm_mask == (1UL << cpu))
local = 1;
flush_hash_range(tlb->mm->context, i, local);
@@ -80,7 +80,7 @@ static inline void tlb_flush(struct free_pte_ctx *tlb)
struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
int local = 0;
if (tlb->mm->cpu_vm_mask == (1 << smp_processor_id()))
if (tlb->mm->cpu_vm_mask == (1UL << smp_processor_id()))
local = 1;
flush_hash_range(tlb->mm->context, batch->index, local);
......
@@ -41,7 +41,7 @@ static inline unsigned long __node_to_cpu_mask(int node)
for(cpu = 0; cpu < NR_CPUS; cpu++)
if (numa_cpu_lookup_table[cpu] == node)
mask |= 1 << cpu;
mask |= 1UL << cpu;
return mask;
}
......