Commit 9f4c7ac2 authored by Linus Torvalds

Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
  [PATCH] x86-64: Only look at per_cpu data for online cpus.
  [PATCH] x86-64: Simplify the vector allocator.
parents 8c8a0eab 70a0a535
@@ -63,7 +63,7 @@ int timer_over_8254 __initdata = 1;
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
 static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+DEFINE_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -612,15 +612,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
          * Also, we've got to be careful not to trash gate
          * 0x80, because int 0x80 is hm, kind of importantish. ;)
          */
-        static struct {
-                int vector;
-                int offset;
-        } pos[NR_CPUS] = { [ 0 ... NR_CPUS - 1] = {FIRST_DEVICE_VECTOR, 0} };
+        static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
         int old_vector = -1;
         int cpu;
 
         BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 
+        /* Only try and allocate irqs on cpus that are present */
+        cpus_and(mask, mask, cpu_online_map);
+
         if (irq_vector[irq] > 0)
                 old_vector = irq_vector[irq];
         if (old_vector > 0) {
@@ -630,15 +630,15 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
         }
 
         for_each_cpu_mask(cpu, mask) {
-                cpumask_t domain;
-                int first, new_cpu;
+                cpumask_t domain, new_mask;
+                int new_cpu;
                 int vector, offset;
 
                 domain = vector_allocation_domain(cpu);
-                first = first_cpu(domain);
+                cpus_and(new_mask, domain, cpu_online_map);
 
-                vector = pos[first].vector;
-                offset = pos[first].offset;
+                vector = current_vector;
+                offset = current_offset;
 next:
                 vector += 8;
                 if (vector >= FIRST_SYSTEM_VECTOR) {
@@ -646,24 +646,24 @@ static int __assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
                         offset = (offset + 1) % 8;
                         vector = FIRST_DEVICE_VECTOR + offset;
                 }
-                if (unlikely(pos[first].vector == vector))
+                if (unlikely(current_vector == vector))
                         continue;
                 if (vector == IA32_SYSCALL_VECTOR)
                         goto next;
-                for_each_cpu_mask(new_cpu, domain)
+                for_each_cpu_mask(new_cpu, new_mask)
                         if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                 goto next;
                 /* Found one! */
-                for_each_cpu_mask(new_cpu, domain) {
-                        pos[new_cpu].vector = vector;
-                        pos[new_cpu].offset = offset;
-                }
+                current_vector = vector;
+                current_offset = offset;
                 if (old_vector >= 0) {
+                        cpumask_t old_mask;
                         int old_cpu;
-                        for_each_cpu_mask(old_cpu, irq_domain[irq])
+                        cpus_and(old_mask, irq_domain[irq], cpu_online_map);
+                        for_each_cpu_mask(old_cpu, old_mask)
                                 per_cpu(vector_irq, old_cpu)[old_vector] = -1;
                 }
-                for_each_cpu_mask(new_cpu, domain)
+                for_each_cpu_mask(new_cpu, new_mask)
                         per_cpu(vector_irq, new_cpu)[vector] = irq;
                 irq_vector[irq] = vector;
                 irq_domain[irq] = domain;
@@ -684,6 +684,32 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
         return vector;
 }
 
+void __setup_vector_irq(int cpu)
+{
+        /* Initialize vector_irq on a new cpu */
+        /* This function must be called with vector_lock held */
+        unsigned long flags;
+        int irq, vector;
+
+        /* Mark the inuse vectors */
+        for (irq = 0; irq < NR_IRQ_VECTORS; ++irq) {
+                if (!cpu_isset(cpu, irq_domain[irq]))
+                        continue;
+                vector = irq_vector[irq];
+                per_cpu(vector_irq, cpu)[vector] = irq;
+        }
+        /* Mark the free vectors */
+        for (vector = 0; vector < NR_VECTORS; ++vector) {
+                irq = per_cpu(vector_irq, cpu)[vector];
+                if (irq < 0)
+                        continue;
+                if (!cpu_isset(cpu, irq_domain[irq]))
+                        per_cpu(vector_irq, cpu)[vector] = -1;
+        }
+}
+
 extern void (*interrupt[NR_IRQS])(void);
 
 static struct irq_chip ioapic_chip;
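
The __assign_irq_vector() hunks above replace the per-cpu pos[] bookkeeping with a single global current_vector/current_offset pair: candidate vectors advance in steps of 8, wrap around by rotating the low-order offset once FIRST_SYSTEM_VECTOR is reached, and the int 0x80 gate is always skipped. The standalone sketch below models only that search loop; the vector constants are illustrative placeholders rather than the kernel's definitions, and the per-cpu occupancy check and locking are left out.

#include <stdio.h>

/* Illustrative placeholder values; the real layout lives in the kernel headers. */
#define FIRST_DEVICE_VECTOR     0x41
#define FIRST_SYSTEM_VECTOR     0xef
#define IA32_SYSCALL_VECTOR     0x80

static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;

/* Return the next candidate vector, mirroring the stride-8 search above. */
static int next_vector(void)
{
        int vector = current_vector, offset = current_offset;
next:
        vector += 8;
        if (vector >= FIRST_SYSTEM_VECTOR) {
                /* Wrap around, restarting from a rotated low-order offset. */
                offset = (offset + 1) % 8;
                vector = FIRST_DEVICE_VECTOR + offset;
        }
        if (vector == current_vector)
                return -1;              /* came all the way back around */
        if (vector == IA32_SYSCALL_VECTOR)
                goto next;              /* never hand out the int 0x80 gate */
        current_vector = vector;
        current_offset = offset;
        return vector;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("candidate vector: 0x%02x\n", next_vector());
        return 0;
}

The remaining hunks wire the same state into the SMP bring-up and teardown paths.
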
@@ -581,12 +581,16 @@ void __cpuinit start_secondary(void)
          * smp_call_function().
          */
         lock_ipi_call_lock();
+        spin_lock(&vector_lock);
 
+        /* Setup the per cpu irq handling data structures */
+        __setup_vector_irq(smp_processor_id());
         /*
          * Allow the master to continue.
          */
         cpu_set(smp_processor_id(), cpu_online_map);
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+        spin_unlock(&vector_lock);
         unlock_ipi_call_lock();
 
         cpu_idle();
@@ -799,7 +803,6 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
                         cpu, node);
         }
 
-
         alternatives_smp_switch(1);
 
         c_idle.idle = get_idle_for_cpu(cpu);
@@ -1246,8 +1249,10 @@ int __cpu_disable(void)
         local_irq_disable();
         remove_siblinginfo(cpu);
 
+        spin_lock(&vector_lock);
         /* It's now safe to remove this processor from the online map */
         cpu_clear(cpu, cpu_online_map);
+        spin_unlock(&vector_lock);
         remove_cpu_from_maps();
         fixup_irqs(cpu_online_map);
         return 0;
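
The start_secondary() and __cpu_disable() hunks above take vector_lock around the cpu_online_map update, so a cpu rebuilds its vector_irq table with __setup_vector_irq() before the allocator can see it as online, and drops out of the map under the same lock when it goes offline. The sketch below is a rough user-space model of the bookkeeping __setup_vector_irq() performs; the array sizes and the bitmask cpu set are simplified stand-ins, not the kernel's NR_IRQ_VECTORS/NR_VECTORS or cpumask_t.

#include <stdio.h>

#define NR_CPUS         4
#define NR_IRQS         8
#define NR_VECTORS      16

static unsigned int irq_domain[NR_IRQS];        /* one bit per cpu serving the irq */
static int irq_vector[NR_IRQS];                 /* vector assigned to each irq */
static int vector_irq[NR_CPUS][NR_VECTORS];     /* per-cpu vector -> irq, -1 = free */

static void setup_vector_irq(int cpu)
{
        int irq, vector;

        /* Mark the in-use vectors: every irq whose domain covers this cpu. */
        for (irq = 0; irq < NR_IRQS; irq++) {
                if (!(irq_domain[irq] & (1u << cpu)))
                        continue;
                vector_irq[cpu][irq_vector[irq]] = irq;
        }
        /* Clear stale entries for irqs this cpu no longer serves. */
        for (vector = 0; vector < NR_VECTORS; vector++) {
                irq = vector_irq[cpu][vector];
                if (irq < 0)
                        continue;
                if (!(irq_domain[irq] & (1u << cpu)))
                        vector_irq[cpu][vector] = -1;
        }
}

int main(void)
{
        int cpu, vector;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                for (vector = 0; vector < NR_VECTORS; vector++)
                        vector_irq[cpu][vector] = -1;

        /* Pretend irq 3 was given vector 9 with cpus 0 and 2 in its domain. */
        irq_vector[3] = 9;
        irq_domain[3] = (1u << 0) | (1u << 2);

        setup_vector_irq(2);    /* cpu 2 comes online and rebuilds its table */
        printf("cpu 2: vector 9 -> irq %d\n", vector_irq[2][9]);
        return 0;
}

The header hunk below simply exports __setup_vector_irq() and vector_lock so the smpboot code can reach them.
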
@@ -76,6 +76,8 @@
 #ifndef __ASSEMBLY__
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void __setup_vector_irq(int cpu);
+extern spinlock_t vector_lock;
 
 /*
  * Various low-level irq details needed by irq.c, process.c,