Commit b77b881f authored by Yinghai Lu's avatar Yinghai Lu Committed by Ingo Molnar

x86: fix lguest used_vectors breakage, -v2

Impact: fix lguest, clean up

32-bit lguest used used_vectors to record vectors, but that model of
allocating vectors broke when we moved vector allocation into a
per_cpu array.

Enable used_vectors for 64-bit as well; the bitmap now covers all
vectors that are not managed by the vector_irq per_cpu array.

Also kill system_vectors[], that is now a duplication of the
used_vectors bitmap.

[ merged in cpus4096 due to io_apic.c cpumask changes. ]
[ -v2, fix build failure ]
Signed-off-by: default avatarYinghai Lu <yinghai@kernel.org>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent a7883dec
...@@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr) ...@@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr)
_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
} }
#define SYS_VECTOR_FREE 0
#define SYS_VECTOR_ALLOCED 1
extern int first_system_vector; extern int first_system_vector;
extern char system_vectors[]; /* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
extern unsigned long used_vectors[];
static inline void alloc_system_vector(int vector) static inline void alloc_system_vector(int vector)
{ {
if (system_vectors[vector] == SYS_VECTOR_FREE) { if (!test_bit(vector, used_vectors)) {
system_vectors[vector] = SYS_VECTOR_ALLOCED; set_bit(vector, used_vectors);
if (first_system_vector > vector) if (first_system_vector > vector)
first_system_vector = vector; first_system_vector = vector;
} else } else
......
...@@ -46,5 +46,6 @@ extern void native_init_IRQ(void); ...@@ -46,5 +46,6 @@ extern void native_init_IRQ(void);
/* Interrupt vector management */ /* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS); extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
extern int vector_used_by_percpu_irq(unsigned int vector);
#endif /* _ASM_X86_IRQ_H */ #endif /* _ASM_X86_IRQ_H */
...@@ -118,8 +118,6 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); ...@@ -118,8 +118,6 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
int first_system_vector = 0xfe; int first_system_vector = 0xfe;
char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
/* /*
* Debug level, exported for io_apic.c * Debug level, exported for io_apic.c
*/ */
......
...@@ -1326,13 +1326,10 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) ...@@ -1326,13 +1326,10 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
} }
if (unlikely(current_vector == vector)) if (unlikely(current_vector == vector))
continue; continue;
#ifdef CONFIG_X86_64
if (vector == IA32_SYSCALL_VECTOR) if (test_bit(vector, used_vectors))
goto next;
#else
if (vector == SYSCALL_VECTOR)
goto next; goto next;
#endif
for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
if (per_cpu(vector_irq, new_cpu)[vector] != -1) if (per_cpu(vector_irq, new_cpu)[vector] != -1)
goto next; goto next;
......
...@@ -110,6 +110,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { ...@@ -110,6 +110,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
}; };
int vector_used_by_percpu_irq(unsigned int vector)
{
int cpu;
for_each_online_cpu(cpu) {
if (per_cpu(vector_irq, cpu)[vector] != -1)
return 1;
}
return 0;
}
/* Overridden in paravirt.c */ /* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
...@@ -146,10 +158,12 @@ void __init native_init_IRQ(void) ...@@ -146,10 +158,12 @@ void __init native_init_IRQ(void)
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
/* IPI for single call function */ /* IPI for single call function */
set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
call_function_single_interrupt);
/* Low priority IPI to cleanup after moving an irq */ /* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif #endif
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
......
...@@ -135,6 +135,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { ...@@ -135,6 +135,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
}; };
int vector_used_by_percpu_irq(unsigned int vector)
{
int cpu;
for_each_online_cpu(cpu) {
if (per_cpu(vector_irq, cpu)[vector] != -1)
return 1;
}
return 0;
}
void __init init_ISA_irqs(void) void __init init_ISA_irqs(void)
{ {
int i; int i;
...@@ -187,6 +199,7 @@ static void __init smp_intr_init(void) ...@@ -187,6 +199,7 @@ static void __init smp_intr_init(void)
/* Low priority IPI to cleanup after moving an irq */ /* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif #endif
} }
......
...@@ -72,9 +72,6 @@ ...@@ -72,9 +72,6 @@
#include "cpu/mcheck/mce.h" #include "cpu/mcheck/mce.h"
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
asmlinkage int system_call(void); asmlinkage int system_call(void);
/* Do we ignore FPU interrupts ? */ /* Do we ignore FPU interrupts ? */
...@@ -89,6 +86,9 @@ gate_desc idt_table[256] ...@@ -89,6 +86,9 @@ gate_desc idt_table[256]
__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
#endif #endif
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
static int ignore_nmis; static int ignore_nmis;
static inline void conditional_sti(struct pt_regs *regs) static inline void conditional_sti(struct pt_regs *regs)
...@@ -949,9 +949,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) ...@@ -949,9 +949,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
void __init trap_init(void) void __init trap_init(void)
{ {
#ifdef CONFIG_X86_32
int i; int i;
#endif
#ifdef CONFIG_EISA #ifdef CONFIG_EISA
void __iomem *p = early_ioremap(0x0FFFD9, 4); void __iomem *p = early_ioremap(0x0FFFD9, 4);
...@@ -1008,11 +1006,15 @@ void __init trap_init(void) ...@@ -1008,11 +1006,15 @@ void __init trap_init(void)
} }
set_system_trap_gate(SYSCALL_VECTOR, &system_call); set_system_trap_gate(SYSCALL_VECTOR, &system_call);
#endif
/* Reserve all the builtin and the syscall vector: */ /* Reserve all the builtin and the syscall vector: */
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
set_bit(i, used_vectors); set_bit(i, used_vectors);
#ifdef CONFIG_X86_64
set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#else
set_bit(SYSCALL_VECTOR, used_vectors); set_bit(SYSCALL_VECTOR, used_vectors);
#endif #endif
/* /*
......
...@@ -222,11 +222,16 @@ bool check_syscall_vector(struct lguest *lg) ...@@ -222,11 +222,16 @@ bool check_syscall_vector(struct lguest *lg)
int init_interrupts(void) int init_interrupts(void)
{ {
/* If they want some strange system call vector, reserve it now */ /* If they want some strange system call vector, reserve it now */
if (syscall_vector != SYSCALL_VECTOR if (syscall_vector != SYSCALL_VECTOR) {
&& test_and_set_bit(syscall_vector, used_vectors)) { if (test_bit(syscall_vector, used_vectors) ||
printk("lg: couldn't reserve syscall %u\n", syscall_vector); vector_used_by_percpu_irq(syscall_vector)) {
return -EBUSY; printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
syscall_vector);
return -EBUSY;
}
set_bit(syscall_vector, used_vectors);
} }
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment