Commit 1514a193 authored by Brian Gerst, committed by Linus Torvalds

[PATCH] fastcall fixes for x86 smp interrupts

I cross-checked the functions called by the BUILD_INTERRUPT macros and
came up with this patch.  Even though some of these functions currently 
take no args, I made them all consistent.  Some functions in the Voyager 
code that are not directly called from asm code become static.
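
As a concrete illustration of the convention being enforced (a sketch only: the handler name below is made up, and the stub behaviour described is the usual i386 BUILD_INTERRUPT entry path of this era, not code from this patch): every C handler reached through a BUILD_INTERRUPT stub is declared fastcall and takes a pointer to the saved register frame, so the asm stub can hand it the pt_regs pointer in a register.  Handlers that do not need the register state, like several of the ones touched here, simply ignore the argument:

    /* Illustrative only -- hypothetical handler showing the uniform
     * signature: fastcall (register-passing) linkage plus a
     * struct pt_regs * argument, even if the body never reads it. */
    fastcall void smp_example_interrupt(struct pt_regs *regs)
    {
            ack_APIC_irq();         /* regs intentionally unused */
    }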
Signed-off-by: Brian Gerst <bgerst@didntduck.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e5af34a2
@@ -1149,7 +1149,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
  *   interrupt as well. Thus we cannot inline the local irq ... ]
  */
-void smp_apic_timer_interrupt(struct pt_regs regs)
+fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
@@ -1169,14 +1169,14 @@ void smp_apic_timer_interrupt(struct pt_regs regs)
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
 	irq_enter();
-	smp_local_timer_interrupt(&regs);
+	smp_local_timer_interrupt(regs);
 	irq_exit();
 }
 
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-asmlinkage void smp_spurious_interrupt(void)
+fastcall void smp_spurious_interrupt(struct pt_regs *regs)
 {
 	unsigned long v;
@@ -1200,7 +1200,7 @@ asmlinkage void smp_spurious_interrupt(void)
  * This interrupt should never happen with our APIC/SMP architecture
  */
-asmlinkage void smp_error_interrupt(void)
+fastcall void smp_error_interrupt(struct pt_regs *regs)
 {
 	unsigned long v, v1;
...
@@ -308,7 +308,7 @@ static inline void leave_mm (unsigned long cpu)
  * 2) Leave the mm if we are in the lazy tlb mode.
  */
-asmlinkage void smp_invalidate_interrupt (void)
+fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
 {
 	unsigned long cpu;
@@ -579,12 +579,12 @@ void smp_send_stop(void)
  * all the work is done automatically when
  * we return from the interrupt.
  */
-asmlinkage void smp_reschedule_interrupt(void)
+fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 }
 
-asmlinkage void smp_call_function_interrupt(void)
+fastcall void smp_call_function_interrupt(struct pt_regs *regs)
 {
 	void (*func) (void *info) = call_data->func;
 	void *info = call_data->info;
...
@@ -785,8 +785,8 @@ initialize_secondary(void)
  * System interrupts occur because some problem was detected on the
  * various busses.  To find out what you have to probe all the
  * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
-asmlinkage void
-smp_vic_sys_interrupt(void)
+fastcall void
+smp_vic_sys_interrupt(struct pt_regs *regs)
 {
 	ack_CPI(VIC_SYS_INT);
 	printk("Voyager SYSTEM INTERRUPT\n");
@@ -795,8 +795,8 @@ smp_vic_sys_interrupt(void)
 /* Handle a voyager CMN_INT; These interrupts occur either because of
  * a system status change or because a single bit memory error
  * occurred.  FIXME: At the moment, ignore all this. */
-asmlinkage void
-smp_vic_cmn_interrupt(void)
+fastcall void
+smp_vic_cmn_interrupt(struct pt_regs *regs)
 {
 	static __u8 in_cmn_int = 0;
 	static spinlock_t cmn_int_lock = SPIN_LOCK_UNLOCKED;
@@ -824,7 +824,7 @@ smp_vic_cmn_interrupt(void)
 /*
  * Reschedule call back. Nothing to do, all the work is done
  * automatically when we return from the interrupt.  */
-asmlinkage void
+static void
 smp_reschedule_interrupt(void)
 {
 	/* do nothing */
@@ -855,7 +855,7 @@ leave_mm (unsigned long cpu)
 /*
  * Invalidate call-back
  */
-asmlinkage void
+static void
 smp_invalidate_interrupt(void)
 {
 	__u8 cpu = smp_processor_id();
@@ -989,7 +989,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 }
 
 /* enable the requested IRQs */
-asmlinkage void
+static void
 smp_enable_irq_interrupt(void)
 {
 	__u8 irq;
@@ -1038,7 +1038,7 @@ static struct call_data_struct * call_data;
  * previously set up.  This is used to schedule a function for
  * execution on all CPU's - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
-asmlinkage void
+static void
 smp_call_function_interrupt(void)
 {
 	void (*func) (void *info) = call_data->func;
@@ -1133,50 +1133,50 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
  * no local APIC, so I can't do this
  *
  * This function is currently a placeholder and is unused in the code */
-asmlinkage void
-smp_apic_timer_interrupt(struct pt_regs regs)
+fastcall void
+smp_apic_timer_interrupt(struct pt_regs *regs)
 {
-	wrapper_smp_local_timer_interrupt(&regs);
+	wrapper_smp_local_timer_interrupt(regs);
 }
 
 /* All of the QUAD interrupt GATES */
-asmlinkage void
-smp_qic_timer_interrupt(struct pt_regs regs)
+fastcall void
+smp_qic_timer_interrupt(struct pt_regs *regs)
 {
 	ack_QIC_CPI(QIC_TIMER_CPI);
-	wrapper_smp_local_timer_interrupt(&regs);
+	wrapper_smp_local_timer_interrupt(regs);
 }
 
-asmlinkage void
-smp_qic_invalidate_interrupt(void)
+fastcall void
+smp_qic_invalidate_interrupt(struct pt_regs *regs)
 {
 	ack_QIC_CPI(QIC_INVALIDATE_CPI);
 	smp_invalidate_interrupt();
 }
 
-asmlinkage void
-smp_qic_reschedule_interrupt(void)
+fastcall void
+smp_qic_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
 	smp_reschedule_interrupt();
 }
 
-asmlinkage void
-smp_qic_enable_irq_interrupt(void)
+fastcall void
+smp_qic_enable_irq_interrupt(struct pt_regs *regs)
 {
 	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
 	smp_enable_irq_interrupt();
 }
 
-asmlinkage void
-smp_qic_call_function_interrupt(void)
+fastcall void
+smp_qic_call_function_interrupt(struct pt_regs *regs)
 {
 	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
 	smp_call_function_interrupt();
 }
 
-asmlinkage void
-smp_vic_cpi_interrupt(struct pt_regs regs)
+fastcall void
+smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
 	__u8 cpu = smp_processor_id();
@@ -1186,7 +1186,7 @@ smp_vic_cpi_interrupt(struct pt_regs regs)
 	ack_VIC_CPI(VIC_CPI_LEVEL0);
 	if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
-		wrapper_smp_local_timer_interrupt(&regs);
+		wrapper_smp_local_timer_interrupt(regs);
 	if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
 		smp_invalidate_interrupt();
 	if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
...