Commit 19926630 authored by David S. Miller

sparc64: Call xcall_deliver() directly in some cases.

For these cases the callers make sure:

1) The cpus indicated are online.

2) The current cpu is not in the list of indicated cpus.

Therefore we can pass a pointer to the mask directly.

One of the motivations in this transformation is to make use of
"&cpumask_of_cpu(cpu)" which evaluates to a pointer to constant
data in the kernel and thus takes up no stack space.

Hopefully someone in the future will change the interface of
arch_send_call_function_ipi() such that it passes a const cpumask_t
pointer so that this will optimize even further.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cd5bc89d
@@ -792,16 +792,15 @@ extern unsigned long xcall_call_function;
 void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
+	xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
 }
 
 extern unsigned long xcall_call_function_single;
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	cpumask_t mask = cpumask_of_cpu(cpu);
-
-	smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
+	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
+		      &cpumask_of_cpu(cpu));
 }
 
 /* Send cross call to all processors except self. */
@@ -959,24 +958,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	put_cpu();
 }
 
-static void __smp_receive_signal_mask(cpumask_t mask)
-{
-	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
-}
-
-void smp_receive_signal(int cpu)
-{
-	cpumask_t mask = cpumask_of_cpu(cpu);
-
-	if (cpu_online(cpu))
-		__smp_receive_signal_mask(mask);
-}
-
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
-{
-	clear_softint(1 << irq);
-}
-
 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
@@ -1374,7 +1355,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 void smp_send_reschedule(int cpu)
 {
-	smp_receive_signal(cpu);
+	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
+		      &cpumask_of_cpu(cpu));
+}
+
+void smp_receive_signal_client(int irq, struct pt_regs *regs)
+{
+	clear_softint(1 << irq);
 }
 
 /* This is a nop because we capture all other cpus
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment