Commit e188f0a5 authored by Peter Xu, committed by Ingo Molnar

MIPS: smp: Remove tick_broadcast_count

Now that smp_call_function_single_async() returns -EBUSY when the csd
object is still pending, the tick_broadcast_count counter is no longer
needed.
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20191216213125.9536-3-peterx@redhat.com
parent 5a18ceca
...
@@ -696,29 +696,22 @@ EXPORT_SYMBOL(flush_tlb_one);
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

-static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
 static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

 void tick_broadcast(const struct cpumask *mask)
 {
-	atomic_t *count;
 	call_single_data_t *csd;
 	int cpu;

 	for_each_cpu(cpu, mask) {
-		count = &per_cpu(tick_broadcast_count, cpu);
 		csd = &per_cpu(tick_broadcast_csd, cpu);
-		if (atomic_inc_return(count) == 1)
-			smp_call_function_single_async(cpu, csd);
+		smp_call_function_single_async(cpu, csd);
 	}
 }

 static void tick_broadcast_callee(void *info)
 {
-	int cpu = smp_processor_id();
 	tick_receive_broadcast();
-	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
 }

 static int __init tick_broadcast_init(void)
...