Commit 52103be0 authored by Peter Zijlstra, committed by Ingo Molnar

smp: Optimize flush_smp_call_function_queue()

The call_single_queue can contain two different kinds of callbacks,
synchronous and asynchronous. The current interrupt handler runs them
in-order, which means that remote CPUs that are waiting for their
synchronous call can be delayed by running asynchronous callbacks.

Rework the interrupt handler to first run the synchronous callbacks.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200526161907.836818381@infradead.org
parent 19a1f5ec
@@ -209,9 +209,9 @@ void generic_smp_call_function_single_interrupt(void)
  */
 static void flush_smp_call_function_queue(bool warn_cpu_offline)
 {
-	struct llist_head *head;
-	struct llist_node *entry;
 	call_single_data_t *csd, *csd_next;
+	struct llist_node *entry, *prev;
+	struct llist_head *head;
 	static bool warned;
 
 	lockdep_assert_irqs_disabled();
@@ -235,19 +235,38 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 			  csd->func);
 	}
 
+	/*
+	 * First; run all SYNC callbacks, people are waiting for us.
+	 */
+	prev = NULL;
 	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 		smp_call_func_t func = csd->func;
 		void *info = csd->info;
 
 		/* Do we wait until *after* callback? */
 		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+			if (prev) {
+				prev->next = &csd_next->llist;
+			} else {
+				entry = &csd_next->llist;
+			}
 			func(info);
 			csd_unlock(csd);
 		} else {
-			csd_unlock(csd);
-			func(info);
+			prev = &csd->llist;
 		}
 	}
+
+	/*
+	 * Second; run all !SYNC callbacks.
+	 */
+	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+		smp_call_func_t func = csd->func;
+		void *info = csd->info;
+
+		csd_unlock(csd);
+		func(info);
+	}
 
 	/*
 	 * Handle irq works queued remotely by irq_work_queue_on().
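For illustration only: below is a minimal, self-contained userspace sketch of the two-pass pattern the diff introduces, written against a plain singly-linked list rather than the kernel's llist API. All names here (struct csd, CSD_SYNC, run_queue, say) are illustrative stand-ins, not kernel code. The first pass runs the SYNC callbacks and unlinks them via a prev pointer so waiters are released as early as possible; the second pass then walks whatever remains, which is exactly the ASYNC entries.

/*
 * Sketch of the two-pass SYNC-first pattern. Not kernel code; all
 * names are hypothetical.
 */
#include <stdio.h>

#define CSD_SYNC 0x01

struct csd {
	struct csd *next;
	void (*func)(void *info);
	void *info;
	unsigned int flags;
};

static void run_queue(struct csd *entry)
{
	struct csd *csd, *csd_next, *prev;

	/*
	 * First pass: run all SYNC callbacks and unlink them as we go.
	 * 'prev' tracks the last node kept on the list; a SYNC node is
	 * removed by pointing prev->next (or the list head) past it.
	 */
	prev = NULL;
	for (csd = entry; csd; csd = csd_next) {
		csd_next = csd->next;

		if (csd->flags & CSD_SYNC) {
			if (prev)
				prev->next = csd_next;
			else
				entry = csd_next;
			csd->func(csd->info);	/* waiter released here */
		} else {
			prev = csd;
		}
	}

	/* Second pass: only the ASYNC entries are left on the list. */
	for (csd = entry; csd; csd = csd_next) {
		csd_next = csd->next;
		csd->func(csd->info);
	}
}

static void say(void *info)
{
	puts(info);	/* info carries the label string */
}

int main(void)
{
	/* Build a -> b -> c by hand: async, sync, async. */
	struct csd c = { NULL, say, "async-2", 0 };
	struct csd b = { &c,   say, "sync-1",  CSD_SYNC };
	struct csd a = { &b,   say, "async-1", 0 };

	run_queue(&a);	/* prints sync-1 first, then async-1, async-2 */
	return 0;
}

Note also the lock-ordering detail visible in the diff: the SYNC path calls func() before csd_unlock(), since the remote waiter spins on the csd lock and must not be released until the callback has completed, whereas the ASYNC path unlocks first because nobody is waiting and the csd slot can be reused as soon as it is unlocked.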