Commit b2a02fc4 authored by Peter Zijlstra, committed by Ingo Molnar

smp: Optimize send_call_function_single_ipi()

Just like the ttwu_queue_remote() IPI, make use of _TIF_POLLING_NRFLAG
to avoid sending IPIs to idle CPUs.

[ mingo: Fix UP build bug. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200526161907.953304789@infradead.org
parent afaa653c
...@@ -2296,6 +2296,16 @@ static void wake_csd_func(void *info) ...@@ -2296,6 +2296,16 @@ static void wake_csd_func(void *info)
sched_ttwu_pending(); sched_ttwu_pending();
} }
/*
 * Trigger execution of queued single-call work on @cpu.
 *
 * Like the ttwu_queue_remote() path, exploit _TIF_POLLING_NRFLAG: when the
 * target CPU is idle and polling, set_nr_if_polling() makes it notice the
 * pending work on its own, so no real IPI has to be sent.
 */
void send_call_function_single_ipi(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (set_nr_if_polling(rq->idle)) {
		/* Idle CPU will pick the work up when it stops polling. */
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		arch_send_call_function_single_ipi(cpu);
	}
}
/* /*
* Queue a task on the target CPUs wake_list and wake the CPU via IPI if * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
* necessary. The wakee CPU on receipt of the IPI will queue the task * necessary. The wakee CPU on receipt of the IPI will queue the task
......
...@@ -289,6 +289,11 @@ static void do_idle(void) ...@@ -289,6 +289,11 @@ static void do_idle(void)
*/ */
smp_mb__after_atomic(); smp_mb__after_atomic();
/*
* RCU relies on this call to be done outside of an RCU read-side
* critical section.
*/
flush_smp_call_function_from_idle();
sched_ttwu_pending(); sched_ttwu_pending();
schedule_idle(); schedule_idle();
......
...@@ -1506,11 +1506,12 @@ static inline void unregister_sched_domain_sysctl(void) ...@@ -1506,11 +1506,12 @@ static inline void unregister_sched_domain_sysctl(void)
} }
#endif #endif
#else extern void flush_smp_call_function_from_idle(void);
#else /* !CONFIG_SMP: */
static inline void flush_smp_call_function_from_idle(void) { }
static inline void sched_ttwu_pending(void) { } static inline void sched_ttwu_pending(void) { }
#endif
#endif /* CONFIG_SMP */
#include "stats.h" #include "stats.h"
#include "autogroup.h" #include "autogroup.h"
......
...@@ -135,6 +135,8 @@ static __always_inline void csd_unlock(call_single_data_t *csd) ...@@ -135,6 +135,8 @@ static __always_inline void csd_unlock(call_single_data_t *csd)
static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data); static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
extern void send_call_function_single_ipi(int cpu);
/* /*
* Insert a previously allocated call_single_data_t element * Insert a previously allocated call_single_data_t element
* for execution on the given CPU. data must already have * for execution on the given CPU. data must already have
...@@ -178,7 +180,7 @@ static int generic_exec_single(int cpu, call_single_data_t *csd, ...@@ -178,7 +180,7 @@ static int generic_exec_single(int cpu, call_single_data_t *csd,
* equipped to do the right thing... * equipped to do the right thing...
*/ */
if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
arch_send_call_function_single_ipi(cpu); send_call_function_single_ipi(cpu);
return 0; return 0;
} }
...@@ -278,6 +280,18 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) ...@@ -278,6 +280,18 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
} }
} }
/*
 * Run pending smp_call_function work from the idle loop.
 *
 * This is the flip side of send_call_function_single_ipi() skipping the IPI
 * for polling-idle CPUs: an idle CPU must drain its call_single_queue itself
 * before going back to sleep or scheduling.
 */
void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	/* Unlocked peek; an empty queue is the common case. */
	if (!llist_empty(this_cpu_ptr(&call_single_queue))) {
		local_irq_save(flags);
		flush_smp_call_function_queue(true);
		local_irq_restore(flags);
	}
}
/* /*
* smp_call_function_single - Run a function on a specific CPU * smp_call_function_single - Run a function on a specific CPU
* @func: The function to run. This must be fast and non-blocking. * @func: The function to run. This must be fast and non-blocking.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment