Commit cb923159 authored by Sebastian Andrzej Siewior, committed by Thomas Gleixner

smp: Remove allocation mask from on_each_cpu_cond.*()

The allocation mask is no longer used by on_each_cpu_cond() and
on_each_cpu_cond_mask() and can be removed.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200117090137.1205765-4-bigeasy@linutronix.de
parent 67719ef2
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 				(void *)info, 1);
 	else
 		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-				(void *)info, 1, GFP_ATOMIC, cpumask);
+				(void *)info, 1, cpumask);
 }

 /*
......
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 void invalidate_bh_lrus(void)
 {
-	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
......
@@ -51,11 +51,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * processor.
  */
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags);
+		      void *info, bool wait);
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask);
+			   void *info, bool wait, const struct cpumask *mask);
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
......
@@ -679,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:	An arbitrary pointer to pass to both functions.
  * @wait:	If true, wait (atomically) until function has
  *		completed on other CPUs.
- * @gfp_flags:	GFP flags to use when allocating the cpumask
- *		used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
* CPUs going online during the call will not be seen or sent an IPI. * CPUs going online during the call will not be seen or sent an IPI.
@@ -692,8 +687,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * from a hardware interrupt handler or from a bottom half handler.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	int cpu = get_cpu();
@@ -710,10 +704,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);

 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-			      cpu_online_mask);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
......
@@ -69,8 +69,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * same condtions in UP and SMP.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	unsigned long flags;
@@ -85,9 +84,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);

 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
......
@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }

 /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment