Commit 6fed85df authored by Linus Torvalds

Merge tag 'sched_urgent_for_v5.11_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Borislav Petkov:
 "Revert an attempt to not spread IRQ threads on isolated CPUs, which
  has a bunch of problems"

* tag 'sched_urgent_for_v5.11_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  Revert "lib: Restrict cpumask_local_spread to houskeeping CPUs"
parents 814daadb 2452483d
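
For context, cpumask_local_spread(i, node) returns the i-th CPU while preferring CPUs on the given NUMA node, wrapping around when i exceeds the CPU count. Drivers typically call it when distributing per-queue interrupts, which is why restricting it to housekeeping CPUs changed driver-visible behavior. Below is a minimal caller sketch; struct my_dev and my_dev_set_affinity_hints are hypothetical illustration names, while cpumask_local_spread(), dev_to_node(), cpumask_of() and irq_set_affinity_hint() are real kernel APIs as of v5.11.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>

/* Hypothetical device: one Linux IRQ per queue (illustration only). */
struct my_dev {
	struct device *dev;      /* underlying device, for dev_to_node() */
	unsigned int nr_queues;  /* number of queue interrupts */
	int *irqs;               /* IRQ number for each queue */
};

static void my_dev_set_affinity_hints(struct my_dev *mdev)
{
	unsigned int q, cpu;

	for (q = 0; q < mdev->nr_queues; q++) {
		/* Pick the q-th CPU, preferring the device's NUMA node. */
		cpu = cpumask_local_spread(q, dev_to_node(mdev->dev));
		irq_set_affinity_hint(mdev->irqs[q], cpumask_of(cpu));
	}
}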
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,7 +6,6 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 #include <linux/numa.h>
-#include <linux/sched/isolation.h>
 
 /**
  * cpumask_next - get the next cpu in a cpumask
@@ -206,27 +205,22 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu, hk_flags;
-	const struct cpumask *mask;
+	int cpu;
 
-	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
-	mask = housekeeping_cpumask(hk_flags);
 	/* Wrap: we always want a cpu. */
-	i %= cpumask_weight(mask);
+	i %= num_online_cpus();
 
 	if (node == NUMA_NO_NODE) {
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask)
 			if (i-- == 0)
 				return cpu;
-		}
 	} else {
 		/* NUMA first. */
-		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
 			if (i-- == 0)
 				return cpu;
-		}
 
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask) {
 			/* Skip NUMA nodes, done above. */
 			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
 				continue;
...
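
The restored selection logic can be modeled outside the kernel. The following standalone C program is a simplified sketch under stated assumptions: a fixed 8-CPU, two-node topology held in a plain array instead of cpumasks, all CPUs treated as online, and the NUMA_NO_NODE branch omitted. It shows the post-revert behavior: the index wraps over every online CPU, node-local CPUs are handed out first, then the remaining ones.

#include <stdio.h>

#define NR_CPUS 8

/* Toy topology (assumption): CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Model of the post-revert cpumask_local_spread() for a valid node. */
static int local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= NR_CPUS;

	/* NUMA first: hand out CPUs on the requested node. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_node[cpu] == node && i-- == 0)
			return cpu;

	/* Then everything else, skipping the node handled above. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_node[cpu] == node)
			continue;
		if (i-- == 0)
			return cpu;
	}
	return -1; /* unreachable: i was reduced below NR_CPUS */
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < 10; q++)
		printf("queue %u -> cpu %d\n", q, local_spread(q, 1));
	return 0;
}

For node 1 this prints CPUs 4, 5, 6, 7, then 0, 1, 2, 3, then wraps back to 4. Before this revert, the same walk was confined to the housekeeping mask, so isolated CPUs were never returned; the revert restores the original spread over all online CPUs.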