Commit 786d6dc7 authored by Suresh Siddha's avatar Suresh Siddha Committed by Ingo Molnar

sched, nohz: Clean up the find_new_ilb() using sched groups nr_busy_cpus

nr_busy_cpus in the sched_group_power indicates whether the group
is semi-idle or not. This helps remove is_semi_idle_group() and
simplifies find_new_ilb() in the context of finding an optimal cpu
that can do idle load balancing.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20111202010832.656983582@sbsiddha-desk.sc.intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0b005cf5
...@@ -4733,7 +4733,6 @@ static int active_load_balance_cpu_stop(void *data) ...@@ -4733,7 +4733,6 @@ static int active_load_balance_cpu_stop(void *data)
*/ */
static struct { static struct {
cpumask_var_t idle_cpus_mask; cpumask_var_t idle_cpus_mask;
cpumask_var_t grp_idle_mask;
atomic_t nr_cpus; atomic_t nr_cpus;
unsigned long next_balance; /* in jiffy units */ unsigned long next_balance; /* in jiffy units */
} nohz ____cacheline_aligned; } nohz ____cacheline_aligned;
...@@ -4773,33 +4772,6 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) ...@@ -4773,33 +4772,6 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
for (sd = lowest_flag_domain(cpu, flag); \ for (sd = lowest_flag_domain(cpu, flag); \
(sd && (sd->flags & flag)); sd = sd->parent) (sd && (sd->flags & flag)); sd = sd->parent)
/**
* is_semi_idle_group - Checks if the given sched_group is semi-idle.
* @ilb_group: group to be checked for semi-idleness
*
* Returns: 1 if the group is semi-idle. 0 otherwise.
*
* We define a sched_group to be semi idle if it has atleast one idle-CPU
* and atleast one non-idle CPU. This helper function checks if the given
* sched_group is semi-idle or not.
*/
static inline int is_semi_idle_group(struct sched_group *ilb_group)
{
cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
sched_group_cpus(ilb_group));
/*
* A sched_group is semi-idle when it has atleast one busy cpu
* and atleast one idle cpu.
*/
if (cpumask_empty(nohz.grp_idle_mask))
return 0;
if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
return 0;
return 1;
}
/** /**
* find_new_ilb - Finds the optimum idle load balancer for nomination. * find_new_ilb - Finds the optimum idle load balancer for nomination.
* @cpu: The cpu which is nominating a new idle_load_balancer. * @cpu: The cpu which is nominating a new idle_load_balancer.
...@@ -4815,8 +4787,8 @@ static inline int is_semi_idle_group(struct sched_group *ilb_group) ...@@ -4815,8 +4787,8 @@ static inline int is_semi_idle_group(struct sched_group *ilb_group)
static int find_new_ilb(int cpu) static int find_new_ilb(int cpu)
{ {
int ilb = cpumask_first(nohz.idle_cpus_mask); int ilb = cpumask_first(nohz.idle_cpus_mask);
struct sched_group *ilbg;
struct sched_domain *sd; struct sched_domain *sd;
struct sched_group *ilb_group;
/* /*
* Have idle load balancer selection from semi-idle packages only * Have idle load balancer selection from semi-idle packages only
...@@ -4834,23 +4806,28 @@ static int find_new_ilb(int cpu) ...@@ -4834,23 +4806,28 @@ static int find_new_ilb(int cpu)
rcu_read_lock(); rcu_read_lock();
for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
ilb_group = sd->groups; ilbg = sd->groups;
do { do {
if (is_semi_idle_group(ilb_group)) { if (ilbg->group_weight !=
ilb = cpumask_first(nohz.grp_idle_mask); atomic_read(&ilbg->sgp->nr_busy_cpus)) {
ilb = cpumask_first_and(nohz.idle_cpus_mask,
sched_group_cpus(ilbg));
goto unlock; goto unlock;
} }
ilb_group = ilb_group->next; ilbg = ilbg->next;
} while (ilb_group != sd->groups); } while (ilbg != sd->groups);
} }
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
out_done: out_done:
return ilb; if (ilb < nr_cpu_ids && idle_cpu(ilb))
return ilb;
return nr_cpu_ids;
} }
#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
static inline int find_new_ilb(int call_cpu) static inline int find_new_ilb(int call_cpu)
...@@ -5588,7 +5565,6 @@ __init void init_sched_fair_class(void) ...@@ -5588,7 +5565,6 @@ __init void init_sched_fair_class(void)
#ifdef CONFIG_NO_HZ #ifdef CONFIG_NO_HZ
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
#endif #endif
#endif /* SMP */ #endif /* SMP */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment