Commit eb755805 authored by Peter Zijlstra, committed by Ingo Molnar

sched: extract walk_tg_tree()

Extract walk_tg_tree() and make it a little more generic so we can use it
in the schedulability test.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0b148fa0
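The generalized interface boils down to a single callback type, int (*tg_visitor)(struct task_group *, void *): walk_tg_tree() calls @down on the way into each group and @up on the way out, and a non-zero return from either visitor aborts the walk and is propagated to the caller. The following is a minimal userspace model of that contract, not part of the patch; struct group, walk() and the callbacks are illustrative stand-ins, and plain recursion replaces the kernel's goto-based iteration and RCU protection.

#include <stdio.h>

struct group {
        const char *name;
        struct group *children[4];      /* NULL-terminated, at most 4 */
};

typedef int (*visitor)(struct group *, void *);

/* Depth-first walk: @down on entry, @up on exit, non-zero return aborts. */
static int walk(struct group *g, visitor down, visitor up, void *data)
{
        int ret = (*down)(g, data);
        if (ret)
                return ret;

        for (int i = 0; i < 4 && g->children[i]; i++) {
                ret = walk(g->children[i], down, up, data);
                if (ret)
                        return ret;
        }
        return (*up)(g, data);
}

static int print_down(struct group *g, void *data)
{
        printf("down: %s (data=%s)\n", g->name, (const char *)data);
        return 0;
}

/* Do-nothing visitor, the counterpart of tg_nop() in the patch. */
static int nop(struct group *g, void *data)
{
        (void)g;
        (void)data;
        return 0;
}

int main(void)
{
        struct group leaf  = { "leaf",  { NULL } };
        struct group child = { "child", { &leaf } };
        struct group root  = { "root",  { &child } };

        return walk(&root, print_down, nop, "ctx");
}

tg_nop() in the patch plays the same role as nop() here: a do-nothing visitor for the direction a particular caller does not need.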
@@ -1387,38 +1387,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 	update_load_sub(&rq->load, load);
 }
 
-#ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
-static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
-
-	return rq->avg_load_per_task;
-}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
+#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED))
+typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
  * Iterate the full tree, calling @down when first entering a node and @up when
  * leaving it for the final time.
  */
-static void
-walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
+static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
+	int ret;
 
 	rcu_read_lock();
 	parent = &root_task_group;
 down:
-	(*down)(parent, cpu, sd);
+	ret = (*down)(parent, data);
+	if (ret)
+		goto out_unlock;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1426,14 +1412,42 @@ walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
 up:
 		continue;
 	}
-	(*up)(parent, cpu, sd);
+	ret = (*up)(parent, data);
+	if (ret)
+		goto out_unlock;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
+out_unlock:
 	rcu_read_unlock();
+
+	return ret;
+}
+
+static int tg_nop(struct task_group *tg, void *data)
+{
+	return 0;
 }
+#endif
+
+#ifdef CONFIG_SMP
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -1493,11 +1507,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
  * This needs to be done in a bottom-up fashion because the rq weight of a
  * parent group depends on the shares of its child groups.
  */
-static void
-tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long rq_weight = 0;
 	unsigned long shares = 0;
+	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
@@ -1522,6 +1536,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 		__update_group_shares_cpu(tg, i, shares, rq_weight);
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
+
+	return 0;
 }
 
 /*
@@ -1529,10 +1545,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static void
-tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_load_down(struct task_group *tg, void *data)
 {
 	unsigned long load;
+	long cpu = (long)data;
 
 	if (!tg->parent) {
 		load = cpu_rq(cpu)->load.weight;
@@ -1543,11 +1559,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
-}
 
-static void
-tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
+	return 0;
 }
 
 static void update_shares(struct sched_domain *sd)
@@ -1557,7 +1570,7 @@ static void update_shares(struct sched_domain *sd)
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
-		walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+		walk_tg_tree(tg_nop, tg_shares_up, sd);
 	}
 }
 
@@ -1568,9 +1581,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 	spin_lock(&rq->lock);
 }
 
-static void update_h_load(int cpu)
+static void update_h_load(long cpu)
 {
-	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
 #else
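Both converted callers route their per-walk context through the single void *data argument: update_shares() passes the struct sched_domain pointer through unchanged and tg_shares_up() casts it back, while update_h_load() packs the cpu number itself into the pointer value and tg_load_down() recovers it with (long)data. A minimal sketch of that second, cast-based idiom (the names below are made up for illustration):

#include <stdio.h>

/* Mirrors tg_load_down(): recover a small integer from the void * cookie. */
static int visit(void *data)
{
        long cpu = (long)data;

        printf("visiting cpu %ld\n", cpu);
        return 0;
}

int main(void)
{
        long cpu = 3;

        /* Mirrors update_h_load(): pack the cpu number into the pointer value. */
        return visit((void *)cpu);
}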