Commit 6e83125c authored by Peter Zijlstra, committed by Thomas Gleixner

sched/fair: Remove idle_balance() declaration in sched.h

Remove idle_balance() from the public life; also reduce some #ifdef
clutter by folding the pick_next_task_fair() idle path into
idle_balance().

Cc: mingo@kernel.org
Reported-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140211151148.GP27965@twins.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent eb7a59b2
...@@ -2374,13 +2374,13 @@ static inline void __update_group_entity_contrib(struct sched_entity *se) ...@@ -2374,13 +2374,13 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
se->avg.load_avg_contrib >>= NICE_0_SHIFT; se->avg.load_avg_contrib >>= NICE_0_SHIFT;
} }
} }
#else #else /* CONFIG_FAIR_GROUP_SCHED */
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq, static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
int force_update) {} int force_update) {}
static inline void __update_tg_runnable_avg(struct sched_avg *sa, static inline void __update_tg_runnable_avg(struct sched_avg *sa,
struct cfs_rq *cfs_rq) {} struct cfs_rq *cfs_rq) {}
static inline void __update_group_entity_contrib(struct sched_entity *se) {} static inline void __update_group_entity_contrib(struct sched_entity *se) {}
#endif #endif /* CONFIG_FAIR_GROUP_SCHED */
static inline void __update_task_entity_contrib(struct sched_entity *se) static inline void __update_task_entity_contrib(struct sched_entity *se)
{ {
...@@ -2571,6 +2571,8 @@ void idle_exit_fair(struct rq *this_rq) ...@@ -2571,6 +2571,8 @@ void idle_exit_fair(struct rq *this_rq)
update_rq_runnable_avg(this_rq, 0); update_rq_runnable_avg(this_rq, 0);
} }
static int idle_balance(struct rq *this_rq);
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
static inline void update_entity_load_avg(struct sched_entity *se, static inline void update_entity_load_avg(struct sched_entity *se,
...@@ -2584,6 +2586,12 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq, ...@@ -2584,6 +2586,12 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
int sleep) {} int sleep) {}
static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
int force_update) {} int force_update) {}
static inline int idle_balance(struct rq *rq)
{
return 0;
}
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
...@@ -4677,7 +4685,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) ...@@ -4677,7 +4685,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
struct sched_entity *se; struct sched_entity *se;
struct task_struct *p; struct task_struct *p;
again: __maybe_unused again:
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
if (!cfs_rq->nr_running) if (!cfs_rq->nr_running)
goto idle; goto idle;
...@@ -4775,18 +4783,8 @@ again: __maybe_unused ...@@ -4775,18 +4783,8 @@ again: __maybe_unused
return p; return p;
idle: idle:
#ifdef CONFIG_SMP if (idle_balance(rq)) /* drops rq->lock */
idle_enter_fair(rq);
/*
* We must set idle_stamp _before_ calling idle_balance(), such that we
* measure the duration of idle_balance() as idle time.
*/
rq->idle_stamp = rq_clock(rq);
if (idle_balance(rq)) { /* drops rq->lock */
rq->idle_stamp = 0;
goto again; goto again;
}
#endif
return NULL; return NULL;
} }
...@@ -6634,7 +6632,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, ...@@ -6634,7 +6632,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
* idle_balance is called by schedule() if this_cpu is about to become * idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs. * idle. Attempts to pull tasks from other CPUs.
*/ */
int idle_balance(struct rq *this_rq) static int idle_balance(struct rq *this_rq)
{ {
struct sched_domain *sd; struct sched_domain *sd;
int pulled_task = 0; int pulled_task = 0;
...@@ -6642,8 +6640,15 @@ int idle_balance(struct rq *this_rq) ...@@ -6642,8 +6640,15 @@ int idle_balance(struct rq *this_rq)
u64 curr_cost = 0; u64 curr_cost = 0;
int this_cpu = this_rq->cpu; int this_cpu = this_rq->cpu;
idle_enter_fair(this_rq);
/*
* We must set idle_stamp _before_ calling idle_balance(), such that we
* measure the duration of idle_balance() as idle time.
*/
this_rq->idle_stamp = rq_clock(this_rq);
if (this_rq->avg_idle < sysctl_sched_migration_cost) if (this_rq->avg_idle < sysctl_sched_migration_cost)
return 0; goto out;
/* /*
* Drop the rq->lock, but keep IRQ/preempt disabled. * Drop the rq->lock, but keep IRQ/preempt disabled.
...@@ -6692,8 +6697,10 @@ int idle_balance(struct rq *this_rq) ...@@ -6692,8 +6697,10 @@ int idle_balance(struct rq *this_rq)
* While browsing the domains, we released the rq lock. * While browsing the domains, we released the rq lock.
* A task could have be enqueued in the meantime * A task could have be enqueued in the meantime
*/ */
if (this_rq->nr_running && !pulled_task) if (this_rq->nr_running && !pulled_task) {
return 1; pulled_task = 1;
goto out;
}
if (pulled_task || time_after(jiffies, this_rq->next_balance)) { if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
/* /*
...@@ -6706,6 +6713,10 @@ int idle_balance(struct rq *this_rq) ...@@ -6706,6 +6713,10 @@ int idle_balance(struct rq *this_rq)
if (curr_cost > this_rq->max_idle_balance_cost) if (curr_cost > this_rq->max_idle_balance_cost)
this_rq->max_idle_balance_cost = curr_cost; this_rq->max_idle_balance_cost = curr_cost;
out:
if (pulled_task)
this_rq->idle_stamp = 0;
return pulled_task; return pulled_task;
} }
......
...@@ -1163,17 +1163,10 @@ extern const struct sched_class idle_sched_class; ...@@ -1163,17 +1163,10 @@ extern const struct sched_class idle_sched_class;
extern void update_group_power(struct sched_domain *sd, int cpu); extern void update_group_power(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq); extern void trigger_load_balance(struct rq *rq);
extern int idle_balance(struct rq *this_rq);
extern void idle_enter_fair(struct rq *this_rq); extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq); extern void idle_exit_fair(struct rq *this_rq);
#else /* CONFIG_SMP */
static inline void idle_balance(int cpu, struct rq *rq)
{
}
#endif #endif
extern void sysrq_sched_debug_show(void); extern void sysrq_sched_debug_show(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment