Commit 141965c7 authored by Alex Shi, committed by Ingo Molnar

Revert "sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking"

Remove the CONFIG_FAIR_GROUP_SCHED guard that covers the runnable info, so
that the runnable load variables can be used.

Also remove two CONFIG_FAIR_GROUP_SCHED guards that were not in the reverted
patch (they were introduced in 9ee474f5) but also need to be reverted.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/51CA76A3.3050207@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent be7002e6
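For context, a minimal, self-contained C sketch of the effect of this revert (not part of the patch; the stand-in struct definitions, the CONFIG_* defines, and main() below are assumptions for illustration only): the per-entity load-tracking fields are compiled in whenever CONFIG_SMP is set, rather than requiring CONFIG_FAIR_GROUP_SCHED as well, so SMP-only builds can read the runnable load variables.

/* Sketch only: stand-in types so this compiles outside the kernel tree. */
#include <stdio.h>

#define CONFIG_SMP 1            /* stand-in for the Kconfig option */
/* CONFIG_FAIR_GROUP_SCHED deliberately left undefined */

struct sched_avg {
        unsigned long runnable_avg_sum;
        unsigned long runnable_avg_period;
};

struct sched_entity {
#ifdef CONFIG_SMP               /* after the revert: SMP alone is enough */
        /* Per-entity load-tracking */
        struct sched_avg avg;
#endif
};

int main(void)
{
        struct sched_entity se = { 0 };

        se.avg.runnable_avg_sum = 1024;         /* arbitrary demo values */
        se.avg.runnable_avg_period = 2048;

        /*
         * Before the revert these fields existed only when both CONFIG_SMP
         * and CONFIG_FAIR_GROUP_SCHED were set, so SMP-only load-balance
         * code could not rely on them; now any SMP build has them.
         */
        printf("sum=%lu period=%lu\n",
               se.avg.runnable_avg_sum, se.avg.runnable_avg_period);
        return 0;
}

The diff below applies exactly this guard change to struct sched_entity, __sched_fork(), the fair-class load-tracking code, and struct cfs_rq.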
@@ -994,12 +994,7 @@ struct sched_entity {
 	struct cfs_rq *my_q;
 #endif
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 	/* Per-entity load-tracking */
 	struct sched_avg avg;
 #endif
...
@@ -1611,12 +1611,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.vruntime = 0;
 	INIT_LIST_HEAD(&p->se.group_node);
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 	p->se.avg.runnable_avg_period = 0;
 	p->se.avg.runnable_avg_sum = 0;
 #endif
...
@@ -1128,8 +1128,7 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
-/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+#ifdef CONFIG_SMP
 /*
  * We choose a half-life close to 1 scheduling period.
  * Note: The tables below are dependent on this value.
@@ -3430,12 +3429,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	return new_cpu;
 }
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
@@ -3459,7 +3452,6 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 		atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
 	}
 }
-#endif
 #endif /* CONFIG_SMP */
 static unsigned long
@@ -5861,7 +5853,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 		se->vruntime -= cfs_rq->min_vruntime;
 	}
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	/*
 	 * Remove our load from contribution when we leave sched_fair
 	 * and ensure we don't carry in an old decay_count if we
@@ -5920,7 +5912,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifndef CONFIG_64BIT
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
 #endif
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	atomic64_set(&cfs_rq->decay_counter, 1);
 	atomic64_set(&cfs_rq->removed_load, 0);
 #endif
@@ -6162,9 +6154,8 @@ const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq = select_task_rq_fair,
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	.migrate_task_rq = migrate_task_rq_fair,
-#endif
 	.rq_online = rq_online_fair,
 	.rq_offline = rq_offline_fair,
...
@@ -269,12 +269,6 @@ struct cfs_rq {
 #endif
 #ifdef CONFIG_SMP
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
	 * CFS Load tracking
	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
@@ -284,9 +278,9 @@ struct cfs_rq {
 	u64 runnable_load_avg, blocked_load_avg;
 	atomic64_t decay_counter, removed_load;
 	u64 last_decay;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-/* These always depend on CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	/* Required to track per-cpu representation of a task_group */
 	u32 tg_runnable_contrib;
 	u64 tg_load_contrib;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -1027,17 +1021,8 @@ extern void update_group_power(struct sched_domain *sd, int cpu);
 extern void trigger_load_balance(struct rq *rq, int cpu);
 extern void idle_balance(int this_cpu, struct rq *this_rq);
-/*
- * Only depends on SMP, FAIR_GROUP_SCHED may be removed when runnable_avg
- * becomes useful in lb
- */
-#if defined(CONFIG_FAIR_GROUP_SCHED)
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
-#else
-static inline void idle_enter_fair(struct rq *this_rq) {}
-static inline void idle_exit_fair(struct rq *this_rq) {}
-#endif
 #else /* CONFIG_SMP */
...