Commit 4fa8d299 authored by Josh Poimboeuf, committed by Ingo Molnar

sched/debug: Remove several CONFIG_SCHEDSTATS guards

Clean up the sched code by removing several of the CONFIG_SCHEDSTATS
guards, using schedstat_*() macros where needed.
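
For reference, the schedstat_*() helpers this relies on behave roughly as
follows (a simplified sketch modeled on kernel/sched/stats.h as of this
series, not a verbatim copy). With CONFIG_SCHEDSTATS=y they test the
sched_schedstats static key, so disabled stats cost only a patched-out
branch; with CONFIG_SCHEDSTATS=n they compile away entirely:

  /* Sketch only: see kernel/sched/stats.h for the real definitions. */
  #ifdef CONFIG_SCHEDSTATS
  #define schedstat_enabled()	static_branch_unlikely(&sched_schedstats)
  #define schedstat_inc(var)	do { if (schedstat_enabled()) { var++; } } while (0)
  #define schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
  #define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
  #define schedstat_val(var)	(var)
  #else /* !CONFIG_SCHEDSTATS */
  #define schedstat_enabled()	0
  #define schedstat_inc(var)	do { } while (0)
  #define schedstat_add(var, amt)	do { } while (0)
  #define schedstat_set(var, val)	do { } while (0)
  #define schedstat_val(var)	0
  #endif

This is why the callers below can drop their #ifdef guards: each helper
already degrades to a no-op (or to 0 for schedstat_val()) when schedstats
are compiled out or runtime-disabled.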

Code size:

  !CONFIG_SCHEDSTATS defconfig:

      text	   data	    bss	     dec	    hex	filename
  10209818	4368184	1105920	15683922	 ef5152	vmlinux.before.nostats
  10209818	4368184	1105920	15683922	 ef5152	vmlinux.after.nostats

  CONFIG_SCHEDSTATS defconfig:

      text	   data	    bss	    dec	    hex	filename
  10214210	4370040	1105920	15690170	 ef69ba	vmlinux.before.stats
  10214210	4370680	1105920	15690810	 ef6c3a	vmlinux.after.stats
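
That is, the !CONFIG_SCHEDSTATS image is byte-for-byte identical, while the
CONFIG_SCHEDSTATS image grows by 640 bytes of data (4370680 - 4370040) and
not at all in text.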
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/e51e0ebe5af95ac295de720dd252e7c0d2142e4a.1466184592.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 20e1d486
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1629,13 +1629,15 @@ static inline int __set_cpus_allowed_ptr(struct task_struct *p,
 static void
 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 {
-#ifdef CONFIG_SCHEDSTATS
-	struct rq *rq = this_rq();
+	struct rq *rq;
 
-#ifdef CONFIG_SMP
-	int this_cpu = smp_processor_id();
+	if (!schedstat_enabled())
+		return;
+
+	rq = this_rq();
 
-	if (cpu == this_cpu) {
+#ifdef CONFIG_SMP
+	if (cpu == rq->cpu) {
 		schedstat_inc(rq->ttwu_local);
 		schedstat_inc(p->se.statistics.nr_wakeups_local);
 	} else {
@@ -1643,7 +1645,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 		schedstat_inc(p->se.statistics.nr_wakeups_remote);
 		rcu_read_lock();
-		for_each_domain(this_cpu, sd) {
+		for_each_domain(rq->cpu, sd) {
 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 				schedstat_inc(sd->ttwu_wake_remote);
 				break;
@@ -1654,7 +1656,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	if (wake_flags & WF_MIGRATED)
 		schedstat_inc(p->se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SMP */
 
 	schedstat_inc(rq->ttwu_count);
@@ -1662,8 +1663,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	if (wake_flags & WF_SYNC)
 		schedstat_inc(p->se.statistics.nr_wakeups_sync);
-
-#endif /* CONFIG_SCHEDSTATS */
 }
 
 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
@@ -2084,8 +2083,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	ttwu_queue(p, cpu, wake_flags);
 stat:
-	if (schedstat_enabled())
-		ttwu_stat(p, cpu, wake_flags);
+	ttwu_stat(p, cpu, wake_flags);
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -2134,8 +2132,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
 	ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 	ttwu_do_wakeup(rq, p, 0, cookie);
-	if (schedstat_enabled())
-		ttwu_stat(p, smp_processor_id(), 0);
+	ttwu_stat(p, smp_processor_id(), 0);
 out:
 	raw_spin_unlock(&p->pi_lock);
 }
@@ -7675,12 +7672,10 @@ void normalize_rt_tasks(void)
 		if (p->flags & PF_KTHREAD)
 			continue;
 
 		p->se.exec_start = 0;
-#ifdef CONFIG_SCHEDSTATS
-		p->se.statistics.wait_start = 0;
-		p->se.statistics.sleep_start = 0;
-		p->se.statistics.block_start = 0;
-#endif
+		schedstat_set(p->se.statistics.wait_start,  0);
+		schedstat_set(p->se.statistics.sleep_start, 0);
+		schedstat_set(p->se.statistics.block_start, 0);
 
 		if (!dl_task(p) && !rt_task(p)) {
 			/*
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -369,8 +369,12 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 #define P(F) \
 	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
+#define P_SCHEDSTAT(F) \
+	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
 #define PN(F) \
 	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+#define PN_SCHEDSTAT(F) \
+	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
 
 	if (!se)
 		return;
@@ -378,26 +382,27 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 	PN(se->exec_start);
 	PN(se->vruntime);
 	PN(se->sum_exec_runtime);
-#ifdef CONFIG_SCHEDSTATS
+
 	if (schedstat_enabled()) {
-		PN(se->statistics.wait_start);
-		PN(se->statistics.sleep_start);
-		PN(se->statistics.block_start);
-		PN(se->statistics.sleep_max);
-		PN(se->statistics.block_max);
-		PN(se->statistics.exec_max);
-		PN(se->statistics.slice_max);
-		PN(se->statistics.wait_max);
-		PN(se->statistics.wait_sum);
-		P(se->statistics.wait_count);
+		PN_SCHEDSTAT(se->statistics.wait_start);
+		PN_SCHEDSTAT(se->statistics.sleep_start);
+		PN_SCHEDSTAT(se->statistics.block_start);
+		PN_SCHEDSTAT(se->statistics.sleep_max);
+		PN_SCHEDSTAT(se->statistics.block_max);
+		PN_SCHEDSTAT(se->statistics.exec_max);
+		PN_SCHEDSTAT(se->statistics.slice_max);
+		PN_SCHEDSTAT(se->statistics.wait_max);
+		PN_SCHEDSTAT(se->statistics.wait_sum);
+		P_SCHEDSTAT(se->statistics.wait_count);
 	}
-#endif
 	P(se->load.weight);
 #ifdef CONFIG_SMP
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
 #endif
+#undef PN_SCHEDSTAT
 #undef PN
+#undef P_SCHEDSTAT
 #undef P
 }
 #endif
@@ -626,9 +631,7 @@ do { \
 #undef P64
 #endif
 
-#ifdef CONFIG_SCHEDSTATS
-#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
-
+#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
 	if (schedstat_enabled()) {
 		P(yld_count);
 		P(sched_count);
@@ -636,9 +639,8 @@ do { \
 		P(ttwu_count);
 		P(ttwu_local);
 	}
-
 #undef P
-#endif
+
 	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
@@ -868,10 +870,14 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
 #define P(F) \
 	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
+#define P_SCHEDSTAT(F) \
+	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
 #define __PN(F) \
 	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
 #define PN(F) \
 	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
+#define PN_SCHEDSTAT(F) \
+	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
 
 	PN(se.exec_start);
 	PN(se.vruntime);
@@ -881,37 +887,36 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.nr_migrations);
 
-#ifdef CONFIG_SCHEDSTATS
 	if (schedstat_enabled()) {
 		u64 avg_atom, avg_per_cpu;
 
-		PN(se.statistics.sum_sleep_runtime);
-		PN(se.statistics.wait_start);
-		PN(se.statistics.sleep_start);
-		PN(se.statistics.block_start);
-		PN(se.statistics.sleep_max);
-		PN(se.statistics.block_max);
-		PN(se.statistics.exec_max);
-		PN(se.statistics.slice_max);
-		PN(se.statistics.wait_max);
-		PN(se.statistics.wait_sum);
-		P(se.statistics.wait_count);
-		PN(se.statistics.iowait_sum);
-		P(se.statistics.iowait_count);
-		P(se.statistics.nr_migrations_cold);
-		P(se.statistics.nr_failed_migrations_affine);
-		P(se.statistics.nr_failed_migrations_running);
-		P(se.statistics.nr_failed_migrations_hot);
-		P(se.statistics.nr_forced_migrations);
-		P(se.statistics.nr_wakeups);
-		P(se.statistics.nr_wakeups_sync);
-		P(se.statistics.nr_wakeups_migrate);
-		P(se.statistics.nr_wakeups_local);
-		P(se.statistics.nr_wakeups_remote);
-		P(se.statistics.nr_wakeups_affine);
-		P(se.statistics.nr_wakeups_affine_attempts);
-		P(se.statistics.nr_wakeups_passive);
-		P(se.statistics.nr_wakeups_idle);
+		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
+		PN_SCHEDSTAT(se.statistics.wait_start);
+		PN_SCHEDSTAT(se.statistics.sleep_start);
+		PN_SCHEDSTAT(se.statistics.block_start);
+		PN_SCHEDSTAT(se.statistics.sleep_max);
+		PN_SCHEDSTAT(se.statistics.block_max);
+		PN_SCHEDSTAT(se.statistics.exec_max);
+		PN_SCHEDSTAT(se.statistics.slice_max);
+		PN_SCHEDSTAT(se.statistics.wait_max);
+		PN_SCHEDSTAT(se.statistics.wait_sum);
+		P_SCHEDSTAT(se.statistics.wait_count);
+		PN_SCHEDSTAT(se.statistics.iowait_sum);
+		P_SCHEDSTAT(se.statistics.iowait_count);
+		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
+		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
+		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
+		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
+		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
+		P_SCHEDSTAT(se.statistics.nr_wakeups);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
+		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
 
 		avg_atom = p->se.sum_exec_runtime;
 		if (nr_switches)
@@ -930,7 +935,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		__PN(avg_atom);
 		__PN(avg_per_cpu);
 	}
-#endif
+
 	__P(nr_switches);
 	SEQ_printf(m, "%-45s:%21Ld\n",
 		   "nr_voluntary_switches", (long long)p->nvcsw);
@@ -947,8 +952,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #endif
 	P(policy);
 	P(prio);
+#undef PN_SCHEDSTAT
 #undef PN
 #undef __PN
+#undef P_SCHEDSTAT
 #undef P
 #undef __P
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -821,26 +821,34 @@ static void update_curr_fair(struct rq *rq)
 	update_curr(cfs_rq_of(&rq->curr->se));
 }
 
-#ifdef CONFIG_SCHEDSTATS
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 wait_start = rq_clock(rq_of(cfs_rq));
+	u64 wait_start, prev_wait_start;
+
+	if (!schedstat_enabled())
+		return;
+
+	wait_start = rq_clock(rq_of(cfs_rq));
+	prev_wait_start = schedstat_val(se->statistics.wait_start);
 
 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
-	    likely(wait_start > se->statistics.wait_start))
-		wait_start -= se->statistics.wait_start;
+	    likely(wait_start > prev_wait_start))
+		wait_start -= prev_wait_start;
 
-	se->statistics.wait_start = wait_start;
+	schedstat_set(se->statistics.wait_start, wait_start);
 }
 
-static void
+static inline void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct task_struct *p;
 	u64 delta;
 
-	delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+	if (!schedstat_enabled())
+		return;
+
+	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
@@ -850,59 +858,67 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			 * time stamp can be adjusted to accumulate wait time
 			 * prior to migration.
 			 */
-			se->statistics.wait_start = delta;
+			schedstat_set(se->statistics.wait_start, delta);
 			return;
 		}
 		trace_sched_stat_wait(p, delta);
 	}
 
-	se->statistics.wait_max = max(se->statistics.wait_max, delta);
-	se->statistics.wait_count++;
-	se->statistics.wait_sum += delta;
-	se->statistics.wait_start = 0;
+	schedstat_set(se->statistics.wait_max,
+		      max(schedstat_val(se->statistics.wait_max), delta));
+	schedstat_inc(se->statistics.wait_count);
+	schedstat_add(se->statistics.wait_sum, delta);
+	schedstat_set(se->statistics.wait_start, 0);
 }
 
-static void
+static inline void
 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct task_struct *tsk = NULL;
+	u64 sleep_start, block_start;
+
+	if (!schedstat_enabled())
+		return;
+
+	sleep_start = schedstat_val(se->statistics.sleep_start);
+	block_start = schedstat_val(se->statistics.block_start);
 
 	if (entity_is_task(se))
 		tsk = task_of(se);
 
-	if (se->statistics.sleep_start) {
-		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
+	if (sleep_start) {
+		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->statistics.sleep_max))
-			se->statistics.sleep_max = delta;
+		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
+			schedstat_set(se->statistics.sleep_max, delta);
 
-		se->statistics.sleep_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
+		schedstat_set(se->statistics.sleep_start, 0);
+		schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
 			trace_sched_stat_sleep(tsk, delta);
 		}
 	}
 
-	if (se->statistics.block_start) {
-		u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
+	if (block_start) {
+		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->statistics.block_max))
-			se->statistics.block_max = delta;
+		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
+			schedstat_set(se->statistics.block_max, delta);
 
-		se->statistics.block_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
+		schedstat_set(se->statistics.block_start, 0);
+		schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				se->statistics.iowait_sum += delta;
-				se->statistics.iowait_count++;
+				schedstat_add(se->statistics.iowait_sum, delta);
+				schedstat_inc(se->statistics.iowait_count);
 				trace_sched_stat_iowait(tsk, delta);
 			}
@@ -929,6 +945,9 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+	if (!schedstat_enabled())
+		return;
+
 	/*
 	 * Are we enqueueing a waiting task? (for current tasks
 	 * a dequeue/enqueue event is a NOP)
@@ -943,6 +962,10 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 static inline void
 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
+	if (!schedstat_enabled())
+		return;
+
 	/*
 	 * Mark the end of the wait period if dequeueing a
 	 * waiting task:
@@ -950,45 +973,18 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		update_stats_wait_end(cfs_rq, se);
 
-	if (flags & DEQUEUE_SLEEP) {
-		if (entity_is_task(se)) {
-			struct task_struct *tsk = task_of(se);
+	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
+		struct task_struct *tsk = task_of(se);
 
-			if (tsk->state & TASK_INTERRUPTIBLE)
-				se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
-			if (tsk->state & TASK_UNINTERRUPTIBLE)
-				se->statistics.block_start = rq_clock(rq_of(cfs_rq));
-		}
+		if (tsk->state & TASK_INTERRUPTIBLE)
+			schedstat_set(se->statistics.sleep_start,
+				      rq_clock(rq_of(cfs_rq)));
+		if (tsk->state & TASK_UNINTERRUPTIBLE)
+			schedstat_set(se->statistics.block_start,
+				      rq_clock(rq_of(cfs_rq)));
 	}
-
 }
-#else
-static inline void
-update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-{
-}
-
-static inline void
-update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-{
-}
-#endif
 
 /*
  * We are picking a new current task - update its stats:
  */
@@ -3396,10 +3392,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		place_entity(cfs_rq, se, 0);
 
 	check_schedstat_required();
-	if (schedstat_enabled()) {
-		update_stats_enqueue(cfs_rq, se, flags);
-		check_spread(cfs_rq, se);
-	}
+	update_stats_enqueue(cfs_rq, se, flags);
+	check_spread(cfs_rq, se);
 	if (!curr)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
@@ -3466,8 +3460,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	update_curr(cfs_rq);
 	dequeue_entity_load_avg(cfs_rq, se);
 
-	if (schedstat_enabled())
-		update_stats_dequeue(cfs_rq, se, flags);
+	update_stats_dequeue(cfs_rq, se, flags);
 
 	clear_buddies(cfs_rq, se);
@@ -3541,25 +3534,25 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		 * a CPU. So account for the time it spent waiting on the
 		 * runqueue.
 		 */
-		if (schedstat_enabled())
-			update_stats_wait_end(cfs_rq, se);
+		update_stats_wait_end(cfs_rq, se);
 		__dequeue_entity(cfs_rq, se);
 		update_load_avg(se, 1);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
 	cfs_rq->curr = se;
-#ifdef CONFIG_SCHEDSTATS
+
 	/*
 	 * Track our maximum slice length, if the CPU's load is at
 	 * least twice that of our own weight (i.e. dont track it
 	 * when there are only lesser-weight tasks around):
 	 */
 	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
-		se->statistics.slice_max = max(se->statistics.slice_max,
-			se->sum_exec_runtime - se->prev_sum_exec_runtime);
+		schedstat_set(se->statistics.slice_max,
+			      max((u64)schedstat_val(se->statistics.slice_max),
+				  se->sum_exec_runtime - se->prev_sum_exec_runtime));
 	}
-#endif
+
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
@@ -3638,13 +3631,10 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	/* throttle cfs_rqs exceeding runtime */
 	check_cfs_rq_runtime(cfs_rq);
 
-	if (schedstat_enabled()) {
-		check_spread(cfs_rq, prev);
-		if (prev->on_rq)
-			update_stats_wait_start(cfs_rq, prev);
-	}
+	check_spread(cfs_rq, prev);
 
 	if (prev->on_rq) {
+		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
 		/* in !on_rq case, update occurred at dequeue */