Commit ae92882e authored by Josh Poimboeuf, committed by Ingo Molnar

sched/debug: Clean up schedstat macros

The schedstat_*() macros are inconsistent: most of them take a pointer
and a field which the macro combines, whereas schedstat_set() takes the
already combined ptr->field.

The already combined ptr->field argument is actually more intuitive and
easier to use, and there's no reason to require the user to split the
variable up, so convert the macros to use the combined argument.
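
For example, a minimal before/after sketch of the call sites (the fields shown
are taken from the hunks below; nothing here is API beyond the patch itself):

    /* before: pointer and field passed separately, pasted together by the macro */
    schedstat_inc(rq, ttwu_count);
    schedstat_add(cfs_rq, exec_clock, delta_exec);
    schedstat_set(var, val);                 /* already took the combined form */

    /* after: every macro takes the already combined expression */
    schedstat_inc(rq->ttwu_count);
    schedstat_add(cfs_rq->exec_clock, delta_exec);
    schedstat_set(var, val);                 /* unchanged */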
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/54953ca25bb579f3a5946432dee409b0e05222c6.1466184592.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1a3d027c
@@ -1636,16 +1636,16 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	int this_cpu = smp_processor_id();
 	if (cpu == this_cpu) {
-		schedstat_inc(rq, ttwu_local);
-		schedstat_inc(p, se.statistics.nr_wakeups_local);
+		schedstat_inc(rq->ttwu_local);
+		schedstat_inc(p->se.statistics.nr_wakeups_local);
 	} else {
 		struct sched_domain *sd;
-		schedstat_inc(p, se.statistics.nr_wakeups_remote);
+		schedstat_inc(p->se.statistics.nr_wakeups_remote);
 		rcu_read_lock();
 		for_each_domain(this_cpu, sd) {
 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-				schedstat_inc(sd, ttwu_wake_remote);
+				schedstat_inc(sd->ttwu_wake_remote);
 				break;
 			}
 		}
@@ -1653,15 +1653,15 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	}
 	if (wake_flags & WF_MIGRATED)
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+		schedstat_inc(p->se.statistics.nr_wakeups_migrate);
 #endif /* CONFIG_SMP */
-	schedstat_inc(rq, ttwu_count);
-	schedstat_inc(p, se.statistics.nr_wakeups);
+	schedstat_inc(rq->ttwu_count);
+	schedstat_inc(p->se.statistics.nr_wakeups);
 	if (wake_flags & WF_SYNC)
-		schedstat_inc(p, se.statistics.nr_wakeups_sync);
+		schedstat_inc(p->se.statistics.nr_wakeups_sync);
 #endif /* CONFIG_SCHEDSTATS */
 }
@@ -3237,7 +3237,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-	schedstat_inc(this_rq(), sched_count);
+	schedstat_inc(this_rq()->sched_count);
 }
 /*
@@ -4849,7 +4849,7 @@ SYSCALL_DEFINE0(sched_yield)
 {
 	struct rq *rq = this_rq_lock();
-	schedstat_inc(rq, yld_count);
+	schedstat_inc(rq->yld_count);
 	current->sched_class->yield_task(rq);
 	/*
@@ -5000,7 +5000,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
 	if (yielded) {
-		schedstat_inc(rq, yld_count);
+		schedstat_inc(rq->yld_count);
 		/*
 		 * Make p's CPU reschedule; pick_next_entity takes care of
 		 * fairness.
...
@@ -429,9 +429,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 		p->prio);
 	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-		SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
+		SPLIT_NS(schedstat_val(p->se.statistics.wait_sum)),
 		SPLIT_NS(p->se.sum_exec_runtime),
-		SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+		SPLIT_NS(schedstat_val(p->se.statistics.sum_sleep_runtime)));
 #ifdef CONFIG_NUMA_BALANCING
 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
...
@@ -800,7 +800,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 		      max(delta_exec, curr->statistics.exec_max));
 	curr->sum_exec_runtime += delta_exec;
-	schedstat_add(cfs_rq, exec_clock, delta_exec);
+	schedstat_add(cfs_rq->exec_clock, delta_exec);
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_min_vruntime(cfs_rq);
@@ -3275,7 +3275,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		d = -d;
 	if (d > 3*sysctl_sched_latency)
-		schedstat_inc(cfs_rq, nr_spread_over);
+		schedstat_inc(cfs_rq->nr_spread_over);
 #endif
 }
@@ -5164,13 +5164,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 	balanced = this_eff_load <= prev_eff_load;
-	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
+	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
 	if (!balanced)
 		return 0;
-	schedstat_inc(sd, ttwu_move_affine);
-	schedstat_inc(p, se.statistics.nr_wakeups_affine);
+	schedstat_inc(sd->ttwu_move_affine);
+	schedstat_inc(p->se.statistics.nr_wakeups_affine);
 	return 1;
 }
@@ -6183,7 +6183,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
 		int cpu;
-		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
 		env->flags |= LBF_SOME_PINNED;
@@ -6214,7 +6214,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	env->flags &= ~LBF_ALL_PINNED;
 	if (task_running(env->src_rq, p)) {
-		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
+		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
 		return 0;
 	}
@@ -6231,13 +6231,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 		if (tsk_cache_hot == 1) {
-			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
-			schedstat_inc(p, se.statistics.nr_forced_migrations);
+			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
+			schedstat_inc(p->se.statistics.nr_forced_migrations);
 		}
 		return 1;
 	}
-	schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
+	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
 	return 0;
 }
@@ -6277,7 +6277,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
 		 * so we can safely collect stats here rather than
 		 * inside detach_tasks().
 		 */
-		schedstat_inc(env->sd, lb_gained[env->idle]);
+		schedstat_inc(env->sd->lb_gained[env->idle]);
 		return p;
 	}
 	return NULL;
@@ -6369,7 +6369,7 @@ static int detach_tasks(struct lb_env *env)
 	 * so we can safely collect detach_one_task() stats here rather
 	 * than inside detach_one_task().
 	 */
-	schedstat_add(env->sd, lb_gained[env->idle], detached);
+	schedstat_add(env->sd->lb_gained[env->idle], detached);
 	return detached;
 }
@@ -7510,7 +7510,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	cpumask_copy(cpus, cpu_active_mask);
-	schedstat_inc(sd, lb_count[idle]);
+	schedstat_inc(sd->lb_count[idle]);
 redo:
 	if (!should_we_balance(&env)) {
@@ -7520,19 +7520,19 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	group = find_busiest_group(&env);
 	if (!group) {
-		schedstat_inc(sd, lb_nobusyg[idle]);
+		schedstat_inc(sd->lb_nobusyg[idle]);
 		goto out_balanced;
 	}
 	busiest = find_busiest_queue(&env, group);
 	if (!busiest) {
-		schedstat_inc(sd, lb_nobusyq[idle]);
+		schedstat_inc(sd->lb_nobusyq[idle]);
 		goto out_balanced;
 	}
 	BUG_ON(busiest == env.dst_rq);
-	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
+	schedstat_add(sd->lb_imbalance[idle], env.imbalance);
 	env.src_cpu = busiest->cpu;
 	env.src_rq = busiest;
@@ -7639,7 +7639,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	}
 	if (!ld_moved) {
-		schedstat_inc(sd, lb_failed[idle]);
+		schedstat_inc(sd->lb_failed[idle]);
 		/*
 		 * Increment the failure counter only on periodic balance.
 		 * We do not want newidle balance, which can be very
@@ -7722,7 +7722,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	 * we can't migrate them. Let the imbalance flag set so parent level
 	 * can try to migrate them.
	 */
-	schedstat_inc(sd, lb_balanced[idle]);
+	schedstat_inc(sd->lb_balanced[idle]);
 	sd->nr_balance_failed = 0;
@@ -7915,15 +7915,15 @@ static int active_load_balance_cpu_stop(void *data)
 			.idle		= CPU_IDLE,
 		};
-		schedstat_inc(sd, alb_count);
+		schedstat_inc(sd->alb_count);
 		p = detach_one_task(&env);
 		if (p) {
-			schedstat_inc(sd, alb_pushed);
+			schedstat_inc(sd->alb_pushed);
 			/* Active balancing done, reset the failure counter. */
 			sd->nr_balance_failed = 0;
 		} else {
-			schedstat_inc(sd, alb_failed);
+			schedstat_inc(sd->alb_failed);
 		}
 	}
 	rcu_read_unlock();
...
@@ -28,7 +28,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie c
 {
 	put_prev_task(rq, prev);
-	schedstat_inc(rq, sched_goidle);
+	schedstat_inc(rq->sched_goidle);
 	return rq->idle;
 }
...
@@ -29,11 +29,11 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 	if (rq)
 		rq->rq_sched_info.run_delay += delta;
 }
-# define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
-# define schedstat_inc(rq, field)	do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
-# define schedstat_add(rq, field, amt)	do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
-# define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
-# define schedstat_val(rq, field)	((schedstat_enabled()) ? (rq)->field : 0)
+#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
+#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
+#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
+#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
+#define schedstat_val(var)		((schedstat_enabled()) ? (var) : 0)
 #else /* !CONFIG_SCHEDSTATS */
 static inline void
@@ -45,12 +45,12 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
-# define schedstat_enabled()		0
-# define schedstat_inc(rq, field)	do { } while (0)
-# define schedstat_add(rq, field, amt)	do { } while (0)
-# define schedstat_set(var, val)	do { } while (0)
-# define schedstat_val(rq, field)	0
-#endif
+#define schedstat_enabled()		0
+#define schedstat_inc(var)		do { } while (0)
+#define schedstat_add(var, amt)		do { } while (0)
+#define schedstat_set(var, val)		do { } while (0)
+#define schedstat_val(var)		0
+#endif /* CONFIG_SCHEDSTATS */
 #ifdef CONFIG_SCHED_INFO
 static inline void sched_info_reset_dequeued(struct task_struct *t)
...
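
For readers who want to try the new calling convention outside the kernel, here is
a minimal userspace sketch of the converted macros. It is an approximation only: a
plain bool stands in for the sched_schedstats static branch, and a stripped-down
struct rq stands in for the real runqueue.

    #include <stdbool.h>
    #include <stdio.h>

    static bool sched_schedstats = true;	/* stand-in for the static branch key */

    #define schedstat_enabled()		(sched_schedstats)
    #define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
    #define schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
    #define schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
    #define schedstat_val(var)		((schedstat_enabled()) ? (var) : 0)

    struct rq { unsigned long ttwu_count, yld_count; };	/* stripped-down stand-in */

    int main(void)
    {
    	struct rq rq = { 0, 0 };

    	schedstat_inc(rq.ttwu_count);	/* combined argument, as after this patch */
    	schedstat_add(rq.yld_count, 3);
    	printf("ttwu_count=%lu yld_count=%lu\n",
    	       schedstat_val(rq.ttwu_count), schedstat_val(rq.yld_count));
    	return 0;
    }

When sched_schedstats is false, the updates are skipped and schedstat_val() reads
back 0; the kernel gets the same behaviour at near-zero cost through the
static_branch_unlikely() key.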