Commit 52f17b6c authored by Chandra Seetharaman, committed by Linus Torvalds

[PATCH] per-task-delay-accounting: cpu delay collection via schedstats

Make the task-related schedstats functions callable by delay accounting even
if schedstats collection isn't turned on.  This removes the dependency of
delay accounting on schedstats.
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Shailabh Nagar <nagar@watson.ibm.com>
Signed-off-by: Balbir Singh <balbir@in.ibm.com>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Peter Chubb <peterc@gelato.unsw.edu.au>
Cc: Erich Focht <efocht@ess.nec.de>
Cc: Levent Serinol <lserinol@gmail.com>
Cc: Jay Lan <jlan@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0ff92245
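The core of the change is the new sched_info_on() helper added to include/linux/sched.h below: sched_info bookkeeping now runs whenever either consumer wants it, unconditionally under CONFIG_SCHEDSTATS and, with only CONFIG_TASK_DELAY_ACCT, at runtime via the delayacct_on flag. The sketch that follows is not part of this commit; it only illustrates how a delay-accounting reader could then derive a task's CPU wait from the sched_info fields that the gated hooks keep up to date. task_cpu_wait_ns() is a hypothetical helper name and the jiffies-to-nanoseconds conversion is illustrative only.

#include <linux/sched.h>
#include <linux/jiffies.h>

/* Hypothetical consumer, NOT part of this commit. */
static inline u64 task_cpu_wait_ns(struct task_struct *tsk)
{
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	/* Nothing is collected unless schedstats or delay accounting asked for it. */
	if (!sched_info_on())
		return 0;
	/*
	 * run_delay accumulates, in jiffies, the time the task spent runnable
	 * but off the cpu; sched_info_arrive() updates it on each arrival.
	 */
	return (u64)tsk->sched_info.run_delay * TICK_NSEC;
#else
	return 0;
#endif
}

The per-runqueue counters, by contrast, stay schedstats-only: the new rq_sched_info_arrive()/rq_sched_info_depart() helpers compile to empty stubs when CONFIG_SCHEDSTATS is off, so only the per-task fields are maintained for delay accounting.
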
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -537,7 +537,7 @@ extern struct user_struct root_user;
 struct backing_dev_info;
 struct reclaim_state;
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
 	unsigned long	cpu_time,	/* time spent on the cpu */
@@ -548,9 +548,11 @@ struct sched_info {
 	unsigned long	last_arrival,	/* when we last ran on a cpu */
 			last_queued;	/* when we were last queued to run */
 };
+#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
+#ifdef CONFIG_SCHEDSTATS
 extern struct file_operations proc_schedstat_operations;
-#endif
+#endif /* CONFIG_SCHEDSTATS */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
@@ -580,7 +582,19 @@ struct task_delay_info {
 	u32 swapin_count;	/* total count of the number of swapin block */
 				/* io operations performed */
 };
+#endif	/* CONFIG_TASK_DELAY_ACCT */
+
+static inline int sched_info_on(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+	return 1;
+#elif defined(CONFIG_TASK_DELAY_ACCT)
+	extern int delayacct_on;
+	return delayacct_on;
+#else
+	return 0;
 #endif
+}
 
 enum idle_type
 {
@@ -777,7 +791,7 @@ struct task_struct {
 	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
 #endif

--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -502,9 +502,36 @@ struct file_operations proc_schedstat_operations = {
 	.release = single_release,
 };
 
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{
+	if (rq) {
+		rq->rq_sched_info.run_delay += delta_jiffies;
+		rq->rq_sched_info.pcnt++;
+	}
+}
+
+/*
+ * Expects runqueue lock to be held for atomicity of update
+ */
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{
+	if (rq)
+		rq->rq_sched_info.cpu_time += delta_jiffies;
+}
 # define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 #else /* !CONFIG_SCHEDSTATS */
+static inline void
+rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
+{}
+static inline void
+rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
+{}
 # define schedstat_inc(rq, field)	do { } while (0)
 # define schedstat_add(rq, field, amt)	do { } while (0)
 #endif
@@ -524,7 +551,7 @@ static inline struct rq *this_rq_lock(void)
 	return rq;
 }
 
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 /*
  * Called when a process is dequeued from the active array and given
  * the cpu.  We should note that with the exception of interactive
@@ -552,21 +579,16 @@ static inline void sched_info_dequeued(struct task_struct *t)
  */
 static void sched_info_arrive(struct task_struct *t)
 {
-	unsigned long now = jiffies, diff = 0;
-	struct rq *rq = task_rq(t);
+	unsigned long now = jiffies, delta_jiffies = 0;
 
 	if (t->sched_info.last_queued)
-		diff = now - t->sched_info.last_queued;
+		delta_jiffies = now - t->sched_info.last_queued;
 	sched_info_dequeued(t);
-	t->sched_info.run_delay += diff;
+	t->sched_info.run_delay += delta_jiffies;
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcnt++;
 
-	if (!rq)
-		return;
-
-	rq->rq_sched_info.run_delay += diff;
-	rq->rq_sched_info.pcnt++;
+	rq_sched_info_arrive(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -586,6 +608,7 @@ static void sched_info_arrive(struct task_struct *t)
  */
 static inline void sched_info_queued(struct task_struct *t)
 {
+	if (unlikely(sched_info_on()))
 	if (!t->sched_info.last_queued)
 		t->sched_info.last_queued = jiffies;
 }
@@ -596,13 +619,10 @@ static inline void sched_info_queued(struct task_struct *t)
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
-	struct rq *rq = task_rq(t);
-	unsigned long diff = jiffies - t->sched_info.last_arrival;
+	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
 
-	t->sched_info.cpu_time += diff;
-
-	if (rq)
-		rq->rq_sched_info.cpu_time += diff;
+	t->sched_info.cpu_time += delta_jiffies;
+	rq_sched_info_depart(task_rq(t), delta_jiffies);
 }
 
 /*
@@ -611,7 +631,7 @@ static inline void sched_info_depart(struct task_struct *t)
  * the idle task.)  We are only called when prev != next.
  */
 static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
 	struct rq *rq = task_rq(prev);
 
@@ -626,10 +646,16 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 	if (next != rq->idle)
 		sched_info_arrive(next);
 }
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
+{
+	if (unlikely(sched_info_on()))
+		__sched_info_switch(prev, next);
+}
 #else
 #define sched_info_queued(t)		do { } while (0)
 #define sched_info_switch(t, next)	do { } while (0)
-#endif /* CONFIG_SCHEDSTATS */
+#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
 /*
  * Adding/removing a task to/from a priority array:
@@ -1531,7 +1557,8 @@ void fastcall sched_fork(struct task_struct *p, int clone_flags)
 	INIT_LIST_HEAD(&p->run_list);
 	p->array = NULL;
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+	if (unlikely(sched_info_on()))
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)