Commit 91c27493 authored by Vincent Guittot, committed by Ingo Molnar

sched/irq: Add IRQ utilization tracking

Interrupt and steal time are the only remaining activities tracked by
rt_avg. Like for the sched classes, we can use PELT to track their average
utilization of the CPU. But unlike the sched classes, we don't track when
entering/leaving interrupt context; instead, we take into account the time
spent in interrupt context when we update the rq's clock (rq_clock_task).
This also means that we have to decay the normal context time and account
for the interrupt time during the update.
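
Concretely, if `running' ns were spent in interrupt context since the last
update and the update happens at rq->clock, the window handled by
update_irq_load_avg() (see the diff below) is split as:

  last update            rq->clock - running              rq->clock
      |--- normal context (decay only) ---|--- interrupt (running) ---|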

It is also important to note that, because:

  rq_clock == rq_clock_task + interrupt time

and rq_clock_task is what a sched class uses to compute its utilization,
the util_avg of a sched class only reflects the utilization of the time
spent in normal context and not of the whole time of the CPU. Adding the
interrupt utilization gives a more accurate picture of the CPU's overall
utilization.

The CPU utilization is:

  avg_irq + (1 - avg_irq / max capacity) * \Sum avg_rq

Most of the time, avg_irq is small and negligible, so the approximation
CPU utilization = \Sum avg_rq has been good enough so far.
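
For illustration only, a minimal sketch of how that combined estimate could
be derived from the per-rq PELT signals; the helper cpu_total_util() is
hypothetical and not part of this patch:

  /*
   * Hypothetical helper, not part of this patch: combine the IRQ
   * average with the sum of the class averages. The class signals
   * are tracked against rq_clock_task, i.e. only over non-IRQ time,
   * so they are scaled by the capacity left once IRQ time is removed.
   */
  static unsigned long cpu_total_util(struct rq *rq, unsigned long max_cap)
  {
          unsigned long irq = READ_ONCE(rq->avg_irq.util_avg);
          unsigned long util;

          /* \Sum avg_rq: CFS + RT + DL utilization */
          util  = READ_ONCE(rq->cfs.avg.util_avg);
          util += READ_ONCE(rq->avg_rt.util_avg);
          util += READ_ONCE(rq->avg_dl.util_avg);

          /* avg_irq + (1 - avg_irq / max capacity) * \Sum avg_rq */
          util *= max_cap - irq;
          util /= max_cap;

          return irq + util;
  }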
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-7-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8cc90515
@@ -17,6 +17,8 @@
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
 
+#include "pelt.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -185,7 +187,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
-		sched_rt_avg_update(rq, irq_delta + steal);
+		update_irq_load_avg(rq, irq_delta + steal);
 #endif
 }
...
@@ -7290,7 +7290,7 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 	return false;
 }
 
-static inline bool others_rqs_have_blocked(struct rq *rq)
+static inline bool others_have_blocked(struct rq *rq)
 {
 	if (READ_ONCE(rq->avg_rt.util_avg))
 		return true;
@@ -7298,6 +7298,11 @@ static inline bool others_rqs_have_blocked(struct rq *rq)
 	if (READ_ONCE(rq->avg_dl.util_avg))
 		return true;
 
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	if (READ_ONCE(rq->avg_irq.util_avg))
+		return true;
+#endif
+
 	return false;
 }
 
@@ -7362,8 +7367,9 @@ static void update_blocked_averages(int cpu)
 	}
 	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
 	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+	update_irq_load_avg(rq, 0);
 	/* Don't need periodic decay once load/util_avg are null */
-	if (others_rqs_have_blocked(rq))
+	if (others_have_blocked(rq))
 		done = false;
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -7432,9 +7438,10 @@ static inline void update_blocked_averages(int cpu)
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
 	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
 	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+	update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
-	if (!cfs_rq_has_blocked(cfs_rq) && !others_rqs_have_blocked(rq))
+	if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
 		rq->has_blocked_load = 0;
 #endif
 	rq_unlock_irqrestore(rq, &rf);
...
@@ -357,3 +357,43 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
 
 	return 0;
 }
 
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+/*
+ * irq:
+ *
+ *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ *   util_sum = cpu_scale * load_sum
+ *   runnable_load_sum = load_sum
+ *
+ */
+
+int update_irq_load_avg(struct rq *rq, u64 running)
+{
+	int ret = 0;
+	/*
+	 * We know how much time has been used by interrupts since the last
+	 * update, but we don't know when; let's be pessimistic and assume
+	 * that the interrupts happened just before this update. This is not
+	 * far from reality, because an interrupt will most probably wake up
+	 * a task and trigger an update of the rq clock, during which the
+	 * metric is updated.
+	 * We start by decaying over the normal context time and then we add
+	 * the interrupt context time.
+	 * We can safely remove running from rq->clock because
+	 * rq->clock += delta with delta >= running
+	 */
+	ret = ___update_load_sum(rq->clock - running, rq->cpu, &rq->avg_irq,
+				0,
+				0,
+				0);
+	ret += ___update_load_sum(rq->clock, rq->cpu, &rq->avg_irq,
+				1,
+				1,
+				1);
+
+	if (ret)
+		___update_load_avg(&rq->avg_irq, 1, 1);
+
+	return ret;
+}
+#endif
...
@@ -6,6 +6,16 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+int update_irq_load_avg(struct rq *rq, u64 running);
+#else
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+	return 0;
+}
+#endif
+
 /*
  * When a task is dequeued, its estimated utilization should not be update if
  * its util_avg has not been updated at least once.
@@ -51,6 +61,12 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
 {
 	return 0;
 }
 
+static inline int
+update_irq_load_avg(struct rq *rq, u64 running)
+{
+	return 0;
+}
+
 #endif
...
@@ -857,6 +857,9 @@ struct rq {
 	u64			age_stamp;
 	struct sched_avg	avg_rt;
 	struct sched_avg	avg_dl;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	struct sched_avg	avg_irq;
+#endif
 	u64			idle_stamp;
 	u64			avg_idle;
...