Commit a499a5a1 authored by Frederic Weisbecker, committed by Ingo Molnar

sched/cputime: Increment kcpustat directly on irqtime account

The irqtime is accounted in nsecs and stored in
cpu_irq_time.hardirq_time and cpu_irq_time.softirq_time. Once the
accumulated amount reaches a new jiffy, this one gets accounted to the
kcpustat.

This was necessary when kcpustat was stored in cputime_t, which could at
worst have jiffies granularity. But now kcpustat is stored in nsecs
so this whole discretization game with temporary irqtime storage has
become unnecessary.

We can now directly account the irqtime to the kcpustat.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-17-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bde8285e
...@@ -44,6 +44,7 @@ void disable_sched_clock_irqtime(void) ...@@ -44,6 +44,7 @@ void disable_sched_clock_irqtime(void)
void irqtime_account_irq(struct task_struct *curr) void irqtime_account_irq(struct task_struct *curr)
{ {
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime); struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
u64 *cpustat = kcpustat_this_cpu->cpustat;
s64 delta; s64 delta;
int cpu; int cpu;
...@@ -61,49 +62,35 @@ void irqtime_account_irq(struct task_struct *curr) ...@@ -61,49 +62,35 @@ void irqtime_account_irq(struct task_struct *curr)
* in that case, so as not to confuse scheduler with a special task * in that case, so as not to confuse scheduler with a special task
* that do not consume any time, but still wants to run. * that do not consume any time, but still wants to run.
*/ */
if (hardirq_count()) if (hardirq_count()) {
irqtime->hardirq_time += delta; cpustat[CPUTIME_IRQ] += delta;
else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) irqtime->tick_delta += delta;
irqtime->softirq_time += delta; } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
cpustat[CPUTIME_SOFTIRQ] += delta;
irqtime->tick_delta += delta;
}
u64_stats_update_end(&irqtime->sync); u64_stats_update_end(&irqtime->sync);
} }
EXPORT_SYMBOL_GPL(irqtime_account_irq); EXPORT_SYMBOL_GPL(irqtime_account_irq);
static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime) static cputime_t irqtime_tick_accounted(cputime_t maxtime)
{ {
u64 *cpustat = kcpustat_this_cpu->cpustat; struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
cputime_t irq_cputime; cputime_t delta;
irq_cputime = nsecs_to_cputime64(irqtime - cpustat[idx]);
irq_cputime = min(irq_cputime, maxtime);
cpustat[idx] += cputime_to_nsecs(irq_cputime);
return irq_cputime;
}
static cputime_t irqtime_account_hi_update(cputime_t maxtime) delta = nsecs_to_cputime(irqtime->tick_delta);
{ delta = min(delta, maxtime);
return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time), irqtime->tick_delta -= cputime_to_nsecs(delta);
CPUTIME_IRQ, maxtime);
}
static cputime_t irqtime_account_si_update(cputime_t maxtime) return delta;
{
return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time),
CPUTIME_SOFTIRQ, maxtime);
} }
#else /* CONFIG_IRQ_TIME_ACCOUNTING */ #else /* CONFIG_IRQ_TIME_ACCOUNTING */
#define sched_clock_irqtime (0) #define sched_clock_irqtime (0)
static cputime_t irqtime_account_hi_update(cputime_t dummy) static cputime_t irqtime_tick_accounted(cputime_t dummy)
{
return 0;
}
static cputime_t irqtime_account_si_update(cputime_t dummy)
{ {
return 0; return 0;
} }
...@@ -280,10 +267,7 @@ static inline cputime_t account_other_time(cputime_t max) ...@@ -280,10 +267,7 @@ static inline cputime_t account_other_time(cputime_t max)
accounted = steal_account_process_time(max); accounted = steal_account_process_time(max);
if (accounted < max) if (accounted < max)
accounted += irqtime_account_hi_update(max - accounted); accounted += irqtime_tick_accounted(max - accounted);
if (accounted < max)
accounted += irqtime_account_si_update(max - accounted);
return accounted; return accounted;
} }
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <linux/sched/rt.h> #include <linux/sched/rt.h>
#include <linux/u64_stats_sync.h> #include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h> #include <linux/sched/deadline.h>
#include <linux/kernel_stat.h>
#include <linux/binfmts.h> #include <linux/binfmts.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -1827,8 +1828,7 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { } ...@@ -1827,8 +1828,7 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
#ifdef CONFIG_IRQ_TIME_ACCOUNTING #ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime { struct irqtime {
u64 hardirq_time; u64 tick_delta;
u64 softirq_time;
u64 irq_start_time; u64 irq_start_time;
struct u64_stats_sync sync; struct u64_stats_sync sync;
}; };
...@@ -1838,12 +1838,13 @@ DECLARE_PER_CPU(struct irqtime, cpu_irqtime); ...@@ -1838,12 +1838,13 @@ DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
static inline u64 irq_time_read(int cpu) static inline u64 irq_time_read(int cpu)
{ {
struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
u64 *cpustat = kcpustat_cpu(cpu).cpustat;
unsigned int seq; unsigned int seq;
u64 total; u64 total;
do { do {
seq = __u64_stats_fetch_begin(&irqtime->sync); seq = __u64_stats_fetch_begin(&irqtime->sync);
total = irqtime->softirq_time + irqtime->hardirq_time; total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
} while (__u64_stats_fetch_retry(&irqtime->sync, seq)); } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
return total; return total;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment