Commit 71107445 authored by Jason Low, committed by Ingo Molnar

sched, timer: Use the atomic task_cputime in thread_group_cputimer

Recent optimizations were made to thread_group_cputimer to improve its
scalability by keeping track of cputime stats without a lock. However,
the values were open-coded into the structure, leaving them at a
different abstraction level from the regular task_cputime structure.
Furthermore, any subsequent similar optimizations would not be able to
share the new code, since it is specific to thread_group_cputimer.

This patch adds the new task_cputime_atomic data structure (introduced in
the previous patch in the series) to thread_group_cputimer for keeping
track of the cputime atomically, which also helps generalize the code.
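
For reference, a minimal sketch of what the task_cputime_atomic structure
and its INIT_CPUTIME_ATOMIC initializer look like, based on the previous
patch in this series and on the fields accessed in the diff below (the
exact layout is inferred from those accesses, not quoted verbatim):

	/* Requires <linux/atomic.h> for atomic64_t / ATOMIC64_INIT. */
	struct task_cputime_atomic {
		atomic64_t utime;		/* user CPU time, updated locklessly */
		atomic64_t stime;		/* system CPU time, updated locklessly */
		atomic64_t sum_exec_runtime;	/* total scheduled runtime */
	};

	#define INIT_CPUTIME_ATOMIC					\
		(struct task_cputime_atomic) {				\
			.utime = ATOMIC64_INIT(0),			\
			.stime = ATOMIC64_INIT(0),			\
			.sum_exec_runtime = ATOMIC64_INIT(0),		\
		}

Embedding this as a single cputime_atomic member lets helpers such as
update_gt_cputime() and sample_cputime_atomic() take a task_cputime_atomic
pointer, so future users of the lockless counters are not tied to
thread_group_cputimer.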
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Waiman Long <Waiman.Long@hp.com>
Link: http://lkml.kernel.org/r/1430251224-5764-6-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 971e8a98
@@ -50,10 +50,8 @@ extern struct fs_struct init_fs;
 	.cpu_timers	= INIT_CPU_TIMERS(sig.cpu_timers),		\
 	.rlim		= INIT_RLIMITS,					\
 	.cputimer	= {						\
-		.utime			= ATOMIC64_INIT(0),		\
-		.stime			= ATOMIC64_INIT(0),		\
-		.sum_exec_runtime	= ATOMIC64_INIT(0),		\
-		.running		= 0				\
+		.cputime_atomic	= INIT_CPUTIME_ATOMIC,			\
+		.running	= 0,					\
 	},								\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\

@@ -615,9 +615,7 @@ struct task_cputime_atomic {
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
-	atomic64_t utime;
-	atomic64_t stime;
-	atomic64_t sum_exec_runtime;
+	struct task_cputime_atomic cputime_atomic;
 	int running;
 };
 

@@ -216,7 +216,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
 	if (!cputimer_running(tsk))
 		return;
 
-	atomic64_add(cputime, &cputimer->utime);
+	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
 }
 
 /**
@@ -237,7 +237,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 	if (!cputimer_running(tsk))
 		return;
 
-	atomic64_add(cputime, &cputimer->stime);
+	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
 }
 
 /**
@@ -258,5 +258,5 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (!cputimer_running(tsk))
 		return;
 
-	atomic64_add(ns, &cputimer->sum_exec_runtime);
+	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
 }

@@ -211,20 +211,20 @@ static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
 	}
 }
 
-static void update_gt_cputime(struct thread_group_cputimer *cputimer, struct task_cputime *sum)
+static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
 {
-	__update_gt_cputime(&cputimer->utime, sum->utime);
-	__update_gt_cputime(&cputimer->stime, sum->stime);
-	__update_gt_cputime(&cputimer->sum_exec_runtime, sum->sum_exec_runtime);
+	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
+	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
+	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
 }
 
-/* Sample thread_group_cputimer values in "cputimer", store results in "times". */
-static inline void sample_group_cputimer(struct task_cputime *times,
-					 struct thread_group_cputimer *cputimer)
+/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
+static inline void sample_cputime_atomic(struct task_cputime *times,
+					 struct task_cputime_atomic *atomic_times)
 {
-	times->utime = atomic64_read(&cputimer->utime);
-	times->stime = atomic64_read(&cputimer->stime);
-	times->sum_exec_runtime = atomic64_read(&cputimer->sum_exec_runtime);
+	times->utime = atomic64_read(&atomic_times->utime);
+	times->stime = atomic64_read(&atomic_times->stime);
+	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 }
 
 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
@@ -240,7 +240,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		 * to synchronize the timer to the clock every time we start it.
 		 */
 		thread_group_cputime(tsk, &sum);
-		update_gt_cputime(cputimer, &sum);
+		update_gt_cputime(&cputimer->cputime_atomic, &sum);
 
 		/*
 		 * We're setting cputimer->running without a lock. Ensure
@@ -251,7 +251,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		 */
 		WRITE_ONCE(cputimer->running, 1);
 	}
-	sample_group_cputimer(times, cputimer);
+	sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }
 
 /*
@@ -1137,7 +1137,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (READ_ONCE(sig->cputimer.running)) {
 		struct task_cputime group_sample;
 
-		sample_group_cputimer(&group_sample, &sig->cputimer);
+		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;