Commit 0db49b72 authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  sched/tracing: Add a new tracepoint for sleeptime
  sched: Disable scheduler warnings during oopses
  sched: Fix cgroup movement of waking process
  sched: Fix cgroup movement of newly created process
  sched: Fix cgroup movement of forking process
  sched: Remove cfs bandwidth period check in tg_set_cfs_period()
  sched: Fix load-balance lock-breaking
  sched: Replace all_pinned with a generic flags field
  sched: Only queue remote wakeups when crossing cache boundaries
  sched: Add missing rcu_dereference() around ->real_parent usage
  [S390] fix cputime overflow in uptime_proc_show
  [S390] cputime: add sparse checking and cleanup
  sched: Mark parent and real_parent as __rcu
  sched, nohz: Fix missing RCU read lock
  sched, nohz: Set the NOHZ_BALANCE_KICK flag for idle load balancer
  sched, nohz: Fix the idle cpu check in nohz_idle_balance
  sched: Use jump_labels for sched_feat
  sched/accounting: Fix parameter passing in task_group_account_field
  sched/accounting: Fix user/system tick double accounting
  sched/accounting: Re-use scheduler statistics for the root cgroup
  ...

Fix up conflicts in
 - arch/ia64/include/asm/cputime.h, include/asm-generic/cputime.h
	usecs_to_cputime64() vs the sparse cleanups
 - kernel/sched/fair.c, kernel/time/tick-sched.c
	scheduler changes in multiple branches
parents 35b740e4 1ac9bc69
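
The unifying change in the cputime hunks below is sparse type safety: cputime_t and cputime64_t become __nocast typedefs, so mixing them with plain integers is flagged at sparse time, and every deliberate crossing is marked __force. A minimal sketch of that annotation pattern (illustrative only, not code from this merge):

    #include <linux/types.h>	/* u64 */
    #include <linux/compiler.h>	/* __nocast, __force (sparse annotations) */

    typedef u64 __nocast cputime_t;

    /* Unwrapping the opaque type requires an explicit __force cast ... */
    static inline u64 cputime_raw(cputime_t ct)
    {
            return (__force u64) ct;
    }

    /* ... and so does wrapping a raw value back up. */
    static inline cputime_t cputime_wrap(u64 raw)
    {
            return (__force cputime_t) raw;
    }

With that in place the old helper macros (cputime_add, cputime_sub, cputime64_zero, ...) are redundant: ordinary +, - and 0 work on the underlying u64, which is exactly the conversion the bulk of this diff performs.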
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -26,60 +26,53 @@
 #include <linux/jiffies.h>
 #include <asm/processor.h>
 
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime_zero			((cputime_t)0)
 #define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_max			((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) + (__b))
-#define cputime_sub(__a, __b)		((__a) - (__b))
-#define cputime_div(__a, __n)		((__a) / (__n))
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) > (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) < (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-
-#define cputime64_zero			((cputime64_t)0)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime64_sub(__a, __b)		((__a) - (__b))
-#define cputime_to_cputime64(__ct)	(__ct)
 
 /*
  * Convert cputime <-> jiffies (HZ)
  */
-#define cputime_to_jiffies(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime_to_jiffies(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif)	\
+	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)	\
+	(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
 
 /*
  * Convert cputime <-> microseconds
  */
-#define cputime_to_usecs(__ct)		((__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)	((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs)	usecs_to_cputime(__usecs)
+#define cputime_to_usecs(__ct)		\
+	((__force u64)(__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)	\
+	(__force cputime_t)((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs)	\
+	(__force cputime64_t)((__usecs) * NSEC_PER_USEC)
 
 /*
  * Convert cputime <-> seconds
  */
-#define cputime_to_secs(__ct)		((__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs)		((__secs) * NSEC_PER_SEC)
+#define cputime_to_secs(__ct)		\
+	((__force u64)(__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)		\
+	(__force cputime_t)((__secs) * NSEC_PER_SEC)
 
 /*
  * Convert cputime <-> timespec (nsec)
  */
 static inline cputime_t timespec_to_cputime(const struct timespec *val)
 {
-	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
-	return (ret + val->tv_nsec);
+	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+	return (__force cputime_t) ret;
 }
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
 {
-	val->tv_sec  = ct / NSEC_PER_SEC;
-	val->tv_nsec = ct % NSEC_PER_SEC;
+	val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
+	val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
 }
 
 /*
@@ -87,25 +80,28 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
  */
 static inline cputime_t timeval_to_cputime(struct timeval *val)
 {
-	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
-	return (ret + val->tv_usec * NSEC_PER_USEC);
+	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+	return (__force cputime_t) ret;
 }
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
 {
-	val->tv_sec = ct / NSEC_PER_SEC;
-	val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+	val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+	val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
 }
 
 /*
  * Convert cputime <-> clock (USER_HZ)
  */
-#define cputime_to_clock_t(__ct)	((__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)		((__x) * (NSEC_PER_SEC / USER_HZ))
+#define cputime_to_clock_t(__ct)	\
+	((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)		\
+	(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
 
 /*
  * Convert cputime64 to clock.
  */
-#define cputime64_to_clock_t(__ct)	cputime_to_clock_t((cputime_t)__ct)
+#define cputime64_to_clock_t(__ct)	\
+	cputime_to_clock_t((__force cputime_t)__ct)
 
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -29,25 +29,8 @@ static inline void setup_cputime_one_jiffy(void) { }
 #include <asm/time.h>
 #include <asm/param.h>
 
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime_zero			((cputime_t)0)
-#define cputime_max			((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) + (__b))
-#define cputime_sub(__a, __b)		((__a) - (__b))
-#define cputime_div(__a, __n)		((__a) / (__n))
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) > (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) < (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-
-#define cputime64_zero			((cputime64_t)0)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime64_sub(__a, __b)		((__a) - (__b))
-#define cputime_to_cputime64(__ct)	(__ct)
 
 #ifdef __KERNEL__
 
@@ -65,7 +48,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
 static inline unsigned long cputime_to_jiffies(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_jiffies_factor);
+	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
 }
 
 /* Estimate the scaled cputime by scaling the real cputime based on
@@ -74,14 +57,15 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct)
 {
 	if (cpu_has_feature(CPU_FTR_SPURR) &&
 	    __get_cpu_var(cputime_last_delta))
-		return ct * __get_cpu_var(cputime_scaled_last_delta) /
-			__get_cpu_var(cputime_last_delta);
+		return (__force u64) ct *
+			__get_cpu_var(cputime_scaled_last_delta) /
+			__get_cpu_var(cputime_last_delta);
 	return ct;
 }
 
 static inline cputime_t jiffies_to_cputime(const unsigned long jif)
 {
-	cputime_t ct;
+	u64 ct;
 	unsigned long sec;
 
 	/* have to be a little careful about overflow */
@@ -93,7 +77,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
 	}
 	if (sec)
 		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+	return (__force cputime_t) ct;
 }
 
 static inline void setup_cputime_one_jiffy(void)
@@ -103,7 +87,7 @@ static inline void setup_cputime_one_jiffy(void)
 
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
-	cputime_t ct;
+	u64 ct;
 	u64 sec;
 
 	/* have to be a little careful about overflow */
@@ -114,13 +98,13 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 		do_div(ct, HZ);
 	}
 	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+		ct += (u64) sec * tb_ticks_per_sec;
+	return (__force cputime64_t) ct;
 }
 
 static inline u64 cputime64_to_jiffies64(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_jiffies_factor);
+	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
 }
 
 /*
@@ -130,12 +114,12 @@ extern u64 __cputime_msec_factor;
 
 static inline unsigned long cputime_to_usecs(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+	return mulhdu((__force u64) ct, __cputime_msec_factor) * USEC_PER_MSEC;
 }
 
 static inline cputime_t usecs_to_cputime(const unsigned long us)
 {
-	cputime_t ct;
+	u64 ct;
 	unsigned long sec;
 
 	/* have to be a little careful about overflow */
@@ -147,7 +131,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
 	}
 	if (sec)
 		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+	return (__force cputime_t) ct;
 }
 
 #define usecs_to_cputime64(us)		usecs_to_cputime(us)
@@ -159,12 +143,12 @@ extern u64 __cputime_sec_factor;
 
 static inline unsigned long cputime_to_secs(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_sec_factor);
+	return mulhdu((__force u64) ct, __cputime_sec_factor);
 }
 
 static inline cputime_t secs_to_cputime(const unsigned long sec)
 {
-	return (cputime_t) sec * tb_ticks_per_sec;
+	return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
 }
 
 /*
@@ -172,7 +156,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec)
  */
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
 {
-	u64 x = ct;
+	u64 x = (__force u64) ct;
 	unsigned int frac;
 
 	frac = do_div(x, tb_ticks_per_sec);
@@ -184,11 +168,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
 
 static inline cputime_t timespec_to_cputime(const struct timespec *p)
 {
-	cputime_t ct;
+	u64 ct;
 
 	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
 	do_div(ct, 1000000000);
-	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
 }
 
 /*
@@ -196,7 +180,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p)
  */
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
 {
-	u64 x = ct;
+	u64 x = (__force u64) ct;
 	unsigned int frac;
 
 	frac = do_div(x, tb_ticks_per_sec);
@@ -208,11 +192,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
 
 static inline cputime_t timeval_to_cputime(const struct timeval *p)
 {
-	cputime_t ct;
+	u64 ct;
 
 	ct = (u64) p->tv_usec * tb_ticks_per_sec;
 	do_div(ct, 1000000);
-	return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
 }
 
 /*
@@ -222,12 +206,12 @@ extern u64 __cputime_clockt_factor;
 
 static inline unsigned long cputime_to_clock_t(const cputime_t ct)
 {
-	return mulhdu(ct, __cputime_clockt_factor);
+	return mulhdu((__force u64) ct, __cputime_clockt_factor);
 }
 
 static inline cputime_t clock_t_to_cputime(const unsigned long clk)
 {
-	cputime_t ct;
+	u64 ct;
 	unsigned long sec;
 
 	/* have to be a little careful about overflow */
@@ -238,8 +222,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
 		do_div(ct, USER_HZ);
 	}
 	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return ct;
+		ct += (u64) sec * tb_ticks_per_sec;
+	return (__force cputime_t) ct;
 }
 
 #define cputime64_to_clock_t(ct)	cputime_to_clock_t((cputime_t)(ct))
...
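
The powerpc helpers above repeatedly guard against 64-bit multiply overflow by splitting the input into whole seconds and a sub-second remainder before scaling. A simplified sketch of that arithmetic (hypothetical names; the real helpers only split when the direct multiply could actually overflow):

    #include <asm/div64.h>	/* do_div() */

    /* jif * ticks_per_sec / HZ without overflowing the intermediate product. */
    static inline u64 jiffies_to_ticks(unsigned long jif, u64 ticks_per_sec)
    {
            unsigned long sec = jif / HZ;	/* whole seconds: scaled separately */
            u64 ct = jif % HZ;		/* < HZ, so ct * ticks_per_sec fits */

            ct *= ticks_per_sec;
            do_div(ct, HZ);			/* do_div() divides ct in place */
            return ct + (u64) sec * ticks_per_sec;
    }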
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -115,21 +115,21 @@ static void appldata_get_os_data(void *data)
 	j = 0;
 	for_each_online_cpu(i) {
 		os_data->os_cpu[j].per_cpu_user =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.user);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
 		os_data->os_cpu[j].per_cpu_nice =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.nice);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
 		os_data->os_cpu[j].per_cpu_system =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.system);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
 		os_data->os_cpu[j].per_cpu_idle =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.idle);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
 		os_data->os_cpu[j].per_cpu_irq =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.irq);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
 		os_data->os_cpu[j].per_cpu_softirq =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
 		os_data->os_cpu[j].per_cpu_iowait =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
 		os_data->os_cpu[j].per_cpu_steal =
-			cputime_to_jiffies(kstat_cpu(i).cpustat.steal);
+			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
 		os_data->os_cpu[j].cpu_id = i;
 		j++;
 	}
...
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -16,75 +16,60 @@
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
-typedef unsigned long long cputime_t;
-typedef unsigned long long cputime64_t;
+typedef unsigned long long __nocast cputime_t;
+typedef unsigned long long __nocast cputime64_t;
 
-#ifndef __s390x__
-
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
-{
-	register_pair rp;
-
-	rp.pair = n >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
-	return rp.subreg.odd;
-}
-
-#else /* __s390x__ */
-
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
+static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
+#ifndef __s390x__
+	register_pair rp;
+
+	rp.pair = n >> 1;
+	asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
+	return rp.subreg.odd;
+#else /* __s390x__ */
 	return n / base;
+#endif /* __s390x__ */
 }
 
-#endif /* __s390x__ */
-
-#define cputime_zero			(0ULL)
 #define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_max			((~0UL >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) + (__b))
-#define cputime_sub(__a, __b)		((__a) - (__b))
-#define cputime_div(__a, __n)		({ \
-	unsigned long long __div = (__a); \
-	do_div(__div,__n); \
-	__div; \
-})
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) > (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) < (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-#define cputime_to_jiffies(__ct)	(__div((__ct), 4096000000ULL / HZ))
-#define cputime_to_scaled(__ct)	(__ct)
-#define jiffies_to_cputime(__hz)	((cputime_t)(__hz) * (4096000000ULL / HZ))
-#define cputime64_zero			(0ULL)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime_to_cputime64(__ct)	(__ct)
 
-static inline u64
-cputime64_to_jiffies64(cputime64_t cputime)
-{
-	do_div(cputime, 4096000000ULL / HZ);
-	return cputime;
+/*
+ * Convert cputime to jiffies and back.
+ */
+static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
+{
+	return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned int jif)
+{
+	return (__force cputime_t)(jif * (4096000000ULL / HZ));
+}
+
+static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
+{
+	unsigned long long jif = (__force unsigned long long) cputime;
+	do_div(jif, 4096000000ULL / HZ);
+	return jif;
+}
+
+static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
+{
+	return (__force cputime64_t)(jif * (4096000000ULL / HZ));
 }
 
 /*
  * Convert cputime to microseconds and back.
  */
-static inline unsigned int
-cputime_to_usecs(const cputime_t cputime)
+static inline unsigned int cputime_to_usecs(const cputime_t cputime)
 {
-	return cputime_div(cputime, 4096);
+	return (__force unsigned long long) cputime >> 12;
 }
 
-static inline cputime_t
-usecs_to_cputime(const unsigned int m)
+static inline cputime_t usecs_to_cputime(const unsigned int m)
 {
-	return (cputime_t) m * 4096;
+	return (__force cputime_t)(m * 4096ULL);
 }
 
 #define usecs_to_cputime64(m)		usecs_to_cputime(m)
@@ -92,40 +77,39 @@ usecs_to_cputime(const unsigned int m)
 /*
  * Convert cputime to milliseconds and back.
  */
-static inline unsigned int
-cputime_to_secs(const cputime_t cputime)
+static inline unsigned int cputime_to_secs(const cputime_t cputime)
 {
-	return __div(cputime, 2048000000) >> 1;
+	return __div((__force unsigned long long) cputime, 2048000000) >> 1;
 }
 
-static inline cputime_t
-secs_to_cputime(const unsigned int s)
+static inline cputime_t secs_to_cputime(const unsigned int s)
 {
-	return (cputime_t) s * 4096000000ULL;
+	return (__force cputime_t)(s * 4096000000ULL);
 }
 
 /*
  * Convert cputime to timespec and back.
  */
-static inline cputime_t
-timespec_to_cputime(const struct timespec *value)
+static inline cputime_t timespec_to_cputime(const struct timespec *value)
 {
-	return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
+	unsigned long long ret = value->tv_sec * 4096000000ULL;
+	return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
 }
 
-static inline void
-cputime_to_timespec(const cputime_t cputime, struct timespec *value)
+static inline void cputime_to_timespec(const cputime_t cputime,
+				       struct timespec *value)
 {
+	unsigned long long __cputime = (__force unsigned long long) cputime;
 #ifndef __s390x__
 	register_pair rp;
 
-	rp.pair = cputime >> 1;
+	rp.pair = __cputime >> 1;
 	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
 	value->tv_nsec = rp.subreg.even * 1000 / 4096;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
-	value->tv_sec = cputime / 4096000000ULL;
+	value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
+	value->tv_sec = __cputime / 4096000000ULL;
 #endif
 }
@@ -134,50 +118,52 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
  * Since cputime and timeval have the same resolution (microseconds)
  * this is easy.
  */
-static inline cputime_t
-timeval_to_cputime(const struct timeval *value)
+static inline cputime_t timeval_to_cputime(const struct timeval *value)
 {
-	return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
+	unsigned long long ret = value->tv_sec * 4096000000ULL;
+	return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
 }
 
-static inline void
-cputime_to_timeval(const cputime_t cputime, struct timeval *value)
+static inline void cputime_to_timeval(const cputime_t cputime,
+				      struct timeval *value)
 {
+	unsigned long long __cputime = (__force unsigned long long) cputime;
 #ifndef __s390x__
 	register_pair rp;
 
-	rp.pair = cputime >> 1;
+	rp.pair = __cputime >> 1;
 	asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
 	value->tv_usec = rp.subreg.even / 4096;
 	value->tv_sec = rp.subreg.odd;
 #else
-	value->tv_usec = (cputime % 4096000000ULL) / 4096;
-	value->tv_sec = cputime / 4096000000ULL;
+	value->tv_usec = (__cputime % 4096000000ULL) / 4096;
+	value->tv_sec = __cputime / 4096000000ULL;
 #endif
 }
 
 /*
  * Convert cputime to clock and back.
  */
-static inline clock_t
-cputime_to_clock_t(cputime_t cputime)
+static inline clock_t cputime_to_clock_t(cputime_t cputime)
 {
-	return cputime_div(cputime, 4096000000ULL / USER_HZ);
+	unsigned long long clock = (__force unsigned long long) cputime;
+	do_div(clock, 4096000000ULL / USER_HZ);
+	return clock;
 }
 
-static inline cputime_t
-clock_t_to_cputime(unsigned long x)
+static inline cputime_t clock_t_to_cputime(unsigned long x)
 {
-	return (cputime_t) x * (4096000000ULL / USER_HZ);
+	return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
 }
 
 /*
  * Convert cputime64 to clock.
  */
-static inline clock_t
-cputime64_to_clock_t(cputime64_t cputime)
+static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
 {
-	return cputime_div(cputime, 4096000000ULL / USER_HZ);
+	unsigned long long clock = (__force unsigned long long) cputime;
+	do_div(clock, 4096000000ULL / USER_HZ);
+	return clock;
 }
 
 struct s390_idle_data {
...
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -218,7 +218,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 #ifdef CONFIG_SMP
 #define safe_address (__per_cpu_offset[0])
 #else
-#define safe_address (kstat_cpu(0).cpustat.user)
+#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
 #endif
 
 /*
...
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@ static struct dbs_tuners {
 	.freq_step = 5,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-							cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time  = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					 j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
...
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-							cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-	cputime64_t idle_time;
-	cputime64_t cur_wall_time;
-	cputime64_t busy_time;
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time  = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
 
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+		*wall = jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);
+	return jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
-			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
 	}
 	return count;
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
-				j_dbs_info->prev_cpu_iowait);
+		iowait_time = (unsigned int)
+			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
 		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
-			cputime64_t cur_nice;
+			u64 cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					 j_dbs_info->prev_cpu_nice);
+			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+					 j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 			cur_nice_jiffies = (unsigned long)
 					cputime64_to_jiffies64(cur_nice);
 
-			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
-			if (dbs_tuners_ins.ignore_nice) {
+			if (dbs_tuners_ins.ignore_nice)
 				j_dbs_info->prev_cpu_nice =
-						kstat_cpu(j).cpustat.nice;
-			}
+					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
 		this_dbs_info->cpu = cpu;
 		this_dbs_info->rate_mult = 1;
...
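
Both governors feed these deltas into the same load estimate: the busy fraction of a sampling window is (wall - idle) / wall. As a standalone sketch (hypothetical helper name, simplified from the dbs_check_cpu() paths above):

    /* Percent load over one sampling window, given jiffy deltas. */
    static unsigned int window_load_pct(unsigned int wall_time,
                                        unsigned int idle_time)
    {
            if (wall_time == 0 || wall_time < idle_time)
                    return 0;
            return 100 * (wall_time - idle_time) / wall_time;
    }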
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -61,9 +61,8 @@ static int cpufreq_stats_update(unsigned int cpu)
 	spin_lock(&cpufreq_stats_lock);
 	stat = per_cpu(cpufreq_stats_table, cpu);
 	if (stat->time_in_state)
-		stat->time_in_state[stat->last_index] =
-			cputime64_add(stat->time_in_state[stat->last_index],
-				      cputime_sub(cur_time, stat->last_time));
+		stat->time_in_state[stat->last_index] +=
+			cur_time - stat->last_time;
 	stat->last_time = cur_time;
 	spin_unlock(&cpufreq_stats_lock);
 	return 0;
...
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice;
  */
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-	cputime64_t retval;
+	u64 retval;
 
-	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-			kstat_cpu(cpu).cpustat.iowait);
+	retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+		 kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 
 	if (rackmeter_ignore_nice)
-		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+		retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 
 	return retval;
 }
@@ -220,13 +220,11 @@ static void rackmeter_do_timer(struct work_struct *work)
 	int i, offset, load, cumm, pause;
 
 	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-	total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
-						  rcpu->prev_wall);
+	total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
 	rcpu->prev_wall = cur_jiffies;
 
 	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
-				rcpu->prev_idle);
+	idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
 	rcpu->prev_idle = total_idle_ticks;
 
 	/* We do a very dumb calculation to update the LEDs for now,
...
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -394,8 +394,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	sigemptyset(&sigign);
 	sigemptyset(&sigcatch);
-	cutime = cstime = utime = stime = cputime_zero;
-	cgtime = gtime = cputime_zero;
+	cutime = cstime = utime = stime = 0;
+	cgtime = gtime = 0;
 
 	if (lock_task_sighand(task, &flags)) {
 		struct signal_struct *sig = task->signal;
@@ -423,14 +423,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				gtime = cputime_add(gtime, t->gtime);
+				gtime += t->gtime;
 				t = next_thread(t);
 			} while (t != task);
 
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
 			thread_group_times(task, &utime, &stime);
-			gtime = cputime_add(gtime, sig->gtime);
+			gtime += sig->gtime;
 		}
 
 		sid = task_session_nr_ns(task, ns);
...
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -22,29 +22,27 @@
 #define arch_idle_time(cpu) 0
 #endif
 
-static cputime64_t get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
 {
-	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-	cputime64_t idle;
+	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
 
 	if (idle_time == -1ULL) {
 		/* !NO_HZ so we can rely on cpustat.idle */
-		idle = kstat_cpu(cpu).cpustat.idle;
-		idle = cputime64_add(idle, arch_idle_time(cpu));
+		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+		idle += arch_idle_time(cpu);
 	} else
 		idle = usecs_to_cputime64(idle_time);
 
 	return idle;
 }
 
-static cputime64_t get_iowait_time(int cpu)
+static u64 get_iowait_time(int cpu)
 {
-	u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
-	cputime64_t iowait;
+	u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
 
 	if (iowait_time == -1ULL)
 		/* !NO_HZ so we can rely on cpustat.iowait */
-		iowait = kstat_cpu(cpu).cpustat.iowait;
+		iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 	else
 		iowait = usecs_to_cputime64(iowait_time);
 
@@ -55,33 +53,30 @@ static int show_stat(struct seq_file *p, void *v)
 {
 	int i, j;
 	unsigned long jif;
-	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
-	cputime64_t guest, guest_nice;
+	u64 user, nice, system, idle, iowait, irq, softirq, steal;
+	u64 guest, guest_nice;
 	u64 sum = 0;
 	u64 sum_softirq = 0;
 	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
 	struct timespec boottime;
 
 	user = nice = system = idle = iowait =
-		irq = softirq = steal = cputime64_zero;
-	guest = guest_nice = cputime64_zero;
+		irq = softirq = steal = 0;
+	guest = guest_nice = 0;
 	getboottime(&boottime);
 	jif = boottime.tv_sec;
 
 	for_each_possible_cpu(i) {
-		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
-		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
-		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
-		idle = cputime64_add(idle, get_idle_time(i));
-		iowait = cputime64_add(iowait, get_iowait_time(i));
-		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
-		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
-		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-		guest_nice = cputime64_add(guest_nice,
-			kstat_cpu(i).cpustat.guest_nice);
+		user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
+		nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+		system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+		idle += get_idle_time(i);
+		iowait += get_iowait_time(i);
+		irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+		softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+		steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+		guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+		guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
 		sum += kstat_cpu_irqs_sum(i);
 		sum += arch_irq_stat_cpu(i);
 
 		for (j = 0; j < NR_SOFTIRQS; j++) {
 			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
@@ -106,16 +101,16 @@ static int show_stat(struct seq_file *p, void *v)
 		(unsigned long long)cputime64_to_clock_t(guest_nice));
 	for_each_online_cpu(i) {
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
-		user = kstat_cpu(i).cpustat.user;
-		nice = kstat_cpu(i).cpustat.nice;
-		system = kstat_cpu(i).cpustat.system;
+		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
+		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+		system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
 		idle = get_idle_time(i);
 		iowait = get_iowait_time(i);
-		irq = kstat_cpu(i).cpustat.irq;
-		softirq = kstat_cpu(i).cpustat.softirq;
-		steal = kstat_cpu(i).cpustat.steal;
-		guest = kstat_cpu(i).cpustat.guest;
-		guest_nice = kstat_cpu(i).cpustat.guest_nice;
+		irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+		softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+		steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+		guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+		guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
 		seq_printf(p,
 			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
 			"%llu\n",
...
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
 {
 	struct timespec uptime;
 	struct timespec idle;
+	u64 idletime;
+	u64 nsec;
+	u32 rem;
 	int i;
-	cputime_t idletime = cputime_zero;
 
+	idletime = 0;
 	for_each_possible_cpu(i)
-		idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+		idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
 
 	do_posix_clock_monotonic_gettime(&uptime);
 	monotonic_to_bootbased(&uptime);
-	cputime_to_timespec(idletime, &idle);
+	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
+	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+	idle.tv_nsec = rem;
 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
 			(unsigned long) uptime.tv_sec,
 			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
...
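
The replacement for cputime_to_timespec() above is a plain nanosecond split; div_u64_rem() divides a u64 by a 32-bit divisor and hands back the remainder through a pointer. In isolation:

    #include <linux/math64.h>	/* div_u64_rem() */

    u32 rem;
    u64 sec = div_u64_rem(3750000000ULL, NSEC_PER_SEC, &rem);
    /* sec == 3, rem == 750000000: 3.75 seconds */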
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,71 +4,66 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-typedef unsigned long cputime_t;
+typedef unsigned long __nocast cputime_t;
 
-#define cputime_zero			(0UL)
 #define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_max			((~0UL >> 1) - 1)
-#define cputime_add(__a, __b)		((__a) + (__b))
-#define cputime_sub(__a, __b)		((__a) - (__b))
-#define cputime_div(__a, __n)		((__a) / (__n))
-#define cputime_halve(__a)		((__a) >> 1)
-#define cputime_eq(__a, __b)		((__a) == (__b))
-#define cputime_gt(__a, __b)		((__a) > (__b))
-#define cputime_ge(__a, __b)		((__a) >= (__b))
-#define cputime_lt(__a, __b)		((__a) < (__b))
-#define cputime_le(__a, __b)		((__a) <= (__b))
-#define cputime_to_jiffies(__ct)	(__ct)
+#define cputime_to_jiffies(__ct)	(__force unsigned long)(__ct)
 #define cputime_to_scaled(__ct)	(__ct)
-#define jiffies_to_cputime(__hz)	(__hz)
+#define jiffies_to_cputime(__hz)	(__force cputime_t)(__hz)
 
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime64_zero			(0ULL)
-#define cputime64_add(__a, __b)		((__a) + (__b))
-#define cputime64_sub(__a, __b)		((__a) - (__b))
-#define cputime64_to_jiffies64(__ct)	(__ct)
-#define jiffies64_to_cputime64(__jif)	(__jif)
-#define cputime_to_cputime64(__ct)	((u64) __ct)
-#define cputime64_gt(__a, __b)		((__a) > (__b))
+#define cputime64_to_jiffies64(__ct)	(__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif)	(__force cputime64_t)(__jif)
 
-#define nsecs_to_cputime64(__ct)	nsecs_to_jiffies64(__ct)
+#define nsecs_to_cputime64(__ct)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
 
 /*
  * Convert cputime to microseconds and back.
  */
-#define cputime_to_usecs(__ct)		jiffies_to_usecs(__ct)
-#define usecs_to_cputime(__msecs)	usecs_to_jiffies(__msecs)
-#define usecs_to_cputime64(__msecs)	nsecs_to_jiffies64((__msecs) * 1000)
+#define cputime_to_usecs(__ct)		\
+	jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec)	\
+	jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
 
 /*
  * Convert cputime to seconds and back.
  */
-#define cputime_to_secs(jif)		((jif) / HZ)
-#define secs_to_cputime(sec)		((sec) * HZ)
+#define cputime_to_secs(jif)		(cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec)		jiffies_to_cputime((sec) * HZ)
 
 /*
  * Convert cputime to timespec and back.
  */
-#define timespec_to_cputime(__val)	timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val)	jiffies_to_timespec(__ct,__val)
+#define timespec_to_cputime(__val)	\
+	jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val)	\
+	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to timeval and back.
  */
-#define timeval_to_cputime(__val)	timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val)	jiffies_to_timeval(__ct,__val)
+#define timeval_to_cputime(__val)	\
+	jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val)	\
+	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to clock and back.
 */
-#define cputime_to_clock_t(__ct)	jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x)		clock_t_to_jiffies(__x)
+#define cputime_to_clock_t(__ct)	\
+	jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x)		\
+	jiffies_to_cputime(clock_t_to_jiffies(__x))
 
 /*
  * Convert cputime64 to clock.
 */
-#define cputime64_to_clock_t(__ct)	jiffies_64_to_clock_t(__ct)
+#define cputime64_to_clock_t(__ct)	\
+	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
 
 #endif
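
What the __nocast typedefs buy, concretely, is a sparse-time diagnostic whenever cputime leaks into plain arithmetic without going through a conversion helper. A sketch of the kind of misuse a `make C=1` build should now flag (my reading of sparse's __nocast semantics, not output reproduced from this merge):

    cputime_t ct = jiffies_to_cputime(5);	/* ok: via the helper */
    u64 ok = (__force u64) ct;			/* ok: explicit __force cast */
    u64 bad = ct;				/* sparse: implicit cast from nocast type */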
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -6,6 +6,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/cputime.h>
 
@@ -15,21 +16,25 @@
  * used by rstatd/perfmeter
  */
 
-struct cpu_usage_stat {
-	cputime64_t user;
-	cputime64_t nice;
-	cputime64_t system;
-	cputime64_t softirq;
-	cputime64_t irq;
-	cputime64_t idle;
-	cputime64_t iowait;
-	cputime64_t steal;
-	cputime64_t guest;
-	cputime64_t guest_nice;
+enum cpu_usage_stat {
+	CPUTIME_USER,
+	CPUTIME_NICE,
+	CPUTIME_SYSTEM,
+	CPUTIME_SOFTIRQ,
+	CPUTIME_IRQ,
+	CPUTIME_IDLE,
+	CPUTIME_IOWAIT,
+	CPUTIME_STEAL,
+	CPUTIME_GUEST,
+	CPUTIME_GUEST_NICE,
+	NR_STATS,
+};
+
+struct kernel_cpustat {
+	u64 cpustat[NR_STATS];
 };
 
 struct kernel_stat {
-	struct cpu_usage_stat	cpustat;
 #ifndef CONFIG_GENERIC_HARDIRQS
 	unsigned int irqs[NR_IRQS];
 #endif
@@ -38,10 +43,13 @@ struct kernel_stat {
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
-#define kstat_cpu(cpu) per_cpu(kstat, cpu)
 /* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu __get_cpu_var(kstat)
+#define kstat_this_cpu (&__get_cpu_var(kstat))
+#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
 
 extern unsigned long long nr_context_switches(void);
...
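
With struct cpu_usage_stat replaced by the kernel_cpustat array, every caller migrates from per-field struct access to enum-indexed access, which is what all the kstat_cpu() -> kcpustat_cpu() hunks in this merge do. The pattern in isolation (sketch, hypothetical helper name):

    /* Before: u64-wide cputime64_t fields on a struct.
     *	user = kstat_cpu(cpu).cpustat.user;
     * After: one u64 slot per enum cpu_usage_stat entry. */
    static u64 user_time(int cpu)
    {
            return kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
    }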
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -10,6 +10,8 @@
 #define _INCLUDE_GUARD_LATENCYTOP_H_
 
 #include <linux/compiler.h>
+struct task_struct;
+
 #ifdef CONFIG_LATENCYTOP
 
 #define LT_SAVECOUNT 32
@@ -23,7 +25,6 @@ struct latency_record {
 };
 
-struct task_struct;
 
 extern int latencytop_enabled;
 void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
...
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
 extern int get_nohz_timer_target(void);
 #else
 static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
 /*
@@ -483,8 +485,8 @@ struct task_cputime {
 #define INIT_CPUTIME	\
 	(struct task_cputime) {					\
-		.utime = cputime_zero,				\
-		.stime = cputime_zero,				\
+		.utime = 0,					\
+		.stime = 0,					\
 		.sum_exec_runtime = 0,				\
 	}
 
@@ -901,6 +903,10 @@ struct sched_group_power {
 	 * single CPU.
	 */
	unsigned int power, power_orig;
+	/*
+	 * Number of busy cpus in this group.
+	 */
+	atomic_t nr_busy_cpus;
 };
 
 struct sched_group {
@@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 	return to_cpumask(sg->cpumask);
 }
 
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+	return cpumask_first(sched_group_cpus(group));
+}
+
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -1315,8 +1330,8 @@ struct task_struct {
 	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
	 */
-	struct task_struct *real_parent; /* real parent process */
-	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+	struct task_struct __rcu *real_parent; /* real parent process */
+	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
 	/*
 	 * children/sibling forms the list of my natural children
	 */
...
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -330,6 +330,13 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
 	     TP_PROTO(struct task_struct *tsk, u64 delay),
 	     TP_ARGS(tsk, delay));
 
+/*
+ * Tracepoint for accounting blocked time (time the task is in uninterruptible).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay));
+
 /*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
@@ -363,6 +370,56 @@ TRACE_EVENT(sched_stat_runtime,
 			(unsigned long long)__entry->vruntime)
 );
 
+#ifdef CREATE_TRACE_POINTS
+static inline u64 trace_get_sleeptime(struct task_struct *tsk)
+{
+#ifdef CONFIG_SCHEDSTATS
+	u64 block, sleep;
+
+	block = tsk->se.statistics.block_start;
+	sleep = tsk->se.statistics.sleep_start;
+	tsk->se.statistics.block_start = 0;
+	tsk->se.statistics.sleep_start = 0;
+
+	return block ? block : sleep ? sleep : 0;
+#else
+	return 0;
+#endif
+}
+#endif
+
+/*
+ * Tracepoint for accounting sleeptime (time the task is sleeping
+ * or waiting for I/O).
+ */
+TRACE_EVENT(sched_stat_sleeptime,
+
+	TP_PROTO(struct task_struct *tsk, u64 now),
+
+	TP_ARGS(tsk, now),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( u64,	sleeptime		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->sleeptime	= trace_get_sleeptime(tsk);
+		__entry->sleeptime	= __entry->sleeptime ?
+				now - __entry->sleeptime : 0;
+	)
+	TP_perf_assign(
+		__perf_count(__entry->sleeptime);
+	),
+
+	TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
+			__entry->comm, __entry->pid,
+			(unsigned long long)__entry->sleeptime)
+);
+
 /*
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.
...
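
A consumer attaches to the new tracepoint through the register_trace_<name>() stub that TRACE_EVENT generates. A hypothetical probe as a sketch (the data-first probe signature matches this kernel generation; not code from this merge):

    #include <trace/events/sched.h>

    static void probe_sleeptime(void *ignore, struct task_struct *tsk, u64 now)
    {
            pr_info("%s (%d) woke after sleeping\n", tsk->comm, tsk->pid);
    }

    /* in module init: register_trace_sched_stat_sleeptime(probe_sleeptime, NULL); */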
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,16 +2,15 @@
 # Makefile for the linux kernel.
 #
 
-obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
+obj-y     = fork.o exec_domain.o panic.o printk.o \
 	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
-	    notifier.o ksysfs.o sched_clock.o cred.o \
-	    async.o range.o
-obj-y += groups.o
+	    notifier.o ksysfs.o cred.o \
+	    async.o range.o groups.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -20,10 +19,11 @@ CFLAGS_REMOVE_lockdep_proc.o = -pg
 CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
-CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
+obj-y += sched/
+
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
@@ -99,7 +99,6 @@ obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
-obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
 
@@ -110,15 +109,6 @@ obj-$(CONFIG_PADATA) += padata.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 
-ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
-# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
-# needed for x86 only.  Why this used to be enabled for all architectures is beyond
-# me.  I suspect most platforms don't need this, but until we know that for sure
-# I turn this off for IA-64 only.  Andreas Schwab says it's also needed on m68k
-# to get a correct value for the wait-channel (WCHAN in ps). --davidm
-CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
-endif
-
 $(obj)/configs.o: $(obj)/config_data.h
 
 # config_data.h contains the same information as ikconfig.h but gzipped.
...
@@ -613,8 +613,8 @@ void acct_collect(long exitcode, int group_dead)
 		pacct->ac_flag |= ACORE;
 	if (current->flags & PF_SIGNALED)
 		pacct->ac_flag |= AXSIG;
-	pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime);
-	pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime);
+	pacct->ac_utime += current->utime;
+	pacct->ac_stime += current->stime;
 	pacct->ac_minflt += current->min_flt;
 	pacct->ac_majflt += current->maj_flt;
 	spin_unlock_irq(&current->sighand->siglock);
...
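This and the following hunks are all instances of one conversion: with cputime_t now a u64 carrying the __nocast annotation, the old cputime_add()/cputime_sub()/cputime_eq() wrappers reduce to plain C operators, while sparse still flags unannotated mixing with raw integers. A minimal sketch of the resulting convention (variable names illustrative):

static void cputime_arith_sketch(cputime_t utime, cputime_t stime)
{
	cputime_t total = utime + stime;	/* was cputime_add(utime, stime) */

	if (total)				/* was !cputime_eq(total, cputime_zero) */
		total -= utime;			/* was cputime_sub(total, utime) */
}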
@@ -178,8 +178,7 @@ static inline void check_for_tasks(int cpu)
 	write_lock_irq(&tasklist_lock);
 	for_each_process(p) {
 		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-		    (!cputime_eq(p->utime, cputime_zero) ||
-		     !cputime_eq(p->stime, cputime_zero)))
+		    (p->utime || p->stime))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
 				"(state = %ld, flags = %x)\n",
 				p->comm, task_pid_nr(p), cpu,
...
@@ -121,9 +121,9 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+		sig->utime += tsk->utime;
+		sig->stime += tsk->stime;
+		sig->gtime += tsk->gtime;
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
 		sig->nvcsw += tsk->nvcsw;
@@ -1255,19 +1255,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		spin_lock_irq(&p->real_parent->sighand->siglock);
 		psig = p->real_parent->signal;
 		sig = p->signal;
-		psig->cutime =
-			cputime_add(psig->cutime,
-			cputime_add(tgutime,
-				    sig->cutime));
-		psig->cstime =
-			cputime_add(psig->cstime,
-			cputime_add(tgstime,
-				    sig->cstime));
-		psig->cgtime =
-			cputime_add(psig->cgtime,
-			cputime_add(p->gtime,
-			cputime_add(sig->gtime,
-				    sig->cgtime)));
+		psig->cutime += tgutime + sig->cutime;
+		psig->cstime += tgstime + sig->cstime;
+		psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
 		psig->cmin_flt +=
 			p->min_flt + sig->min_flt + sig->cmin_flt;
 		psig->cmaj_flt +=
...
@@ -1023,8 +1023,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
  */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
-	tsk->cputime_expires.prof_exp = cputime_zero;
-	tsk->cputime_expires.virt_exp = cputime_zero;
+	tsk->cputime_expires.prof_exp = 0;
+	tsk->cputime_expires.virt_exp = 0;
 	tsk->cputime_expires.sched_exp = 0;
 	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
 	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
@@ -1132,14 +1132,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	init_sigpending(&p->pending);
 
-	p->utime = cputime_zero;
-	p->stime = cputime_zero;
-	p->gtime = cputime_zero;
-	p->utimescaled = cputime_zero;
-	p->stimescaled = cputime_zero;
+	p->utime = p->stime = p->gtime = 0;
+	p->utimescaled = p->stimescaled = 0;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-	p->prev_utime = cputime_zero;
-	p->prev_stime = cputime_zero;
+	p->prev_utime = p->prev_stime = 0;
 #endif
 #if defined(SPLIT_RSS_COUNTING)
 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
...
@@ -52,22 +52,22 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 	cval = it->expires;
 	cinterval = it->incr;
-	if (!cputime_eq(cval, cputime_zero)) {
+	if (cval) {
 		struct task_cputime cputime;
 		cputime_t t;
 
 		thread_group_cputimer(tsk, &cputime);
 		if (clock_id == CPUCLOCK_PROF)
-			t = cputime_add(cputime.utime, cputime.stime);
+			t = cputime.utime + cputime.stime;
 		else
 			/* CPUCLOCK_VIRT */
 			t = cputime.utime;
 
-		if (cputime_le(cval, t))
+		if (cval < t)
 			/* about to fire */
 			cval = cputime_one_jiffy;
 		else
-			cval = cputime_sub(cval, t);
+			cval = cval - t;
 	}
 
 	spin_unlock_irq(&tsk->sighand->siglock);
@@ -161,10 +161,9 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 	cval = it->expires;
 	cinterval = it->incr;
-	if (!cputime_eq(cval, cputime_zero) ||
-	    !cputime_eq(nval, cputime_zero)) {
-		if (cputime_gt(nval, cputime_zero))
-			nval = cputime_add(nval, cputime_one_jiffy);
+	if (cval || nval) {
+		if (nval > 0)
+			nval += cputime_one_jiffy;
 		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
 	}
 	it->expires = nval;
...
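Since cputime_t is unsigned, the new nval > 0 test is simply nval != 0, and the extra cputime_one_jiffy appears intended to keep a nonzero interval from rounding down to an immediate expiry at timer granularity. A small sketch of that rounding step in the new style:

static cputime_t itimer_round_sketch(cputime_t nval)
{
	if (nval > 0)				/* equivalent to: if (nval) */
		nval += cputime_one_jiffy;	/* don't fire before one full tick */
	return nval;
}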
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_clock.o = -pg
+endif
+
+ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
+# needed for x86 only. Why this used to be enabled for all architectures is beyond
+# me. I suspect most platforms don't need this, but until we know that for sure
+# I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k
+# to get a correct value for the wait-channel (WCHAN in ps). --davidm
+CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+endif
+
+obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o
+obj-$(CONFIG_SMP) += cpupri.o
+obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+obj-$(CONFIG_SCHEDSTATS) += stats.o
+obj-$(CONFIG_SCHED_DEBUG) += debug.o
 #ifdef CONFIG_SCHED_AUTOGROUP
 
+#include "sched.h"
+
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
+#include <linux/security.h>
+#include <linux/export.h>
 
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
-static void __init autogroup_init(struct task_struct *init_task)
+void __init autogroup_init(struct task_struct *init_task)
 {
 	autogroup_default.tg = &root_task_group;
 	kref_init(&autogroup_default.kref);
@@ -17,7 +21,7 @@ static void __init autogroup_init(struct task_struct *init_task)
 	init_task->signal->autogroup = &autogroup_default;
 }
 
-static inline void autogroup_free(struct task_group *tg)
+void autogroup_free(struct task_group *tg)
 {
 	kfree(tg->autogroup);
 }
@@ -59,10 +63,6 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
 	return ag;
 }
 
-#ifdef CONFIG_RT_GROUP_SCHED
-static void free_rt_sched_group(struct task_group *tg);
-#endif
-
 static inline struct autogroup *autogroup_create(void)
 {
 	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -108,8 +108,7 @@ static inline struct autogroup *autogroup_create(void)
 	return autogroup_kref_get(&autogroup_default);
 }
 
-static inline bool
-task_wants_autogroup(struct task_struct *p, struct task_group *tg)
+bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
 	if (tg != &root_task_group)
 		return false;
@@ -127,22 +126,6 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	return true;
 }
 
-static inline bool task_group_is_autogroup(struct task_group *tg)
-{
-	return !!tg->autogroup;
-}
-
-static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg)
-{
-	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
-	if (enabled && task_wants_autogroup(p, tg))
-		return p->signal->autogroup->tg;
-
-	return tg;
-}
-
 static void
 autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 {
@@ -263,7 +246,7 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SCHED_DEBUG
-static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
+int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
 	if (!task_group_is_autogroup(tg))
 		return 0;
...
 #ifdef CONFIG_SCHED_AUTOGROUP
 
+#include <linux/kref.h>
+#include <linux/rwsem.h>
+
 struct autogroup {
 	/*
 	 * reference doesn't mean how many thread attach to this
@@ -13,9 +16,28 @@ struct autogroup {
 	int			nice;
 };
 
-static inline bool task_group_is_autogroup(struct task_group *tg);
+extern void autogroup_init(struct task_struct *init_task);
+extern void autogroup_free(struct task_group *tg);
+
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return !!tg->autogroup;
+}
+
+extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
+
 static inline struct task_group *
-autogroup_task_group(struct task_struct *p, struct task_group *tg);
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+	if (enabled && task_wants_autogroup(p, tg))
+		return p->signal->autogroup->tg;
+
+	return tg;
+}
+
+extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
 
 #else /* !CONFIG_SCHED_AUTOGROUP */
...
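De-inlining most autogroup helpers while keeping autogroup_task_group() in the header preserves the hot path: every task-group lookup in the scheduler passes through it. A hedged sketch of the typical caller (the cgroup lookup helper below is hypothetical; the real wiring lives in the collapsed scheduler diff):

static inline struct task_group *task_group_sketch(struct task_struct *p)
{
	struct task_group *tg = cgroup_task_group(p);	/* hypothetical helper */

	/* autogroup may override the cgroup-derived group when enabled */
	return autogroup_task_group(p, tg);
}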
 /*
- * kernel/sched_cpupri.c
+ * kernel/sched/cpupri.c
  *
  * CPU priority management
  *
@@ -28,7 +28,7 @@
  */
 
 #include <linux/gfp.h>
-#include "sched_cpupri.h"
+#include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
 static int convert_prio(int prio)
...
 /*
- * kernel/time/sched_debug.c
+ * kernel/sched/debug.c
  *
  * Print the CFS rbtree
  *
@@ -16,6 +16,8 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 
+#include "sched.h"
+
 static DEFINE_SPINLOCK(sched_debug_lock);
 
 /*
@@ -373,7 +375,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static void sysrq_sched_debug_show(void)
+void sysrq_sched_debug_show(void)
 {
 	sched_debug_show(NULL, NULL);
 }
...
@@ -3,13 +3,13 @@
  * them to run sooner, but does not allow tons of sleepers to
  * rip the spread apart.
  */
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
+SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 
 /*
  * Place new tasks ahead so that they do not starve already running
  * tasks
  */
-SCHED_FEAT(START_DEBIT, 1)
+SCHED_FEAT(START_DEBIT, true)
 
 /*
  * Based on load and program behaviour, see if it makes sense to place
@@ -17,54 +17,54 @@ SCHED_FEAT(START_DEBIT, 1)
  * improve cache locality. Typically used with SYNC wakeups as
  * generated by pipes and the like, see also SYNC_WAKEUPS.
  */
-SCHED_FEAT(AFFINE_WAKEUPS, 1)
+SCHED_FEAT(AFFINE_WAKEUPS, true)
 
 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we
  * touched, increases cache locality.
  */
-SCHED_FEAT(NEXT_BUDDY, 0)
+SCHED_FEAT(NEXT_BUDDY, false)
 
 /*
  * Prefer to schedule the task that ran last (when we did
  * wake-preempt) as that likely will touch the same data, increases
  * cache locality.
  */
-SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(LAST_BUDDY, true)
 
 /*
  * Consider buddies to be cache hot, decreases the likelyness of a
  * cache buddy being migrated away, increases cache locality.
  */
-SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+SCHED_FEAT(CACHE_HOT_BUDDY, true)
 
 /*
  * Use arch dependent cpu power functions
  */
-SCHED_FEAT(ARCH_POWER, 0)
+SCHED_FEAT(ARCH_POWER, false)
 
-SCHED_FEAT(HRTICK, 0)
-SCHED_FEAT(DOUBLE_TICK, 0)
-SCHED_FEAT(LB_BIAS, 1)
+SCHED_FEAT(HRTICK, false)
+SCHED_FEAT(DOUBLE_TICK, false)
+SCHED_FEAT(LB_BIAS, true)
 
 /*
  * Spin-wait on mutex acquisition when the mutex owner is running on
  * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
  */
-SCHED_FEAT(OWNER_SPIN, 1)
+SCHED_FEAT(OWNER_SPIN, true)
 
 /*
  * Decrement CPU power based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_POWER, 1)
+SCHED_FEAT(NONTASK_POWER, true)
 
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-SCHED_FEAT(TTWU_QUEUE, 1)
+SCHED_FEAT(TTWU_QUEUE, true)
 
-SCHED_FEAT(FORCE_SD_OVERLAP, 0)
-SCHED_FEAT(RT_RUNTIME_SHARE, 1)
+SCHED_FEAT(FORCE_SD_OVERLAP, false)
+SCHED_FEAT(RT_RUNTIME_SHARE, true)
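The switch from 1/0 to true/false matters because features.h is designed to be included several times under different SCHED_FEAT() definitions: once to build an enum of feature bits, once to build the default mask, with the jump-label machinery keying off the same enum. A simplified sketch of that multi-include pattern (not the exact kernel macros):

#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
enum {
#include "features.h"
	__SCHED_FEAT_NR,
};
#undef SCHED_FEAT

/* default mask: each feature contributes its bit iff enabled is true */
#define SCHED_FEAT(name, enabled)	(1UL << __SCHED_FEAT_##name) * enabled |
static const unsigned long sysctl_sched_features_sketch =
#include "features.h"
	0;
#undef SCHED_FEAT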
#include "sched.h"
/* /*
* idle-task scheduling class. * idle-task scheduling class.
* *
...@@ -71,7 +73,7 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task ...@@ -71,7 +73,7 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task
/* /*
* Simple, special scheduling class for the per-CPU idle tasks: * Simple, special scheduling class for the per-CPU idle tasks:
*/ */
static const struct sched_class idle_sched_class = { const struct sched_class idle_sched_class = {
/* .next is NULL */ /* .next is NULL */
/* no enqueue/yield_task for idle tasks */ /* no enqueue/yield_task for idle tasks */
......
#include "sched.h"
/* /*
* stop-task scheduling class. * stop-task scheduling class.
* *
...@@ -80,7 +82,7 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task) ...@@ -80,7 +82,7 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
/* /*
* Simple, special scheduling class for the per-CPU stop tasks: * Simple, special scheduling class for the per-CPU stop tasks:
*/ */
static const struct sched_class stop_sched_class = { const struct sched_class stop_sched_class = {
.next = &rt_sched_class, .next = &rt_sched_class,
.enqueue_task = enqueue_task_stop, .enqueue_task = enqueue_task_stop,
......
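Dropping static here lets the new per-file scheduler objects share these class descriptors through sched.h. The classes still chain by precedence through their .next pointers; the conventional order in this kernel, highest first, is:

/*
 *	stop_sched_class -> rt_sched_class -> fair_sched_class
 *		-> idle_sched_class -> NULL  (idle's .next, as noted above)
 */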
@@ -1629,10 +1629,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
 	info.si_uid = __task_cred(tsk)->uid;
 	rcu_read_unlock();
 
-	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
-				tsk->signal->utime));
-	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
-				tsk->signal->stime));
+	info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
+	info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
 
 	info.si_status = tsk->exit_code & 0x7f;
 	if (tsk->exit_code & 0x80)
...
@@ -1605,7 +1605,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	unsigned long maxrss = 0;
 
 	memset((char *) r, 0, sizeof *r);
-	utime = stime = cputime_zero;
+	utime = stime = 0;
 
 	if (who == RUSAGE_THREAD) {
 		task_times(current, &utime, &stime);
@@ -1635,8 +1635,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 		case RUSAGE_SELF:
 			thread_group_times(p, &tgutime, &tgstime);
-			utime = cputime_add(utime, tgutime);
-			stime = cputime_add(stime, tgstime);
+			utime += tgutime;
+			stime += tgstime;
 			r->ru_nvcsw += p->signal->nvcsw;
 			r->ru_nivcsw += p->signal->nivcsw;
 			r->ru_minflt += p->signal->min_flt;
...
@@ -466,6 +466,14 @@ void tick_nohz_idle_enter(void)
 
 	WARN_ON_ONCE(irqs_disabled());
 
+	/*
+	 * Update the idle state in the scheduler domain hierarchy
+	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
+	 * State will be updated to busy during the first busy tick after
+	 * exiting idle.
+	 */
+	set_cpu_sd_state_idle();
+
 	local_irq_disable();
 
 	ts = &__get_cpu_var(tick_cpu_sched);
...
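set_cpu_sd_state_idle() itself is defined in the collapsed scheduler diff; conceptually it walks this CPU's sched domains and drops its busy mark so the NOHZ idle load balancer can see how many CPUs in each group are still busy. An illustrative sketch only, with hypothetical field names:

static void set_cpu_sd_state_idle_sketch(int cpu)
{
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd)
		atomic_dec(&sd->groups->sgp->nr_busy_cpus);	/* hypothetical */
	rcu_read_unlock();
}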
@@ -127,7 +127,7 @@ void acct_update_integrals(struct task_struct *tsk)
 
 		local_irq_save(flags);
 		time = tsk->stime + tsk->utime;
-		dtime = cputime_sub(time, tsk->acct_timexpd);
+		dtime = time - tsk->acct_timexpd;
 		jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
 		delta = value.tv_sec;
 		delta = delta * USEC_PER_SEC + value.tv_usec;
...