Commit 99ebcf82 authored by Linus Torvalds

Merge branch 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (36 commits)
  fix documentation of sysrq-q really
  Fix documentation of sysrq-q
  timer_list: add base address to clock base
  timer_list: print cpu number of clockevents device
  timer_list: print real timer address
  NOHZ: restart tick device from irq_enter()
  NOHZ: split tick_nohz_restart_sched_tick()
  NOHZ: unify the nohz function calls in irq_enter()
  timers: fix itimer/many thread hang, fix
  timers: fix itimer/many thread hang, v3
  ntp: improve adjtimex frequency rounding
  timekeeping: fix rounding problem during clock update
  ntp: let update_persistent_clock() sleep
  hrtimer: reorder struct hrtimer to save 8 bytes on 64bit builds
  posix-timers: lock_timer: make it readable
  posix-timers: lock_timer: kill the bogus ->it_id check
  posix-timers: kill ->it_sigev_signo and ->it_sigev_value
  posix-timers: sys_timer_create: cleanup the error handling
  posix-timers: move the initialization of timer->sigq from send to create path
  posix-timers: sys_timer_create: simplify and s/tasklist/rcu/
  ...

Fix trivial conflicts due to sysrq-q description clashes in
Documentation/sysrq.txt and drivers/char/sysrq.c
parents 72558dde c465a76a
@@ -95,8 +95,9 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
 'p'	- Will dump the current registers and flags to your console.
-'q'	- Will dump a list of all running hrtimers.
-	  WARNING: Does not cover any other timers
+'q'	- Will dump per CPU lists of all armed hrtimers (but NOT regular
+	  timer_list timers) and detailed information about all
+	  clockevent devices.
 'r'	- Turns off keyboard raw mode and sets it to XLATE.
...
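As the documentation hunk above notes, each of these keys can also be triggered by writing the character to /proc/sysrq-trigger. A minimal userspace sketch of that, for illustration only (not part of this commit; needs root and CONFIG_MAGIC_SYSRQ=y, output lands in the kernel log):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sysrq-trigger", "w");

	if (!f) {
		perror("/proc/sysrq-trigger");
		return 1;
	}
	fputc('q', f);	/* same as SysRq-q: dump hrtimers/clockevents to dmesg */
	fclose(f);
	return 0;
}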
@@ -168,7 +168,7 @@ static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
 static struct sysrq_key_op sysrq_show_timers_op = {
 	.handler	= sysrq_handle_show_timers,
 	.help_msg	= "show-all-timers(Q)",
-	.action_msg	= "Show pending hrtimers (no others)",
+	.action_msg	= "Show clockevent devices & pending hrtimers (no others)",
 };
 static void sysrq_handle_mountro(int key, struct tty_struct *tty)
...
@@ -237,9 +237,12 @@ static int __init parse_pmtmr(char *arg)
 	if (strict_strtoul(arg, 16, &base))
 		return -EINVAL;
+#ifdef CONFIG_X86_64
+	if (base > UINT_MAX)
+		return -ERANGE;
+#endif
 	printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
-	       (unsigned int)pmtmr_ioport, base);
+	       pmtmr_ioport, base);
 	pmtmr_ioport = base;
 	return 1;
...
@@ -1341,20 +1341,15 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
 	prstatus->pr_pgrp = task_pgrp_vnr(p);
 	prstatus->pr_sid = task_session_vnr(p);
 	if (thread_group_leader(p)) {
+		struct task_cputime cputime;
+
 		/*
-		 * This is the record for the group leader.  Add in the
-		 * cumulative times of previous dead threads.  This total
-		 * won't include the time of each live thread whose state
-		 * is included in the core dump.  The final total reported
-		 * to our parent process when it calls wait4 will include
-		 * those sums as well as the little bit more time it takes
-		 * this and each other thread to finish dying after the
-		 * core dump synchronization phase.
+		 * This is the record for the group leader.  It shows the
+		 * group-wide total, not its individual thread total.
 		 */
-		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
-				   &prstatus->pr_utime);
-		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
-				   &prstatus->pr_stime);
+		thread_group_cputime(p, &cputime);
+		cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
+		cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
 	} else {
 		cputime_to_timeval(p->utime, &prstatus->pr_utime);
 		cputime_to_timeval(p->stime, &prstatus->pr_stime);
...
@@ -388,20 +388,20 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		/* add up live thread stats at the group level */
 		if (whole) {
+			struct task_cputime cputime;
 			struct task_struct *t = task;
 			do {
 				min_flt += t->min_flt;
 				maj_flt += t->maj_flt;
-				utime = cputime_add(utime, task_utime(t));
-				stime = cputime_add(stime, task_stime(t));
 				gtime = cputime_add(gtime, task_gtime(t));
 				t = next_thread(t);
 			} while (t != task);
 			min_flt += sig->min_flt;
 			maj_flt += sig->maj_flt;
-			utime = cputime_add(utime, sig->utime);
-			stime = cputime_add(stime, sig->stime);
+			thread_group_cputime(task, &cputime);
+			utime = cputime.utime;
+			stime = cputime.stime;
 			gtime = cputime_add(gtime, sig->gtime);
 		}
...
@@ -45,7 +45,8 @@ struct clocksource;
  * @read:		returns a cycle value
  * @mask:		bitmask for two's complement
  *			subtraction of non 64 bit counters
- * @mult:		cycle to nanosecond multiplier
+ * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
+ * @mult_orig:		cycle to nanosecond multiplier (unadjusted by NTP)
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
@@ -63,6 +64,7 @@ struct clocksource {
 	cycle_t (*read)(void);
 	cycle_t mask;
 	u32 mult;
+	u32 mult_orig;
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
@@ -77,6 +79,7 @@ struct clocksource {
 	/* timekeeping specific data, ignore */
 	cycle_t cycle_interval;
 	u64 xtime_interval;
+	u32 raw_interval;
 	/*
 	 * Second part is written at each timer interrupt
 	 * Keep it in a different cache line to dirty no
@@ -85,6 +88,7 @@ struct clocksource {
 	cycle_t cycle_last ____cacheline_aligned_in_smp;
 	u64 xtime_nsec;
 	s64 error;
+	struct timespec raw_time;
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 	/* Watchdog related data, used by the framework */
@@ -201,17 +205,19 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 {
 	u64 tmp;
-	/* XXX - All of this could use a whole lot of optimization */
+	/* Do the ns -> cycle conversion first, using original mult */
 	tmp = length_nsec;
 	tmp <<= c->shift;
-	tmp += c->mult/2;
-	do_div(tmp, c->mult);
+	tmp += c->mult_orig/2;
+	do_div(tmp, c->mult_orig);
 	c->cycle_interval = (cycle_t)tmp;
 	if (c->cycle_interval == 0)
 		c->cycle_interval = 1;
+	/* Go back from cycles -> shifted ns, this time use ntp adjused mult */
 	c->xtime_interval = (u64)c->cycle_interval * c->mult;
+	c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
 }
...
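To see what the hunk above computes, here is a standalone sketch of the same arithmetic with made-up numbers (the variable names mirror the kernel fields; the mult/shift values are illustrative only, not from any real clocksource):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values: shift = 22, with mult nudged slightly
	 * away from mult_orig, as NTP adjustment would do.
	 */
	uint32_t mult      = 4194345;	/* NTP-adjusted multiplier */
	uint32_t mult_orig = 4194304;	/* unadjusted multiplier (2^22) */
	uint32_t shift     = 22;
	uint64_t length_nsec = 10000000;	/* 10 ms tick length */

	/* ns -> cycles, rounded, using the unadjusted multiplier */
	uint64_t tmp = (length_nsec << shift) + mult_orig / 2;
	uint64_t cycle_interval = tmp / mult_orig;

	/* cycles -> shifted ns with the NTP-adjusted mult (xtime),
	 * and plain ns with the original mult (the new raw clock).
	 */
	uint64_t xtime_interval = cycle_interval * mult;
	uint64_t raw_interval = (cycle_interval * mult_orig) >> shift;

	printf("cycles=%llu xtime(shifted ns)=%llu raw ns=%llu\n",
	       (unsigned long long)cycle_interval,
	       (unsigned long long)xtime_interval,
	       (unsigned long long)raw_interval);
	return 0;
}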
@@ -125,12 +125,12 @@ struct hrtimer {
 	enum hrtimer_restart		(*function)(struct hrtimer *);
 	struct hrtimer_clock_base	*base;
 	unsigned long			state;
-	enum hrtimer_cb_mode		cb_mode;
 	struct list_head		cb_entry;
+	enum hrtimer_cb_mode		cb_mode;
 #ifdef CONFIG_TIMER_STATS
+	int				start_pid;
 	void				*start_site;
 	char				start_comm[16];
-	int				start_pid;
 #endif
 };
@@ -155,10 +155,8 @@ struct hrtimer_sleeper {
  * @first:		pointer to the timer node which expires first
  * @resolution:		the resolution of the clock, in nanoseconds
  * @get_time:		function to retrieve the current time of the clock
- * @get_softirq_time:	function to retrieve the current time from the softirq
  * @softirq_time:	the time when running the hrtimer queue in the softirq
  * @offset:		offset of this clock to the monotonic base
- * @reprogram:		function to reprogram the timer event
  */
 struct hrtimer_clock_base {
 	struct hrtimer_cpu_base	*cpu_base;
@@ -167,13 +165,9 @@ struct hrtimer_clock_base {
 	struct rb_node		*first;
 	ktime_t			resolution;
 	ktime_t			(*get_time)(void);
-	ktime_t			(*get_softirq_time)(void);
 	ktime_t			softirq_time;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t			offset;
-	int			(*reprogram)(struct hrtimer *t,
-					     struct hrtimer_clock_base *b,
-					     ktime_t n);
 #endif
 };
...
@@ -52,6 +52,7 @@ static inline int kstat_irqs(int irq)
 	return sum;
 }
+extern unsigned long long task_delta_exec(struct task_struct *);
 extern void account_user_time(struct task_struct *, cputime_t);
 extern void account_user_time_scaled(struct task_struct *, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t);
...
@@ -45,8 +45,6 @@ struct k_itimer {
 	int it_requeue_pending;		/* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
 	int it_sigev_notify;		/* notify word of sigevent struct */
-	int it_sigev_signo;		/* signo word of sigevent struct */
-	sigval_t it_sigev_value;	/* value word of sigevent struct */
 	struct task_struct *it_process;	/* process to send signal to */
 	struct sigqueue *sigq;		/* signal queue entry. */
 	union {
@@ -115,4 +113,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
 long clock_nanosleep_restart(struct restart_block *restart_block);
+void update_rlimit_cpu(unsigned long rlim_new);
+
 #endif
@@ -434,6 +434,39 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:		time spent in user mode, in &cputime_t units
+ * @stime:		time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp	stime
+#define virt_exp	utime
+#define sched_exp	sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:		thread group interval timers; substructure for
+ *			uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+	struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -479,6 +512,17 @@ struct signal_struct {
 	cputime_t it_prof_expires, it_virt_expires;
 	cputime_t it_prof_incr, it_virt_incr;
+	/*
+	 * Thread group totals for process CPU clocks.
+	 * See thread_group_cputime(), et al, for details.
+	 */
+	struct thread_group_cputime cputime;
+
+	/* Earliest-expiration cache. */
+	struct task_cputime cputime_expires;
+
+	struct list_head cpu_timers[3];
+
 	/* job control IDs */
@@ -509,7 +553,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t utime, stime, cutime, cstime;
+	cputime_t cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -517,14 +561,6 @@ struct signal_struct {
 	unsigned long inblock, oublock, cinblock, coublock;
 	struct task_io_accounting ioac;
-	/*
-	 * Cumulative ns of scheduled CPU time for dead threads in the
-	 * group, not including a zombie group leader.  (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long sum_sched_runtime;
-
 	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
@@ -536,8 +572,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
-	struct list_head cpu_timers[3];
-
 	/* keep the process-shared keyrings here so that they do the right
 	 * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -1146,8 +1180,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
-	cputime_t it_prof_expires, it_virt_expires;
-	unsigned long long it_sched_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 /* process credentials */
@@ -1597,6 +1630,7 @@ extern unsigned long long cpu_clock(int cpu);
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -2093,6 +2127,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 #endif
 }
+/*
+ * Thread group CPU time accounting.
+ */
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+	sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+	if (curr->signal->cputime.totals)
+		return 0;
+	return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+	free_percpu(sig->cputime.totals);
+}
+
 /*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
...
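The sched.h hunks above funnel all group-time readers through thread_group_cputime(). Its SMP body lives in kernel/posix-cpu-timers.c, which this page does not show; the following is only a sketch of the summation over the per-cpu totals declared above, not the kernel's exact code (allocation of sig->cputime.totals and the UP variant are omitted):

/* Sketch only: sum per-CPU task_cputime totals for tsk's thread group.
 * Assumes sig->cputime.totals was already allocated via
 * thread_group_cputime_alloc(); the real kernel also handles the
 * not-yet-allocated case.
 */
static void thread_group_cputime_sketch(struct task_struct *tsk,
					struct task_cputime *times)
{
	struct task_cputime *totals = tsk->signal->cputime.totals;
	struct task_cputime *tot;
	int i;

	times->utime = cputime_zero;
	times->stime = cputime_zero;
	times->sum_exec_runtime = 0;

	for_each_possible_cpu(i) {
		tot = per_cpu_ptr(totals, i);
		times->utime = cputime_add(times->utime, tot->utime);
		times->stime = cputime_add(times->stime, tot->stime);
		times->sum_exec_runtime += tot->sum_exec_runtime;
	}
}

The design point of the per-cpu layout is visible in the scheduler hunks further down: the hot tick-time writers (account_group_user_time() and friends) touch only their own CPU's slot, while the rare readers pay the cost of the summation.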
@@ -96,9 +96,11 @@ extern cpumask_t *tick_get_broadcast_oneshot_mask(void);
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
+extern void tick_check_idle(int cpu);
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
 # endif
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
@@ -106,26 +108,23 @@ static inline void tick_init(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 # ifdef CONFIG_NO_HZ
 extern void tick_nohz_stop_sched_tick(int inidle);
 extern void tick_nohz_restart_sched_tick(void);
-extern void tick_nohz_update_jiffies(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
-extern void tick_nohz_stop_idle(int cpu);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_stop_sched_tick(int inidle) { }
 static inline void tick_nohz_restart_sched_tick(void) { }
-static inline void tick_nohz_update_jiffies(void) { }
 static inline ktime_t tick_nohz_get_sleep_length(void)
 {
 	ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
 	return len;
 }
-static inline void tick_nohz_stop_idle(int cpu) { }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */
...
@@ -119,6 +119,7 @@ extern int do_setitimer(int which, struct itimerval *value,
 extern unsigned int alarm_setitimer(unsigned int seconds);
 extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
+extern void getrawmonotonic(struct timespec *ts);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
@@ -127,6 +128,9 @@ extern int timekeeping_valid_for_hres(void);
 extern void update_wall_time(void);
 extern void update_xtime_cache(u64 nsec);
+struct tms;
+extern void do_sys_times(struct tms *);
+
 /**
  * timespec_to_ns - Convert timespec to nanoseconds
  * @ts:		pointer to the timespec variable to be converted
@@ -216,6 +220,7 @@ struct itimerval {
 #define CLOCK_MONOTONIC			1
 #define CLOCK_PROCESS_CPUTIME_ID	2
 #define CLOCK_THREAD_CPUTIME_ID		3
+#define CLOCK_MONOTONIC_RAW		4
 /*
  * The IDs of various hardware clocks:
...
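From userspace, the new clock id is reachable through clock_gettime() once libc headers catch up. A minimal consumer, for illustration (defines the constant by hand, as early userspace had to; link with -lrt on older glibc):

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4	/* value added to include/linux/time.h above */
#endif

int main(void)
{
	struct timespec ts;

	/* Hardware-based monotonic time, never slewed by NTP */
	if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts)) {
		perror("clock_gettime");
		return 1;
	}
	printf("raw monotonic: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}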
@@ -82,7 +82,7 @@
  */
 #define SHIFT_USEC 16		/* frequency offset scale (shift) */
 #define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT 20
+#define PPM_SCALE_INV_SHIFT 19
 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
 		       PPM_SCALE + 1)
@@ -141,8 +141,15 @@ struct timex {
 #define ADJ_MICRO		0x1000	/* select microsecond resolution */
 #define ADJ_NANO		0x2000	/* select nanosecond resolution */
 #define ADJ_TICK		0x4000	/* tick value */
+
+#ifdef __KERNEL__
+#define ADJ_ADJTIME		0x8000	/* switch between adjtime/adjtimex modes */
+#define ADJ_OFFSET_SINGLESHOT	0x0001	/* old-fashioned adjtime */
+#define ADJ_OFFSET_READONLY	0x2000	/* read-only adjtime */
+#else
 #define ADJ_OFFSET_SINGLESHOT	0x8001	/* old-fashioned adjtime */
 #define ADJ_OFFSET_SS_READ	0xa001	/* read-only adjtime */
+#endif
 /* xntp 3.4 compatibility names */
 #define MOD_OFFSET	ADJ_OFFSET
...
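The PPM_SCALE / PPM_SCALE_INV pair is built to round-trip: PPM_SCALE converts a user-supplied scaled-ppm frequency into the kernel's NTP_SCALE_SHIFT representation, and PPM_SCALE_INV, corrected by PPM_SCALE_INV_SHIFT, converts it back; trimming that shift from 20 to 19 is what the "ntp: improve adjtimex frequency rounding" commit in the list above adjusts. A standalone sketch of the round trip, for illustration only (NTP_SCALE_SHIFT assumed to be 32, as in this kernel series; the back-conversion mirrors how kernel/time/ntp.c applies these constants, not a verbatim copy):

#include <stdio.h>

#define NSEC_PER_USEC		1000ll
#define NTP_SCALE_SHIFT		32	/* assumed value for this kernel series */
#define SHIFT_USEC		16
#define PPM_SCALE		(NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
#define PPM_SCALE_INV_SHIFT	19
#define PPM_SCALE_INV		((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
				 PPM_SCALE + 1)

int main(void)
{
	/* 500 ppm with a 16-bit fraction, the units of timex.freq */
	long long user_freq = 500ll << SHIFT_USEC;

	/* user units -> internal NTP_SCALE_SHIFT-scaled units */
	long long internal = user_freq * PPM_SCALE;

	/* and back again through the scaled reciprocal */
	long long back = ((internal >> PPM_SCALE_INV_SHIFT) * PPM_SCALE_INV)
				>> NTP_SCALE_SHIFT;

	/* 'back' matches user_freq up to rounding in the last few bits */
	printf("in=%lld out=%lld\n", user_freq, back);
	return 0;
}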
@@ -23,6 +23,7 @@
 #include <linux/timex.h>
 #include <linux/migrate.h>
 #include <linux/posix-timers.h>
+#include <linux/times.h>
 #include <asm/uaccess.h>
@@ -208,49 +209,23 @@ asmlinkage long compat_sys_setitimer(int which,
 	return 0;
 }
+static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
+{
+	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
+}
+
 asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
 {
-	/*
-	 *	In the SMP world we might just be unlucky and have one of
-	 *	the times increment as we use it. Since the value is an
-	 *	atomically safe type this is just fine. Conceptually its
-	 *	as if the syscall took an instant longer to occur.
-	 */
 	if (tbuf) {
+		struct tms tms;
 		struct compat_tms tmp;
-		struct task_struct *tsk = current;
-		struct task_struct *t;
-		cputime_t utime, stime, cutime, cstime;
-		read_lock(&tasklist_lock);
-		utime = tsk->signal->utime;
-		stime = tsk->signal->stime;
-		t = tsk;
-		do {
-			utime = cputime_add(utime, t->utime);
-			stime = cputime_add(stime, t->stime);
-			t = next_thread(t);
-		} while (t != tsk);
-		/*
-		 * While we have tasklist_lock read-locked, no dying thread
-		 * can be updating current->signal->[us]time.  Instead,
-		 * we got their counts included in the live thread loop.
-		 * However, another thread can come in right now and
-		 * do a wait call that updates current->signal->c[us]time.
-		 * To make sure we always see that pair updated atomically,
-		 * we take the siglock around fetching them.
-		 */
-		spin_lock_irq(&tsk->sighand->siglock);
-		cutime = tsk->signal->cutime;
-		cstime = tsk->signal->cstime;
-		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
-		tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
-		tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
-		tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
-		tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
+		do_sys_times(&tms);
+		/* Convert our struct tms to the compat version. */
+		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
+		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
+		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
+		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
 		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
 			return -EFAULT;
 	}
...
@@ -112,8 +112,6 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, task_utime(tsk));
-		sig->stime = cputime_add(sig->stime, task_stime(tsk));
 		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
@@ -122,7 +120,6 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->inblock += task_io_get_inblock(tsk);
 		sig->oublock += task_io_get_oublock(tsk);
 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
-		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
 		sig = NULL; /* Marker for below. */
 	}
@@ -1301,6 +1298,7 @@ static int wait_task_zombie(struct task_struct *p, int options,
 	if (likely(!traced)) {
 		struct signal_struct *psig;
 		struct signal_struct *sig;
+		struct task_cputime cputime;
 		/*
 		 * The resource counters for the group leader are in its
@@ -1316,20 +1314,23 @@ static int wait_task_zombie(struct task_struct *p, int options,
 		 * need to protect the access to p->parent->signal fields,
 		 * as other threads in the parent group can be right
 		 * here reaping other children at the same time.
+		 *
+		 * We use thread_group_cputime() to get times for the thread
+		 * group, which consolidates times for all threads in the
+		 * group including the group leader.
 		 */
 		spin_lock_irq(&p->parent->sighand->siglock);
 		psig = p->parent->signal;
 		sig = p->signal;
+		thread_group_cputime(p, &cputime);
 		psig->cutime =
 			cputime_add(psig->cutime,
-				    cputime_add(p->utime,
-						cputime_add(sig->utime,
-							    sig->cutime)));
+				    cputime_add(cputime.utime,
+						sig->cutime));
 		psig->cstime =
 			cputime_add(psig->cstime,
-				    cputime_add(p->stime,
-						cputime_add(sig->stime,
-							    sig->cstime)));
+				    cputime_add(cputime.stime,
+						sig->cstime));
 		psig->cgtime =
 			cputime_add(psig->cgtime,
 				    cputime_add(p->gtime,
...
@@ -759,15 +759,44 @@ void __cleanup_sighand(struct sighand_struct *sighand)
 		kmem_cache_free(sighand_cachep, sighand);
 }
+/*
+ * Initialize POSIX timer handling for a thread group.
+ */
+static void posix_cpu_timers_init_group(struct signal_struct *sig)
+{
+	/* Thread group counters. */
+	thread_group_cputime_init(sig);
+
+	/* Expiration times and increments. */
+	sig->it_virt_expires = cputime_zero;
+	sig->it_virt_incr = cputime_zero;
+	sig->it_prof_expires = cputime_zero;
+	sig->it_prof_incr = cputime_zero;
+
+	/* Cached expiration times. */
+	sig->cputime_expires.prof_exp = cputime_zero;
+	sig->cputime_expires.virt_exp = cputime_zero;
+	sig->cputime_expires.sched_exp = 0;
+
+	/* The timer lists. */
+	INIT_LIST_HEAD(&sig->cpu_timers[0]);
+	INIT_LIST_HEAD(&sig->cpu_timers[1]);
+	INIT_LIST_HEAD(&sig->cpu_timers[2]);
+}
+
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 	int ret;
 	if (clone_flags & CLONE_THREAD) {
+		ret = thread_group_cputime_clone_thread(current);
+		if (likely(!ret)) {
 			atomic_inc(&current->signal->count);
 			atomic_inc(&current->signal->live);
-		return 0;
+		}
+		return ret;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
@@ -795,40 +824,25 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sig->it_real_incr.tv64 = 0;
 	sig->real_timer.function = it_real_fn;
-	sig->it_virt_expires = cputime_zero;
-	sig->it_virt_incr = cputime_zero;
-	sig->it_prof_expires = cputime_zero;
-	sig->it_prof_incr = cputime_zero;
 	sig->leader = 0;	/* session leadership doesn't inherit */
 	sig->tty_old_pgrp = NULL;
 	sig->tty = NULL;
-	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
+	sig->cutime = sig->cstime = cputime_zero;
 	sig->gtime = cputime_zero;
 	sig->cgtime = cputime_zero;
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
 	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
 	task_io_accounting_init(&sig->ioac);
-	sig->sum_sched_runtime = 0;
-	INIT_LIST_HEAD(&sig->cpu_timers[0]);
-	INIT_LIST_HEAD(&sig->cpu_timers[1]);
-	INIT_LIST_HEAD(&sig->cpu_timers[2]);
 	taskstats_tgid_init(sig);
 	task_lock(current->group_leader);
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-		/*
-		 * New sole thread in the process gets an expiry time
-		 * of the whole CPU time limit.
-		 */
-		tsk->it_prof_expires =
-			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
-	}
+	posix_cpu_timers_init_group(sig);
 	acct_init_pacct(&sig->pacct);
 	tty_audit_fork(sig);
@@ -838,6 +852,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 void __cleanup_signal(struct signal_struct *sig)
 {
+	thread_group_cputime_free(sig);
 	exit_thread_group_keys(sig);
 	tty_kref_put(sig->tty);
 	kmem_cache_free(signal_cachep, sig);
@@ -887,6 +902,19 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
+/*
+ * Initialize POSIX timer handling for a single task.
+ */
+static void posix_cpu_timers_init(struct task_struct *tsk)
+{
+	tsk->cputime_expires.prof_exp = cputime_zero;
+	tsk->cputime_expires.virt_exp = cputime_zero;
+	tsk->cputime_expires.sched_exp = 0;
+	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
+	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
+	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -997,12 +1025,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	task_io_accounting_init(&p->ioac);
 	acct_clear_integrals(p);
-	p->it_virt_expires = cputime_zero;
-	p->it_prof_expires = cputime_zero;
-	p->it_sched_expires = 0;
-	INIT_LIST_HEAD(&p->cpu_timers[0]);
-	INIT_LIST_HEAD(&p->cpu_timers[1]);
-	INIT_LIST_HEAD(&p->cpu_timers[2]);
+	posix_cpu_timers_init(p);
 	p->lock_depth = -1;		/* -1 = no lock */
 	do_posix_clock_monotonic_gettime(&p->start_time);
@@ -1203,21 +1226,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (clone_flags & CLONE_THREAD) {
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
-		if (!cputime_eq(current->signal->it_virt_expires,
-				cputime_zero) ||
-		    !cputime_eq(current->signal->it_prof_expires,
-				cputime_zero) ||
-		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
-		    !list_empty(&current->signal->cpu_timers[0]) ||
-		    !list_empty(&current->signal->cpu_timers[1]) ||
-		    !list_empty(&current->signal->cpu_timers[2])) {
-			/*
-			 * Have child wake up on its first tick to check
-			 * for process CPU timers.
-			 */
-			p->it_prof_expires = jiffies_to_cputime(1);
-		}
 	}
 	if (likely(p->pid)) {
...
@@ -1403,9 +1403,7 @@ void hrtimer_run_queues(void)
 		if (!base->first)
 			continue;
-		if (base->get_softirq_time)
-			base->softirq_time = base->get_softirq_time();
-		else if (gettime) {
+		if (gettime) {
 			hrtimer_get_softirq_time(cpu_base);
 			gettime = 0;
 		}
@@ -1688,9 +1686,11 @@ static void migrate_hrtimers(int cpu)
 	new_base = &get_cpu_var(hrtimer_bases);
 	tick_cancel_sched_timer(cpu);
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1703,8 +1703,7 @@ static void migrate_hrtimers(int cpu)
 		raise = 1;
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
 	if (raise)
...
@@ -55,17 +55,15 @@ int do_getitimer(int which, struct itimerval *value)
 		spin_unlock_irq(&tsk->sighand->siglock);
 		break;
 	case ITIMER_VIRTUAL:
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_virt_expires;
 		cinterval = tsk->signal->it_virt_incr;
 		if (!cputime_eq(cval, cputime_zero)) {
-			struct task_struct *t = tsk;
-			cputime_t utime = tsk->signal->utime;
-			do {
-				utime = cputime_add(utime, t->utime);
-				t = next_thread(t);
-			} while (t != tsk);
+			struct task_cputime cputime;
+			cputime_t utime;
+
+			thread_group_cputime(tsk, &cputime);
+			utime = cputime.utime;
 			if (cputime_le(cval, utime)) { /* about to fire */
 				cval = jiffies_to_cputime(1);
 			} else {
@@ -73,25 +71,19 @@ int do_getitimer(int which, struct itimerval *value)
 			}
 		}
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		cputime_to_timeval(cval, &value->it_value);
 		cputime_to_timeval(cinterval, &value->it_interval);
 		break;
 	case ITIMER_PROF:
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_prof_expires;
 		cinterval = tsk->signal->it_prof_incr;
 		if (!cputime_eq(cval, cputime_zero)) {
-			struct task_struct *t = tsk;
-			cputime_t ptime = cputime_add(tsk->signal->utime,
-						      tsk->signal->stime);
-			do {
-				ptime = cputime_add(ptime,
-						    cputime_add(t->utime,
-								t->stime));
-				t = next_thread(t);
-			} while (t != tsk);
+			struct task_cputime times;
+			cputime_t ptime;
+
+			thread_group_cputime(tsk, &times);
+			ptime = cputime_add(times.utime, times.stime);
 			if (cputime_le(cval, ptime)) { /* about to fire */
 				cval = jiffies_to_cputime(1);
 			} else {
@@ -99,7 +91,6 @@ int do_getitimer(int which, struct itimerval *value)
 			}
 		}
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		cputime_to_timeval(cval, &value->it_value);
 		cputime_to_timeval(cinterval, &value->it_interval);
 		break;
@@ -185,7 +176,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 	case ITIMER_VIRTUAL:
 		nval = timeval_to_cputime(&value->it_value);
 		ninterval = timeval_to_cputime(&value->it_interval);
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_virt_expires;
 		cinterval = tsk->signal->it_virt_incr;
@@ -200,7 +190,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 		tsk->signal->it_virt_expires = nval;
 		tsk->signal->it_virt_incr = ninterval;
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		if (ovalue) {
 			cputime_to_timeval(cval, &ovalue->it_value);
 			cputime_to_timeval(cinterval, &ovalue->it_interval);
@@ -209,7 +198,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 	case ITIMER_PROF:
 		nval = timeval_to_cputime(&value->it_value);
 		ninterval = timeval_to_cputime(&value->it_interval);
-		read_lock(&tasklist_lock);
 		spin_lock_irq(&tsk->sighand->siglock);
 		cval = tsk->signal->it_prof_expires;
 		cinterval = tsk->signal->it_prof_incr;
@@ -224,7 +212,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
 		tsk->signal->it_prof_expires = nval;
 		tsk->signal->it_prof_incr = ninterval;
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 		if (ovalue) {
 			cputime_to_timeval(cval, &ovalue->it_value);
 			cputime_to_timeval(cinterval, &ovalue->it_interval);
...
@@ -222,6 +222,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 	return 0;
 }
+/*
+ * Get monotonic time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+	getrawmonotonic(tp);
+	return 0;
+}
+
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
@@ -235,9 +244,15 @@ static __init int init_posix_timers(void)
 		.clock_get = posix_ktime_get_ts,
 		.clock_set = do_posix_clock_nosettime,
 	};
+	struct k_clock clock_monotonic_raw = {
+		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_get_monotonic_raw,
+		.clock_set = do_posix_clock_nosettime,
+	};
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
 	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
+	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
 					sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -298,6 +313,7 @@ void do_schedule_next_timer(struct siginfo *info)
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
+	int shared, ret;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -311,25 +327,10 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	 */
 	timr->sigq->info.si_sys_private = si_private;
-	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_code = SI_TIMER;
-	timr->sigq->info.si_tid = timr->it_id;
-	timr->sigq->info.si_value = timr->it_sigev_value;
-	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-		struct task_struct *leader;
-		int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
-		if (likely(ret >= 0))
-			return ret;
-
-		timr->it_sigev_notify = SIGEV_SIGNAL;
-		leader = timr->it_process->group_leader;
-		put_task_struct(timr->it_process);
-		timr->it_process = leader;
-	}
-	return send_sigqueue(timr->sigq, timr->it_process, 1);
+	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+	ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+	/* If we failed to send the signal the timer stops. */
+	return ret > 0;
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
@@ -468,11 +469,9 @@ sys_timer_create(const clockid_t which_clock,
 		 struct sigevent __user *timer_event_spec,
 		 timer_t __user * created_timer_id)
 {
-	int error = 0;
-	struct k_itimer *new_timer = NULL;
-	int new_timer_id;
-	struct task_struct *process = NULL;
-	unsigned long flags;
+	struct k_itimer *new_timer;
+	int error, new_timer_id;
+	struct task_struct *process;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
@@ -490,12 +489,11 @@ sys_timer_create(const clockid_t which_clock,
 		goto out;
 	}
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id, (void *) new_timer,
-			    &new_timer_id);
+	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 	spin_unlock_irq(&idr_lock);
-	if (error) {
-		if (error == -EAGAIN)
-			goto retry;
+	if (error == -EAGAIN)
+		goto retry;
+	else if (error) {
 		/*
 		 * Weird looking, but we return EAGAIN if the IDR is
 		 * full (proper POSIX return value for this)
@@ -526,67 +524,43 @@ sys_timer_create(const clockid_t which_clock,
 			error = -EFAULT;
 			goto out;
 		}
-		new_timer->it_sigev_notify = event.sigev_notify;
-		new_timer->it_sigev_signo = event.sigev_signo;
-		new_timer->it_sigev_value = event.sigev_value;
-		read_lock(&tasklist_lock);
-		if ((process = good_sigevent(&event))) {
-			/*
-			 * We may be setting up this process for another
-			 * thread.  It may be exiting.  To catch this
-			 * case the we check the PF_EXITING flag.  If
-			 * the flag is not set, the siglock will catch
-			 * him before it is too late (in exit_itimers).
-			 *
-			 * The exec case is a bit more invloved but easy
-			 * to code.  If the process is in our thread
-			 * group (and it must be or we would not allow
-			 * it here) and is doing an exec, it will cause
-			 * us to be killed.  In this case it will wait
-			 * for us to die which means we can finish this
-			 * linkage with our last gasp. I.e. no code :)
-			 */
-			spin_lock_irqsave(&process->sighand->siglock, flags);
-			if (!(process->flags & PF_EXITING)) {
-				new_timer->it_process = process;
-				list_add(&new_timer->list,
-					 &process->signal->posix_timers);
-				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-					get_task_struct(process);
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-			} else {
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-				process = NULL;
-			}
-		}
-		read_unlock(&tasklist_lock);
+		rcu_read_lock();
+		process = good_sigevent(&event);
+		if (process)
+			get_task_struct(process);
+		rcu_read_unlock();
 		if (!process) {
 			error = -EINVAL;
 			goto out;
 		}
 	} else {
-		new_timer->it_sigev_notify = SIGEV_SIGNAL;
-		new_timer->it_sigev_signo = SIGALRM;
-		new_timer->it_sigev_value.sival_int = new_timer->it_id;
+		event.sigev_notify = SIGEV_SIGNAL;
+		event.sigev_signo = SIGALRM;
+		event.sigev_value.sival_int = new_timer->it_id;
 		process = current->group_leader;
-		spin_lock_irqsave(&process->sighand->siglock, flags);
-		new_timer->it_process = process;
-		list_add(&new_timer->list, &process->signal->posix_timers);
-		spin_unlock_irqrestore(&process->sighand->siglock, flags);
+		get_task_struct(process);
 	}
+	new_timer->it_sigev_notify     = event.sigev_notify;
+	new_timer->sigq->info.si_signo = event.sigev_signo;
+	new_timer->sigq->info.si_value = event.sigev_value;
+	new_timer->sigq->info.si_tid   = new_timer->it_id;
+	new_timer->sigq->info.si_code  = SI_TIMER;
+
+	spin_lock_irq(&current->sighand->siglock);
+	new_timer->it_process = process;
+	list_add(&new_timer->list, &current->signal->posix_timers);
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return 0;
 	/*
 	 * In the case of the timer belonging to another task, after
 	 * the task is unlocked, the timer is owned by the other task
 	 * and may cease to exist at any time.  Don't use or modify
 	 * new_timer after the unlock call.
 	 */
 out:
-	if (error)
-		release_posix_timer(new_timer, it_id_set);
+	release_posix_timer(new_timer, it_id_set);
 	return error;
 }
@@ -597,7 +571,7 @@ sys_timer_create(const clockid_t which_clock,
  * the find to the timer lock.  To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 	/*
@@ -605,23 +579,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
 	 * flags part over to the timer lock.  Must not let interrupts in
 	 * while we are moving the lock.
 	 */
 	spin_lock_irqsave(&idr_lock, *flags);
-	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-		if ((timr->it_id != timer_id) || !(timr->it_process) ||
-				!same_thread_group(timr->it_process, current)) {
-			spin_unlock(&timr->it_lock);
-			spin_unlock_irqrestore(&idr_lock, *flags);
-			timr = NULL;
-		} else
+		if (timr->it_process &&
+		    same_thread_group(timr->it_process, current)) {
 			spin_unlock(&idr_lock);
-	} else
-		spin_unlock_irqrestore(&idr_lock, *flags);
+			return timr;
+		}
+		spin_unlock(&timr->it_lock);
+	}
+	spin_unlock_irqrestore(&idr_lock, *flags);
-	return timr;
+	return NULL;
 }
/* /*
...@@ -862,7 +833,6 @@ sys_timer_delete(timer_t timer_id) ...@@ -862,7 +833,6 @@ sys_timer_delete(timer_t timer_id)
* This keeps any tasks waiting on the spin lock from thinking * This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above). * they got something (see the lock code above).
*/ */
if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
put_task_struct(timer->it_process); put_task_struct(timer->it_process);
timer->it_process = NULL; timer->it_process = NULL;
...@@ -890,7 +860,6 @@ static void itimer_delete(struct k_itimer *timer) ...@@ -890,7 +860,6 @@ static void itimer_delete(struct k_itimer *timer)
* This keeps any tasks waiting on the spin lock from thinking * This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above). * they got something (see the lock code above).
*/ */
if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
put_task_struct(timer->it_process); put_task_struct(timer->it_process);
timer->it_process = NULL; timer->it_process = NULL;
......
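For context, the userspace side of this interface: a minimal, self-contained POSIX example (standard API, not code from this commit; link with -lrt on older glibc) exercising the default SIGEV_SIGNAL path that the timer_event_spec == NULL branch above sets up:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig)
{
	(void)sig;
	fired = 1;
}

int main(void)
{
	struct sigevent sev;
	struct itimerspec its;
	timer_t timerid;

	signal(SIGALRM, on_alarm);

	/* Process-wide SIGALRM notification, matching the kernel's
	 * default when no sigevent is supplied.
	 */
	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGALRM;

	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid)) {
		perror("timer_create");
		return 1;
	}

	memset(&its, 0, sizeof(its));
	its.it_value.tv_nsec = 100 * 1000 * 1000;	/* one-shot, 100 ms */
	timer_settime(timerid, 0, &its, NULL);

	while (!fired)
		pause();
	puts("timer fired");
	timer_delete(timerid);
	return 0;
}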
@@ -4052,23 +4052,26 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 /*
- * Return p->sum_exec_runtime plus any more ns on the sched_clock
- * that have not yet been banked in case the task is currently running.
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
  */
-unsigned long long task_sched_runtime(struct task_struct *p)
+unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
-	u64 ns, delta_exec;
 	struct rq *rq;
+	u64 ns = 0;
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime;
 	if (task_current(rq, p)) {
+		u64 delta_exec;
+
 		update_rq_clock(rq);
 		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
-			ns += delta_exec;
+			ns = delta_exec;
 	}
 	task_rq_unlock(rq, &flags);
 	return ns;
@@ -4085,6 +4088,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 	cputime64_t tmp;
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
@@ -4109,6 +4113,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
 	tmp = cputime_to_cputime64(cputime);
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 	p->gtime = cputime_add(p->gtime, cputime);
 	cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4144,6 +4149,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	}
 	p->stime = cputime_add(p->stime, cputime);
+	account_group_system_time(p, cputime);
 	/* Add system time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
...@@ -4185,6 +4191,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal) ...@@ -4185,6 +4191,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
if (p == rq->idle) { if (p == rq->idle) {
p->stime = cputime_add(p->stime, steal); p->stime = cputime_add(p->stime, steal);
account_group_system_time(p, steal);
if (atomic_read(&rq->nr_iowait) > 0) if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp); cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else else
......
...@@ -449,6 +449,7 @@ static void update_curr(struct cfs_rq *cfs_rq) ...@@ -449,6 +449,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
struct task_struct *curtask = task_of(curr); struct task_struct *curtask = task_of(curr);
cpuacct_charge(curtask, delta_exec); cpuacct_charge(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
} }
} }
......
...@@ -526,6 +526,8 @@ static void update_curr_rt(struct rq *rq) ...@@ -526,6 +526,8 @@ static void update_curr_rt(struct rq *rq)
schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec)); schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec; curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = rq->clock; curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec); cpuacct_charge(curr, delta_exec);
...@@ -1458,7 +1460,7 @@ static void watchdog(struct rq *rq, struct task_struct *p) ...@@ -1458,7 +1460,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
p->rt.timeout++; p->rt.timeout++;
next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
if (p->rt.timeout > next) if (p->rt.timeout > next)
p->it_sched_expires = p->se.sum_exec_runtime; p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
} }
} }
......
@@ -270,3 +270,89 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 #define sched_info_switch(t, next)	do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+
+/*
+ * The following are functions that support scheduler-internal time accounting.
+ * These functions are generally called at the timer tick. None of this depends
+ * on CONFIG_SCHEDSTATS.
+ */
+
+/**
+ * account_group_user_time - Maintain utime for a thread group.
+ *
+ * @tsk:	Pointer to task structure.
+ * @cputime:	Time value by which to increment the utime field of the
+ *		thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the utime field there.
+ */
+static inline void account_group_user_time(struct task_struct *tsk,
+					   cputime_t cputime)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (unlikely(!sig))
+		return;
+	if (sig->cputime.totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times->utime = cputime_add(times->utime, cputime);
+		put_cpu_no_resched();
+	}
+}
+
+/**
+ * account_group_system_time - Maintain stime for a thread group.
+ *
+ * @tsk:	Pointer to task structure.
+ * @cputime:	Time value by which to increment the stime field of the
+ *		thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the stime field there.
+ */
+static inline void account_group_system_time(struct task_struct *tsk,
+					     cputime_t cputime)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (unlikely(!sig))
+		return;
+	if (sig->cputime.totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times->stime = cputime_add(times->stime, cputime);
+		put_cpu_no_resched();
+	}
+}
+
+/**
+ * account_group_exec_runtime - Maintain exec runtime for a thread group.
+ *
+ * @tsk:	Pointer to task structure.
+ * @ns:		Time value by which to increment the sum_exec_runtime field
+ *		of the thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the sum_exec_runtime field there.
+ */
+static inline void account_group_exec_runtime(struct task_struct *tsk,
+					      unsigned long long ns)
+{
+	struct signal_struct *sig;
+
+	sig = tsk->signal;
+	if (unlikely(!sig))
+		return;
+	if (sig->cputime.totals) {
+		struct task_cputime *times;
+
+		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+		times->sum_exec_runtime += ns;
+		put_cpu_no_resched();
+	}
+}
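
These per-CPU group totals are what allow a whole thread group's CPU time to be sampled without walking every thread under tasklist_lock, which is the many-thread itimer hang this series fixes. A small userspace sketch that exercises exactly that path (assumes a libc exposing clock_gettime()):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            /* one call samples the CPU time of the whole thread group */
            if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) != 0) {
                    perror("clock_gettime");
                    return 1;
            }
            printf("group cpu time: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }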
@@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	struct siginfo info;
 	unsigned long flags;
 	struct sighand_struct *psig;
+	struct task_cputime cputime;
 	int ret = sig;

 	BUG_ON(sig == -1);
@@ -1368,10 +1369,9 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 	info.si_uid = tsk->uid;

-	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
-						       tsk->signal->utime));
-	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
-						       tsk->signal->stime));
+	thread_group_cputime(tsk, &cputime);
+	info.si_utime = cputime_to_jiffies(cputime.utime);
+	info.si_stime = cputime_to_jiffies(cputime.stime);

 	info.si_status = tsk->exit_code & 0x7f;
 	if (tsk->exit_code & 0x80)
...
@@ -267,16 +267,12 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-#ifdef CONFIG_NO_HZ
 	int cpu = smp_processor_id();
+
 	if (idle_cpu(cpu) && !in_interrupt())
-		tick_nohz_stop_idle(cpu);
-#endif
+		tick_check_idle(cpu);
+
 	__irq_enter();
-#ifdef CONFIG_NO_HZ
-	if (idle_cpu(cpu))
-		tick_nohz_update_jiffies();
-#endif
 }

 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
...
@@ -853,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid)
 	return old_fsgid;
 }

+void do_sys_times(struct tms *tms)
+{
+	struct task_cputime cputime;
+	cputime_t cutime, cstime;
+
+	spin_lock_irq(&current->sighand->siglock);
+	thread_group_cputime(current, &cputime);
+	cutime = current->signal->cutime;
+	cstime = current->signal->cstime;
+	spin_unlock_irq(&current->sighand->siglock);
+	tms->tms_utime = cputime_to_clock_t(cputime.utime);
+	tms->tms_stime = cputime_to_clock_t(cputime.stime);
+	tms->tms_cutime = cputime_to_clock_t(cutime);
+	tms->tms_cstime = cputime_to_clock_t(cstime);
+}
+
 asmlinkage long sys_times(struct tms __user * tbuf)
 {
-	/*
-	 * In the SMP world we might just be unlucky and have one of
-	 * the times increment as we use it. Since the value is an
-	 * atomically safe type this is just fine. Conceptually its
-	 * as if the syscall took an instant longer to occur.
-	 */
 	if (tbuf) {
 		struct tms tmp;
-		struct task_struct *tsk = current;
-		struct task_struct *t;
-		cputime_t utime, stime, cutime, cstime;

-		spin_lock_irq(&tsk->sighand->siglock);
-		utime = tsk->signal->utime;
-		stime = tsk->signal->stime;
-		t = tsk;
-		do {
-			utime = cputime_add(utime, t->utime);
-			stime = cputime_add(stime, t->stime);
-			t = next_thread(t);
-		} while (t != tsk);
-
-		cutime = tsk->signal->cutime;
-		cstime = tsk->signal->cstime;
-		spin_unlock_irq(&tsk->sighand->siglock);
-
-		tmp.tms_utime = cputime_to_clock_t(utime);
-		tmp.tms_stime = cputime_to_clock_t(stime);
-		tmp.tms_cutime = cputime_to_clock_t(cutime);
-		tmp.tms_cstime = cputime_to_clock_t(cstime);
+		do_sys_times(&tmp);
 		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 			return -EFAULT;
 	}
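
The userspace contract of times(2) is unchanged by the do_sys_times() split; a quick sketch that drives the new path:

    #include <stdio.h>
    #include <sys/times.h>
    #include <unistd.h>

    int main(void)
    {
            struct tms t;

            if (times(&t) == (clock_t)-1)
                    return 1;
            printf("utime=%ld stime=%ld cutime=%ld cstime=%ld (1/%ld s units)\n",
                   (long)t.tms_utime, (long)t.tms_stime,
                   (long)t.tms_cutime, (long)t.tms_cstime,
                   sysconf(_SC_CLK_TCK));
            return 0;
    }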
@@ -1449,7 +1439,6 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 {
 	struct rlimit new_rlim, *old_rlim;
-	unsigned long it_prof_secs;
 	int retval;

 	if (resource >= RLIM_NLIMITS)
@@ -1503,18 +1492,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 	if (new_rlim.rlim_cur == RLIM_INFINITY)
 		goto out;

-	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
-	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
-		unsigned long rlim_cur = new_rlim.rlim_cur;
-		cputime_t cputime;
-
-		cputime = secs_to_cputime(rlim_cur);
-		read_lock(&tasklist_lock);
-		spin_lock_irq(&current->sighand->siglock);
-		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-		spin_unlock_irq(&current->sighand->siglock);
-		read_unlock(&tasklist_lock);
-	}
+	update_rlimit_cpu(new_rlim.rlim_cur);
 out:
 	return 0;
 }
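
The observable behaviour is unchanged: lowering the soft RLIMIT_CPU still arms the process-wide profiling timer, now through update_rlimit_cpu(). A sketch that triggers it (deliberately busy-loops until SIGXCPU kills it):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl;

            if (getrlimit(RLIMIT_CPU, &rl) != 0)
                    return 1;
            rl.rlim_cur = 2;        /* soft cap: 2 seconds of CPU time */
            if (setrlimit(RLIMIT_CPU, &rl) != 0) {
                    perror("setrlimit");
                    return 1;
            }
            for (;;)
                    ;               /* expect SIGXCPU after roughly 2s */
    }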
@@ -1552,11 +1530,8 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
  *
  */
-static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
-				     cputime_t *utimep, cputime_t *stimep)
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
 {
-	*utimep = cputime_add(*utimep, t->utime);
-	*stimep = cputime_add(*stimep, t->stime);
 	r->ru_nvcsw += t->nvcsw;
 	r->ru_nivcsw += t->nivcsw;
 	r->ru_minflt += t->min_flt;
@@ -1570,12 +1545,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	struct task_struct *t;
 	unsigned long flags;
 	cputime_t utime, stime;
+	struct task_cputime cputime;

 	memset((char *) r, 0, sizeof *r);
 	utime = stime = cputime_zero;

 	if (who == RUSAGE_THREAD) {
-		accumulate_thread_rusage(p, r, &utime, &stime);
+		accumulate_thread_rusage(p, r);
 		goto out;
 	}
@@ -1598,8 +1574,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 			break;

 	case RUSAGE_SELF:
-		utime = cputime_add(utime, p->signal->utime);
-		stime = cputime_add(stime, p->signal->stime);
+		thread_group_cputime(p, &cputime);
+		utime = cputime_add(utime, cputime.utime);
+		stime = cputime_add(stime, cputime.stime);
 		r->ru_nvcsw += p->signal->nvcsw;
 		r->ru_nivcsw += p->signal->nivcsw;
 		r->ru_minflt += p->signal->min_flt;
@@ -1608,7 +1585,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 		r->ru_oublock += p->signal->oublock;
 		t = p;
 		do {
-			accumulate_thread_rusage(t, r, &utime, &stime);
+			accumulate_thread_rusage(t, r);
 			t = next_thread(t);
 		} while (t != p);
 		break;
...
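
k_getrusage() backs getrusage(2), so the thread_group_cputime() totals surface directly in userspace:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rusage ru;

            if (getrusage(RUSAGE_SELF, &ru) != 0)
                    return 1;
            printf("utime %ld.%06lds stime %ld.%06lds minflt %ld\n",
                   (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
                   (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec,
                   ru.ru_minflt);
            return 0;
    }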
@@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c)
 	unsigned long flags;
 	int ret;

+	/* save mult_orig on registration */
+	c->mult_orig = c->mult;
+
 	spin_lock_irqsave(&clocksource_lock, flags);
 	ret = clocksource_enqueue(c);
 	if (!ret)
...
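
Saving mult_orig at registration keeps the raw clock immune to later NTP adjustments of ->mult, and drivers need not set it themselves. A hedged sketch against the 2.6.28-era API, where ->read takes no argument (my_hw_counter() and MY_HW_HZ are made up for illustration):

    static cycle_t my_cs_read(void)
    {
            return (cycle_t)my_hw_counter();        /* hypothetical counter read */
    }

    static struct clocksource my_cs = {
            .name   = "my_cs",
            .rating = 200,
            .read   = my_cs_read,
            .mask   = CLOCKSOURCE_MASK(32),
            .shift  = 20,
    };

    static int __init my_cs_init(void)
    {
            /* ->mult is all the driver fills in; ->mult_orig is saved above */
            my_cs.mult = clocksource_hz2mult(MY_HW_HZ, my_cs.shift);
            return clocksource_register(&my_cs);
    }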
@@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = {
 	.read		= jiffies_read,
 	.mask		= 0xffffffff, /*32bits*/
 	.mult		= NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+	.mult_orig	= NSEC_PER_JIFFY << JIFFIES_SHIFT,
 	.shift		= JIFFIES_SHIFT,
 };
...
@@ -10,13 +10,13 @@

 #include <linux/mm.h>
 #include <linux/time.h>
-#include <linux/timer.h>
 #include <linux/timex.h>
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
 #include <linux/math64.h>
 #include <linux/clocksource.h>
+#include <linux/workqueue.h>
 #include <asm/timex.h>

 /*
@@ -218,11 +218,11 @@ void second_overflow(void)
 /* Disable the cmos update - used by virtualization and embedded */
 int no_sync_cmos_clock __read_mostly;

-static void sync_cmos_clock(unsigned long dummy);
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+static void sync_cmos_clock(struct work_struct *work);
+static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);

-static void sync_cmos_clock(unsigned long dummy)
+static void sync_cmos_clock(struct work_struct *work)
 {
 	struct timespec now, next;
 	int fail = 1;
@@ -258,13 +258,13 @@ static void sync_cmos_clock(unsigned long dummy)
 			next.tv_sec++;
 			next.tv_nsec -= NSEC_PER_SEC;
 		}
-	mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+	schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }

 static void notify_cmos_timer(void)
 {
 	if (!no_sync_cmos_clock)
-		mod_timer(&sync_cmos_timer, jiffies + 1);
+		schedule_delayed_work(&sync_cmos_work, 0);
 }

 #else
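
Moving the CMOS sync from a kernel timer to delayed work is what lets update_persistent_clock() sleep: timer callbacks run in softirq context, work items in process context. The pattern in miniature (my_sync_fn is illustrative, not from this commit):

    static void my_sync_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(my_sync_work, my_sync_fn);

    static void my_sync_fn(struct work_struct *work)
    {
            /* process context: taking a mutex or sleeping is fine here */
            schedule_delayed_work(&my_sync_work, HZ);       /* re-arm in ~1s */
    }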
@@ -277,38 +277,50 @@ static inline void notify_cmos_timer(void) { }
 int do_adjtimex(struct timex *txc)
 {
 	struct timespec ts;
-	long save_adjust, sec;
 	int result;

+	/* Validate the data before disabling interrupts */
+	if (txc->modes & ADJ_ADJTIME) {
+		/* singleshot must not be used with any other mode bits */
+		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
+			return -EINVAL;
+		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+		    !capable(CAP_SYS_TIME))
+			return -EPERM;
+	} else {
 		/* In order to modify anything, you gotta be super-user! */
 		if (txc->modes && !capable(CAP_SYS_TIME))
 			return -EPERM;

-	/* Now we validate the data before disabling interrupts */
-	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
-		/* singleshot must not be used with any other mode bits */
-		if (txc->modes & ~ADJ_OFFSET_SS_READ)
-			return -EINVAL;
-	}
-
-	/* if the quartz is off by more than 10% something is VERY wrong ! */
-	if (txc->modes & ADJ_TICK)
-		if (txc->tick <  900000/USER_HZ ||
-		    txc->tick > 1100000/USER_HZ)
+		/* if the quartz is off by more than 10% something is VERY wrong! */
+		if (txc->modes & ADJ_TICK &&
+		    (txc->tick <  900000/USER_HZ ||
+		     txc->tick > 1100000/USER_HZ))
 			return -EINVAL;

-	if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
-		hrtimer_cancel(&leap_timer);
+		if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+			hrtimer_cancel(&leap_timer);
+	}

 	getnstimeofday(&ts);

 	write_seqlock_irq(&xtime_lock);

-	/* Save for later - semantics of adjtime is to return old value */
-	save_adjust = time_adjust;
-
 	/* If there are input parameters, then process them */
+	if (txc->modes & ADJ_ADJTIME) {
+		long save_adjust = time_adjust;
+
+		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+			/* adjtime() is independent from ntp_adjtime() */
+			time_adjust = txc->offset;
+			ntp_update_frequency();
+		}
+		txc->offset = save_adjust;
+		goto adj_done;
+	}
+
 	if (txc->modes) {
+		long sec;
+
 		if (txc->modes & ADJ_STATUS) {
 			if ((time_status & STA_PLL) &&
 			    !(txc->status & STA_PLL)) {
@@ -375,13 +387,8 @@ int do_adjtimex(struct timex *txc)
 		if (txc->modes & ADJ_TAI && txc->constant > 0)
 			time_tai = txc->constant;

-		if (txc->modes & ADJ_OFFSET) {
-			if (txc->modes == ADJ_OFFSET_SINGLESHOT)
-				/* adjtime() is independent from ntp_adjtime() */
-				time_adjust = txc->offset;
-			else
-				ntp_update_offset(txc->offset);
-		}
+		if (txc->modes & ADJ_OFFSET)
+			ntp_update_offset(txc->offset);
 		if (txc->modes & ADJ_TICK)
 			tick_usec = txc->tick;
@@ -389,22 +396,18 @@ int do_adjtimex(struct timex *txc)
 			ntp_update_frequency();
 	}

-	result = time_state;	/* mostly `TIME_OK' */
-	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
-		result = TIME_ERROR;
-
-	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
-	    (txc->modes == ADJ_OFFSET_SS_READ))
-		txc->offset = save_adjust;
-	else {
 	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
 				  NTP_SCALE_SHIFT);
 	if (!(time_status & STA_NANO))
 		txc->offset /= NSEC_PER_USEC;
-	}

-	txc->freq	= shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
-				      (s64)PPM_SCALE_INV,
-				      NTP_SCALE_SHIFT);
+adj_done:
+	result = time_state;	/* mostly `TIME_OK' */
+	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
+		result = TIME_ERROR;
+
+	txc->freq	= shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
+				      (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
 	txc->maxerror	= time_maxerror;
 	txc->esterror	= time_esterror;
 	txc->status	= time_status;
...
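
With ADJ_ADJTIME split out, the old adjtime() slew can be queried without modifying it via the read-only mode bits. A userspace sketch (assumes a libc that defines ADJ_OFFSET_SS_READ):

    #include <stdio.h>
    #include <sys/timex.h>

    int main(void)
    {
            struct timex tx = { .modes = ADJ_OFFSET_SS_READ };
            int state = adjtimex(&tx);      /* read-only: no CAP_SYS_TIME needed */

            if (state == -1) {
                    perror("adjtimex");
                    return 1;
            }
            printf("remaining adjtime() slew: %ld us (clock state %d)\n",
                   tx.offset, state);
            return 0;
    }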
@@ -383,6 +383,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 	return 0;
 }

+/*
+ * Called from irq_enter() when idle was interrupted to reenable the
+ * per cpu device.
+ */
+void tick_check_oneshot_broadcast(int cpu)
+{
+	if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+
+		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+	}
+}
+
 /*
  * Handle oneshot mode broadcasting
  */
...
@@ -36,6 +36,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
+extern void tick_check_oneshot_broadcast(int cpu);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -45,6 +46,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline void tick_check_oneshot_broadcast(int cpu) { }
 # endif /* !BROADCAST */

 #else /* !ONESHOT */
...
@@ -155,7 +155,7 @@ void tick_nohz_update_jiffies(void)
 	touch_softlockup_watchdog();
 }

-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -377,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }

+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+	hrtimer_cancel(&ts->sched_timer);
+	ts->sched_timer.expires = ts->idle_tick;
+
+	while (1) {
+		/* Forward the time to expire in the future */
+		hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+			hrtimer_start(&ts->sched_timer,
+				      ts->sched_timer.expires,
+				      HRTIMER_MODE_ABS);
+			/* Check, if the timer was already in the past */
+			if (hrtimer_active(&ts->sched_timer))
+				break;
+		} else {
+			if (!tick_program_event(ts->sched_timer.expires, 0))
+				break;
+		}
+		/* Update jiffies and reread time */
+		tick_do_update_jiffies64(now);
+		now = ktime_get();
+	}
+}
+
 /**
  * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
  *
@@ -430,28 +456,7 @@ void tick_nohz_restart_sched_tick(void)
 	 */
 	ts->tick_stopped  = 0;
 	ts->idle_exittime = now;
-	hrtimer_cancel(&ts->sched_timer);
-	ts->sched_timer.expires = ts->idle_tick;
-
-	while (1) {
-		/* Forward the time to expire in the future */
-		hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer,
-				      ts->sched_timer.expires,
-				      HRTIMER_MODE_ABS);
-			/* Check, if the timer was already in the past */
-			if (hrtimer_active(&ts->sched_timer))
-				break;
-		} else {
-			if (!tick_program_event(ts->sched_timer.expires, 0))
-				break;
-		}
-		/* Update jiffies and reread time */
-		tick_do_update_jiffies64(now);
-		now = ktime_get();
-	}
+	tick_nohz_restart(ts, now);

 	local_irq_enable();
 }
@@ -503,10 +508,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	update_process_times(user_mode(regs));
 	profile_tick(CPU_PROFILING);

-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return;
-
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
 		tick_do_update_jiffies64(now);
@@ -552,12 +553,46 @@ static void tick_nohz_switch_to_nohz(void)
 	       smp_processor_id());
 }

+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note, this is different to tick_nohz_restart. We just kick the
+ * timer and do not touch the other magic bits which need to be done
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	if (!ts->tick_stopped)
+		return;
+
+	tick_nohz_restart(ts, ktime_get());
+}
+
 #else

 static inline void tick_nohz_switch_to_nohz(void) { }

 #endif /* NO_HZ */

+/*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+	tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+	tick_nohz_stop_idle(cpu);
+	tick_nohz_update_jiffies();
+	tick_nohz_kick_tick(cpu);
+#endif
+}
+
 /*
  * High resolution timer specific code
  */
@@ -611,10 +646,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		profile_tick(CPU_PROFILING);
 	}

-	/* Do not restart, when we are in the idle loop */
-	if (ts->tick_stopped)
-		return HRTIMER_NORESTART;
-
 	hrtimer_forward(timer, now, tick_period);

 	return HRTIMER_RESTART;
...
@@ -58,27 +58,26 @@ struct clocksource *clock;
 #ifdef CONFIG_GENERIC_TIME
 /**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
  *
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
  */
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
 {
 	cycle_t cycle_now, cycle_delta;
-	s64 ns_offset;
+	s64 nsec;

-	/* read clocksource: */
 	cycle_now = clocksource_read(clock);
-
-	/* calculate the delta since the last update_wall_time: */
 	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+	clock->cycle_last = cycle_now;

-	/* convert to nanoseconds: */
-	ns_offset = cyc2ns(clock, cycle_delta);
+	nsec = cyc2ns(clock, cycle_delta);
+	timespec_add_ns(&xtime, nsec);

-	return ns_offset;
+	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+	clock->raw_time.tv_nsec += nsec;
 }

 /**
@@ -89,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
  */
 void getnstimeofday(struct timespec *ts)
 {
+	cycle_t cycle_now, cycle_delta;
 	unsigned long seq;
 	s64 nsecs;
@@ -96,7 +96,15 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&xtime_lock);

 		*ts = xtime;
-		nsecs = __get_nsec_offset();
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = cyc2ns(clock, cycle_delta);

 	} while (read_seqretry(&xtime_lock, seq));
@@ -129,22 +137,22 @@ EXPORT_SYMBOL(do_gettimeofday);
  */
 int do_settimeofday(struct timespec *tv)
 {
+	struct timespec ts_delta;
 	unsigned long flags;
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;

 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;

 	write_seqlock_irqsave(&xtime_lock, flags);

-	nsec -= __get_nsec_offset();
+	clocksource_forward_now();

-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+	xtime = *tv;

 	update_xtime_cache(0);
 	clock->error = 0;
@@ -170,22 +178,19 @@ EXPORT_SYMBOL(do_settimeofday);
 static void change_clocksource(void)
 {
 	struct clocksource *new;
-	cycle_t now;
-	u64 nsec;

 	new = clocksource_get_next();

 	if (clock == new)
 		return;

-	new->cycle_last = 0;
-	now = clocksource_read(new);
-	nsec =  __get_nsec_offset();
-	timespec_add_ns(&xtime, nsec);
+	clocksource_forward_now();

-	clock = new;
-	clock->cycle_last = now;
+	new->raw_time = clock->raw_time;
+
+	clock = new;
+	clock->cycle_last = 0;
+	clock->cycle_last = clocksource_read(new);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,10 +205,43 @@ static void change_clocksource(void)
 	 */
 }
 #else
+static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
 #endif

+/**
+ * getrawmonotonic - Returns the raw monotonic time in a timespec
+ * @ts:		pointer to the timespec to be set
+ *
+ * Returns the raw monotonic time (completely un-modified by ntp)
+ */
+void getrawmonotonic(struct timespec *ts)
+{
+	unsigned long seq;
+	s64 nsecs;
+	cycle_t cycle_now, cycle_delta;
+
+	do {
+		seq = read_seqbegin(&xtime_lock);
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+		/* convert to nanoseconds: */
+		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+
+		*ts = clock->raw_time;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts, nsecs);
+}
+EXPORT_SYMBOL(getrawmonotonic);
+
 /**
  * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
  */
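
getrawmonotonic() is the backend of the new CLOCK_MONOTONIC_RAW posix clock added in the same cycle, so the un-steered time is directly visible from userspace (a sketch; the fallback define matches the 2.6.28 uapi value):

    #include <stdio.h>
    #include <time.h>

    #ifndef CLOCK_MONOTONIC_RAW
    #define CLOCK_MONOTONIC_RAW 4
    #endif

    int main(void)
    {
            struct timespec raw, mono;

            clock_gettime(CLOCK_MONOTONIC_RAW, &raw);       /* never NTP-steered */
            clock_gettime(CLOCK_MONOTONIC, &mono);          /* NTP-adjusted */
            printf("raw  %ld.%09ld\nmono %ld.%09ld\n",
                   (long)raw.tv_sec, raw.tv_nsec,
                   (long)mono.tv_sec, mono.tv_nsec);
            return 0;
    }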
@@ -265,8 +303,6 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;

 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
-	/* Make sure that we have the correct xtime reference */
-	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	clock->cycle_last = 0;
@@ -319,8 +353,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 	timekeeping_suspend_time = read_persistent_clock();

 	write_seqlock_irqsave(&xtime_lock, flags);
-	/* Get the current xtime offset */
-	timekeeping_suspend_nsecs = __get_nsec_offset();
+	clocksource_forward_now();
 	timekeeping_suspended = 1;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -454,23 +487,29 @@ void update_wall_time(void)
 #else
 	offset = clock->cycle_interval;
 #endif
-	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;

 	/* normally this loop will run just once, however in the
 	 * case of lost or late ticks, it will accumulate correctly.
 	 */
 	while (offset >= clock->cycle_interval) {
 		/* accumulate one interval */
-		clock->xtime_nsec += clock->xtime_interval;
-		clock->cycle_last += clock->cycle_interval;
 		offset -= clock->cycle_interval;
+		clock->cycle_last += clock->cycle_interval;
+		clock->xtime_nsec += clock->xtime_interval;

 		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
 			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 			xtime.tv_sec++;
 			second_overflow();
 		}

+		clock->raw_time.tv_nsec += clock->raw_interval;
+		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
+			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
+			clock->raw_time.tv_sec++;
+		}
+
 		/* accumulate error between NTP and clock interval */
 		clock->error += tick_length;
 		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@ void update_wall_time(void)
 	/* correct the clock when NTP error is too big */
 	clocksource_adjust(offset);

-	/* store full nanoseconds into xtime */
-	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+	/* store full nanoseconds into xtime after rounding it up and
+	 * add the remainder to the error difference.
+	 */
+	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);

 	update_xtime_cache(cyc2ns(clock, offset));
...
@@ -47,13 +47,14 @@ static void print_name_offset(struct seq_file *m, void *sym)
 }

 static void
-print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
+print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+	    int idx, u64 now)
 {
 #ifdef CONFIG_TIMER_STATS
 	char tmp[TASK_COMM_LEN + 1];
 #endif
 	SEQ_printf(m, " #%d: ", idx);
-	print_name_offset(m, timer);
+	print_name_offset(m, taddr);
 	SEQ_printf(m, ", ");
 	print_name_offset(m, timer->function);
 	SEQ_printf(m, ", S:%02lx", timer->state);
@@ -99,7 +100,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
 		tmp = *timer;
 		spin_unlock_irqrestore(&base->cpu_base->lock, flags);

-		print_timer(m, &tmp, i, now);
+		print_timer(m, timer, &tmp, i, now);
 		next++;
 		goto next_one;
 	}
@@ -109,6 +110,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
+	SEQ_printf(m, "  .base:       %p\n", base);
 	SEQ_printf(m, "  .index:      %d\n",
 			base->index);
 	SEQ_printf(m, "  .resolution: %Lu nsecs\n",
@@ -183,12 +185,16 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)

 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 static void
-print_tickdevice(struct seq_file *m, struct tick_device *td)
+print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 {
 	struct clock_event_device *dev = td->evtdev;

 	SEQ_printf(m, "\n");
 	SEQ_printf(m, "Tick Device: mode:     %d\n", td->mode);
+	if (cpu < 0)
+		SEQ_printf(m, "Broadcast device\n");
+	else
+		SEQ_printf(m, "Per CPU device: %d\n", cpu);

 	SEQ_printf(m, "Clock Event Device: ");
 	if (!dev) {
@@ -222,7 +228,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 	int cpu;

 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-	print_tickdevice(m, tick_get_broadcast_device());
+	print_tickdevice(m, tick_get_broadcast_device(), -1);
 	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
 		   tick_get_broadcast_mask()->bits[0]);
 #ifdef CONFIG_TICK_ONESHOT
@@ -232,7 +238,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 	SEQ_printf(m, "\n");
 #endif
 	for_each_online_cpu(cpu)
-		print_tickdevice(m, tick_get_device(cpu));
+		print_tickdevice(m, tick_get_device(cpu), cpu);
 	SEQ_printf(m, "\n");
 }
 #else
@@ -244,7 +250,7 @@ static int timer_list_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;

-	SEQ_printf(m, "Timer List Version: v0.3\n");
+	SEQ_printf(m, "Timer List Version: v0.4\n");
 	SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
 	SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
...
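
The same seq_file output backs /proc/timer_list and the sysrq-q dump, so the v0.4 format can be checked with a trivial reader:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/timer_list", "r");
            int c;

            if (!f) {
                    perror("/proc/timer_list");
                    return 1;
            }
            while ((c = getc(f)) != EOF)    /* starts "Timer List Version: v0.4" */
                    putchar(c);
            fclose(f);
            return 0;
    }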
@@ -1436,9 +1436,11 @@ static void __cpuinit migrate_timers(int cpu)
 	BUG_ON(cpu_online(cpu));
 	old_base = per_cpu(tvec_bases, cpu);
 	new_base = get_cpu_var(tvec_bases);
-
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

 	BUG_ON(old_base->running_timer);
@@ -1453,8 +1455,7 @@ static void __cpuinit migrate_timers(int cpu)
 	}

 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(tvec_bases);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
...
@@ -75,6 +75,7 @@
 #include <linux/string.h>
 #include <linux/selinux.h>
 #include <linux/mutex.h>
+#include <linux/posix-timers.h>

 #include "avc.h"
 #include "objsec.h"
@@ -2322,13 +2323,7 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm)
 			initrlim = init_task.signal->rlim+i;
 			rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
 		}
-		if (current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-			/*
-			 * This will cause RLIMIT_CPU calculations
-			 * to be refigured.
-			 */
-			current->it_prof_expires = jiffies_to_cputime(1);
-		}
+		update_rlimit_cpu(rlim->rlim_cur);
 	}

 	/* Wake up the parent if it is waiting so that it can
...