Commit 1f6d6e8e authored by Linus Torvalds

Merge branch 'v28-range-hrtimers-for-linus-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

Merge branch 'v28-range-hrtimers-for-linus-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'v28-range-hrtimers-for-linus-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (37 commits)
  hrtimers: add missing docbook comments to struct hrtimer
  hrtimers: simplify hrtimer_peek_ahead_timers()
  hrtimers: fix docbook comments
  DECLARE_PER_CPU needs linux/percpu.h
  hrtimers: fix typo
  rangetimers: fix the bug reported by Ingo for real
  rangetimer: fix BUG_ON reported by Ingo
  rangetimer: fix x86 build failure for the !HRTIMERS case
  select: fix alpha OSF wrapper
  select: fix alpha OSF wrapper
  hrtimer: peek at the timer queue just before going idle
  hrtimer: make the futex() system call use the per process slack value
  hrtimer: make the nanosleep() syscall use the per process slack
  hrtimer: fix signed/unsigned bug in slack estimator
  hrtimer: show the timer ranges in /proc/timer_list
  hrtimer: incorporate feedback from Peter Zijlstra
  hrtimer: add a hrtimer_start_range() function
  hrtimer: another build fix
  hrtimer: fix build bug found by Ingo
  hrtimer: make select() and poll() use the hrtimer range feature
  ...
parents db563fc2 268a3dcf
...@@ -983,10 +983,12 @@ asmlinkage int ...@@ -983,10 +983,12 @@ asmlinkage int
osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
struct timeval32 __user *tvp) struct timeval32 __user *tvp)
{ {
s64 timeout = MAX_SCHEDULE_TIMEOUT; struct timespec end_time, *to = NULL;
if (tvp) { if (tvp) {
time_t sec, usec; time_t sec, usec;
to = &end_time;
if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
|| __get_user(sec, &tvp->tv_sec) || __get_user(sec, &tvp->tv_sec)
|| __get_user(usec, &tvp->tv_usec)) { || __get_user(usec, &tvp->tv_usec)) {
...@@ -996,14 +998,13 @@ osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, ...@@ -996,14 +998,13 @@ osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
if (sec < 0 || usec < 0) if (sec < 0 || usec < 0)
return -EINVAL; return -EINVAL;
if ((unsigned long) sec < MAX_SELECT_SECONDS) { if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC))
timeout = (usec + 1000000/HZ - 1) / (1000000/HZ); return -EINVAL;
timeout += sec * (unsigned long) HZ;
}
} }
/* OSF does not copy back the remaining time. */ /* OSF does not copy back the remaining time. */
return core_sys_select(n, inp, outp, exp, &timeout); return core_sys_select(n, inp, outp, exp, to);
} }
struct rusage32 { struct rusage32 {
......
...@@ -1114,7 +1114,7 @@ static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) ...@@ -1114,7 +1114,7 @@ static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
struct hrtimer *p_ht = &vcpu->arch.hlt_timer; struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
if (hrtimer_cancel(p_ht)) if (hrtimer_cancel(p_ht))
hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS); hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
} }
static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
......
...@@ -195,7 +195,7 @@ int start_spu_profiling(unsigned int cycles_reset) ...@@ -195,7 +195,7 @@ int start_spu_profiling(unsigned int cycles_reset)
pr_debug("timer resolution: %lu\n", TICK_NSEC); pr_debug("timer resolution: %lu\n", TICK_NSEC);
kt = ktime_set(0, profiling_interval); kt = ktime_set(0, profiling_interval);
hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer.expires = kt; hrtimer_set_expires(&timer, kt);
timer.function = profile_spus; timer.function = profile_spus;
/* Allocate arrays for collecting SPU PC samples */ /* Allocate arrays for collecting SPU PC samples */
......
...@@ -204,10 +204,10 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps) ...@@ -204,10 +204,10 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
if (vcpu0 && waitqueue_active(&vcpu0->wq)) if (vcpu0 && waitqueue_active(&vcpu0->wq))
wake_up_interruptible(&vcpu0->wq); wake_up_interruptible(&vcpu0->wq);
pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period); hrtimer_add_expires_ns(&pt->timer, pt->period);
pt->scheduled = ktime_to_ns(pt->timer.expires); pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
if (pt->period) if (pt->period)
ps->channels[0].count_load_time = pt->timer.expires; ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer);
return (pt->period == 0 ? 0 : 1); return (pt->period == 0 ? 0 : 1);
} }
...@@ -257,7 +257,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) ...@@ -257,7 +257,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
timer = &pit->pit_state.pit_timer.timer; timer = &pit->pit_state.pit_timer.timer;
if (hrtimer_cancel(timer)) if (hrtimer_cancel(timer))
hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
} }
static void destroy_pit_timer(struct kvm_kpit_timer *pt) static void destroy_pit_timer(struct kvm_kpit_timer *pt)
......
...@@ -946,9 +946,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic) ...@@ -946,9 +946,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
if (apic_lvtt_period(apic)) { if (apic_lvtt_period(apic)) {
result = 1; result = 1;
apic->timer.dev.expires = ktime_add_ns( hrtimer_add_expires_ns(&apic->timer.dev, apic->timer.period);
apic->timer.dev.expires,
apic->timer.period);
} }
return result; return result;
} }
...@@ -1117,7 +1115,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) ...@@ -1117,7 +1115,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
timer = &apic->timer.dev; timer = &apic->timer.dev;
if (hrtimer_cancel(timer)) if (hrtimer_cancel(timer))
hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
} }
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/ktime.h> #include <linux/ktime.h>
#include <linux/hrtimer.h>
#include "cpuidle.h" #include "cpuidle.h"
...@@ -64,6 +65,12 @@ static void cpuidle_idle_call(void) ...@@ -64,6 +65,12 @@ static void cpuidle_idle_call(void)
return; return;
} }
/*
* run any timers that can be run now, at this point
* before calculating the idle duration etc.
*/
hrtimer_peek_ahead_timers();
/* ask the governor for the next state */ /* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(dev); next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) if (need_resched())
......
...@@ -659,9 +659,9 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, ...@@ -659,9 +659,9 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
hr_time = ktime_set(0, poll_timeout); hr_time = ktime_set(0, poll_timeout);
if (!hrtimer_is_queued(&ap_poll_timer) || if (!hrtimer_is_queued(&ap_poll_timer) ||
!hrtimer_forward(&ap_poll_timer, ap_poll_timer.expires, hr_time)) { !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
ap_poll_timer.expires = hr_time; hrtimer_set_expires(&ap_poll_timer, hr_time);
hrtimer_start(&ap_poll_timer, hr_time, HRTIMER_MODE_ABS); hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
} }
return count; return count;
} }
......
...@@ -1469,6 +1469,57 @@ int compat_do_execve(char * filename, ...@@ -1469,6 +1469,57 @@ int compat_do_execve(char * filename,
#define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t)) #define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t))
/**
 * poll_select_copy_remaining - write the unslept time back to userspace
 * @end_time: absolute deadline the syscall was armed with (compared
 *            against ktime_get_ts(), i.e. the monotonic clock)
 * @p:        user pointer to a struct compat_timeval (@timeval != 0) or a
 *            struct compat_timespec (@timeval == 0); may be NULL
 * @timeval:  selects which of the two user structure layouts @p has
 * @ret:      result code from the select/poll core, passed through
 *
 * Computes deadline minus current time, clamped to zero when already
 * expired, and copies it to @p.  Nothing is written back when @p is NULL,
 * when the task has the STICKY_TIMEOUTS personality, or when the timeout
 * was zero.  Returns @ret unchanged, except that -ERESTARTNOHAND becomes
 * -EINTR when the remaining time was not (or must not be) stored, since
 * the syscall cannot be transparently restarted in that case.
 */
static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
int timeval, int ret)
{
struct timespec ts;
/* No user buffer to update: just pass the result through. */
if (!p)
return ret;
/* STICKY_TIMEOUTS personality: the timeout must stay untouched. */
if (current->personality & STICKY_TIMEOUTS)
goto sticky;
/* No update for zero timeout */
if (!end_time->tv_sec && !end_time->tv_nsec)
return ret;
/* Remaining = deadline - now, clamped to zero if already expired. */
ktime_get_ts(&ts);
ts = timespec_sub(*end_time, ts);
if (ts.tv_sec < 0)
ts.tv_sec = ts.tv_nsec = 0;
if (timeval) {
/* Caller passed a compat_timeval: convert ns -> us. */
struct compat_timeval rtv;
rtv.tv_sec = ts.tv_sec;
rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
if (!copy_to_user(p, &rtv, sizeof(rtv)))
return ret;
} else {
/* Caller passed a compat_timespec: copy as-is. */
struct compat_timespec rts;
rts.tv_sec = ts.tv_sec;
rts.tv_nsec = ts.tv_nsec;
if (!copy_to_user(p, &rts, sizeof(rts)))
return ret;
}
/*
 * If an application puts its timeval in read-only memory, we
 * don't want the Linux-specific update to the timeval to
 * cause a fault after the select has completed
 * successfully. However, because we're not updating the
 * timeval, we can't restart the system call.
 */
sticky:
if (ret == -ERESTARTNOHAND)
ret = -EINTR;
return ret;
}
/* /*
* Ooo, nasty. We need here to frob 32-bit unsigned longs to * Ooo, nasty. We need here to frob 32-bit unsigned longs to
* 64-bit unsigned longs. * 64-bit unsigned longs.
...@@ -1550,7 +1601,8 @@ int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, ...@@ -1550,7 +1601,8 @@ int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1) ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
int compat_core_sys_select(int n, compat_ulong_t __user *inp, int compat_core_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp, s64 *timeout) compat_ulong_t __user *outp, compat_ulong_t __user *exp,
struct timespec *end_time)
{ {
fd_set_bits fds; fd_set_bits fds;
void *bits; void *bits;
...@@ -1597,7 +1649,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp, ...@@ -1597,7 +1649,7 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
zero_fd_set(n, fds.res_out); zero_fd_set(n, fds.res_out);
zero_fd_set(n, fds.res_ex); zero_fd_set(n, fds.res_ex);
ret = do_select(n, &fds, timeout); ret = do_select(n, &fds, end_time);
if (ret < 0) if (ret < 0)
goto out; goto out;
...@@ -1623,7 +1675,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, ...@@ -1623,7 +1675,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp, compat_ulong_t __user *outp, compat_ulong_t __user *exp,
struct compat_timeval __user *tvp) struct compat_timeval __user *tvp)
{ {
s64 timeout = -1; struct timespec end_time, *to = NULL;
struct compat_timeval tv; struct compat_timeval tv;
int ret; int ret;
...@@ -1631,43 +1683,14 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, ...@@ -1631,43 +1683,14 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
if (copy_from_user(&tv, tvp, sizeof(tv))) if (copy_from_user(&tv, tvp, sizeof(tv)))
return -EFAULT; return -EFAULT;
if (tv.tv_sec < 0 || tv.tv_usec < 0) to = &end_time;
if (poll_select_set_timeout(to, tv.tv_sec,
tv.tv_usec * NSEC_PER_USEC))
return -EINVAL; return -EINVAL;
/* Cast to u64 to make GCC stop complaining */
if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
timeout = -1; /* infinite */
else {
timeout = DIV_ROUND_UP(tv.tv_usec, 1000000/HZ);
timeout += tv.tv_sec * HZ;
}
} }
ret = compat_core_sys_select(n, inp, outp, exp, &timeout); ret = compat_core_sys_select(n, inp, outp, exp, to);
ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);
if (tvp) {
struct compat_timeval rtv;
if (current->personality & STICKY_TIMEOUTS)
goto sticky;
rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
rtv.tv_sec = timeout;
if (compat_timeval_compare(&rtv, &tv) >= 0)
rtv = tv;
if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
sticky:
/*
* If an application puts its timeval in read-only
* memory, we don't want the Linux-specific update to
* the timeval to cause a fault after the select has
* completed successfully. However, because we're not
* updating the timeval, we can't restart the system
* call.
*/
if (ret == -ERESTARTNOHAND)
ret = -EINTR;
}
}
return ret; return ret;
} }
...@@ -1680,15 +1703,16 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp, ...@@ -1680,15 +1703,16 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
{ {
compat_sigset_t ss32; compat_sigset_t ss32;
sigset_t ksigmask, sigsaved; sigset_t ksigmask, sigsaved;
s64 timeout = MAX_SCHEDULE_TIMEOUT;
struct compat_timespec ts; struct compat_timespec ts;
struct timespec end_time, *to = NULL;
int ret; int ret;
if (tsp) { if (tsp) {
if (copy_from_user(&ts, tsp, sizeof(ts))) if (copy_from_user(&ts, tsp, sizeof(ts)))
return -EFAULT; return -EFAULT;
if (ts.tv_sec < 0 || ts.tv_nsec < 0) to = &end_time;
if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
return -EINVAL; return -EINVAL;
} }
...@@ -1703,51 +1727,8 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp, ...@@ -1703,51 +1727,8 @@ asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
} }
do { ret = compat_core_sys_select(n, inp, outp, exp, to);
if (tsp) { ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
if ((unsigned long)ts.tv_sec < MAX_SELECT_SECONDS) {
timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ);
timeout += ts.tv_sec * (unsigned long)HZ;
ts.tv_sec = 0;
ts.tv_nsec = 0;
} else {
ts.tv_sec -= MAX_SELECT_SECONDS;
timeout = MAX_SELECT_SECONDS * HZ;
}
}
ret = compat_core_sys_select(n, inp, outp, exp, &timeout);
} while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec));
if (tsp) {
struct compat_timespec rts;
if (current->personality & STICKY_TIMEOUTS)
goto sticky;
rts.tv_sec = timeout / HZ;
rts.tv_nsec = (timeout % HZ) * (NSEC_PER_SEC/HZ);
if (rts.tv_nsec >= NSEC_PER_SEC) {
rts.tv_sec++;
rts.tv_nsec -= NSEC_PER_SEC;
}
if (compat_timespec_compare(&rts, &ts) >= 0)
rts = ts;
if (copy_to_user(tsp, &rts, sizeof(rts))) {
sticky:
/*
* If an application puts its timeval in read-only
* memory, we don't want the Linux-specific update to
* the timeval to cause a fault after the select has
* completed successfully. However, because we're not
* updating the timeval, we can't restart the system
* call.
*/
if (ret == -ERESTARTNOHAND)
ret = -EINTR;
}
}
if (ret == -ERESTARTNOHAND) { if (ret == -ERESTARTNOHAND) {
/* /*
...@@ -1792,18 +1773,16 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, ...@@ -1792,18 +1773,16 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
compat_sigset_t ss32; compat_sigset_t ss32;
sigset_t ksigmask, sigsaved; sigset_t ksigmask, sigsaved;
struct compat_timespec ts; struct compat_timespec ts;
s64 timeout = -1; struct timespec end_time, *to = NULL;
int ret; int ret;
if (tsp) { if (tsp) {
if (copy_from_user(&ts, tsp, sizeof(ts))) if (copy_from_user(&ts, tsp, sizeof(ts)))
return -EFAULT; return -EFAULT;
/* We assume that ts.tv_sec is always lower than to = &end_time;
the number of seconds that can be expressed in if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
an s64. Otherwise the compiler bitches at us */ return -EINVAL;
timeout = DIV_ROUND_UP(ts.tv_nsec, 1000000000/HZ);
timeout += ts.tv_sec * HZ;
} }
if (sigmask) { if (sigmask) {
...@@ -1817,7 +1796,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, ...@@ -1817,7 +1796,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
} }
ret = do_sys_poll(ufds, nfds, &timeout); ret = do_sys_poll(ufds, nfds, to);
/* We can restart this syscall, usually */ /* We can restart this syscall, usually */
if (ret == -EINTR) { if (ret == -EINTR) {
...@@ -1835,31 +1814,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, ...@@ -1835,31 +1814,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
} else if (sigmask) } else if (sigmask)
sigprocmask(SIG_SETMASK, &sigsaved, NULL); sigprocmask(SIG_SETMASK, &sigsaved, NULL);
if (tsp && timeout >= 0) { ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
struct compat_timespec rts;
if (current->personality & STICKY_TIMEOUTS)
goto sticky;
/* Yes, we know it's actually an s64, but it's also positive. */
rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
1000;
rts.tv_sec = timeout;
if (compat_timespec_compare(&rts, &ts) >= 0)
rts = ts;
if (copy_to_user(tsp, &rts, sizeof(rts))) {
sticky:
/*
* If an application puts its timeval in read-only
* memory, we don't want the Linux-specific update to
* the timeval to cause a fault after the select has
* completed successfully. However, because we're not
* updating the timeval, we can't restart the system
* call.
*/
if (ret == -ERESTARTNOHAND && timeout >= 0)
ret = -EINTR;
}
}
return ret; return ret;
} }
......
This diff is collapsed.
...@@ -52,11 +52,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) ...@@ -52,11 +52,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
{ {
ktime_t now, remaining; ktime_t remaining;
now = ctx->tmr.base->get_time();
remaining = ktime_sub(ctx->tmr.expires, now);
remaining = hrtimer_expires_remaining(&ctx->tmr);
return remaining.tv64 < 0 ? ktime_set(0, 0): remaining; return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
} }
...@@ -74,7 +72,7 @@ static void timerfd_setup(struct timerfd_ctx *ctx, int flags, ...@@ -74,7 +72,7 @@ static void timerfd_setup(struct timerfd_ctx *ctx, int flags,
ctx->ticks = 0; ctx->ticks = 0;
ctx->tintv = timespec_to_ktime(ktmr->it_interval); ctx->tintv = timespec_to_ktime(ktmr->it_interval);
hrtimer_init(&ctx->tmr, ctx->clockid, htmode); hrtimer_init(&ctx->tmr, ctx->clockid, htmode);
ctx->tmr.expires = texp; hrtimer_set_expires(&ctx->tmr, texp);
ctx->tmr.function = timerfd_tmrproc; ctx->tmr.function = timerfd_tmrproc;
if (texp.tv64 != 0) if (texp.tv64 != 0)
hrtimer_start(&ctx->tmr, texp, htmode); hrtimer_start(&ctx->tmr, texp, htmode);
......
...@@ -20,6 +20,8 @@ ...@@ -20,6 +20,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/percpu.h>
struct hrtimer_clock_base; struct hrtimer_clock_base;
struct hrtimer_cpu_base; struct hrtimer_cpu_base;
...@@ -101,9 +103,14 @@ enum hrtimer_cb_mode { ...@@ -101,9 +103,14 @@ enum hrtimer_cb_mode {
/** /**
* struct hrtimer - the basic hrtimer structure * struct hrtimer - the basic hrtimer structure
* @node: red black tree node for time ordered insertion * @node: red black tree node for time ordered insertion
* @expires: the absolute expiry time in the hrtimers internal * @_expires: the absolute expiry time in the hrtimers internal
* representation. The time is related to the clock on * representation. The time is related to the clock on
* which the timer is based. * which the timer is based. Is setup by adding
* slack to the _softexpires value. For non range timers
* identical to _softexpires.
* @_softexpires: the absolute earliest expiry time of the hrtimer.
* The time which was given as expiry time when the timer
* was armed.
* @function: timer expiry callback function * @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock) * @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above) * @state: state information (See bit values above)
...@@ -121,7 +128,8 @@ enum hrtimer_cb_mode { ...@@ -121,7 +128,8 @@ enum hrtimer_cb_mode {
*/ */
struct hrtimer { struct hrtimer {
struct rb_node node; struct rb_node node;
ktime_t expires; ktime_t _expires;
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *); enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base; struct hrtimer_clock_base *base;
unsigned long state; unsigned long state;
...@@ -201,6 +209,71 @@ struct hrtimer_cpu_base { ...@@ -201,6 +209,71 @@ struct hrtimer_cpu_base {
#endif #endif
}; };
/*
 * Setters for a hrtimer's expiry window.  Per the struct hrtimer docbook,
 * _softexpires is the earliest time the timer may fire and _expires is the
 * hard deadline (_softexpires plus slack); for non-range timers the two
 * are identical.  Only call these on a timer that is not enqueued.
 */

/* Program a plain (non-range) expiry: soft and hard deadline coincide. */
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
timer->_expires = time;
timer->_softexpires = time;
}

/* Program a range expiry: fire no earlier than @time, no later than
 * @time + @delta.  ktime_add_safe() guards the sum against overflow. */
static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
timer->_softexpires = time;
timer->_expires = ktime_add_safe(time, delta);
}

/* Same as hrtimer_set_expires_range(), with the slack given in ns. */
static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
{
timer->_softexpires = time;
timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
}

/* Non-range setter taking the raw scalar (tv64) ktime representation. */
static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
timer->_expires.tv64 = tv64;
timer->_softexpires.tv64 = tv64;
}
/* Advance both expiry values by @time, using the overflow-safe add. */
static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->_expires = ktime_add_safe(timer->_expires, time);
timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

/* Advance both expiry values by @ns nanoseconds.  NOTE(review): unlike
 * hrtimer_add_expires() this uses plain ktime_add_ns(), i.e. without the
 * _safe overflow clamp — presumably fine for the small periodic steps it
 * is used for, but worth confirming. */
static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns)
{
timer->_expires = ktime_add_ns(timer->_expires, ns);
timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}
/*
 * Accessors for the expiry window.  Callers must use these instead of
 * touching the underscore-prefixed fields directly, so range-timer
 * bookkeeping stays in one place.
 */

/* Hard expiry deadline (latest allowed firing time). */
static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
return timer->_expires;
}

/* Soft expiry time (earliest allowed firing time). */
static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
return timer->_softexpires;
}

/* Hard expiry as a raw s64 scalar. */
static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
return timer->_expires.tv64;
}

/* Soft expiry as a raw s64 scalar. */
static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
return timer->_softexpires.tv64;
}

/* Hard expiry converted to nanoseconds. */
static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
return ktime_to_ns(timer->_expires);
}

/* Time left until the hard expiry, measured against the timer's own
 * clock base; negative when the deadline has already passed. */
static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
return ktime_sub(timer->_expires, timer->base->get_time());
}
#ifdef CONFIG_HIGH_RES_TIMERS #ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device; struct clock_event_device;
...@@ -221,6 +294,8 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) ...@@ -221,6 +294,8 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
return timer->base->cpu_base->hres_active; return timer->base->cpu_base->hres_active;
} }
extern void hrtimer_peek_ahead_timers(void);
/* /*
* The resolution of the clocks. The resolution value is returned in * The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an * the clock_getres() system call to give application programmers an
...@@ -243,6 +318,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) ...@@ -243,6 +318,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
* is expired in the next softirq when the clock was advanced. * is expired in the next softirq when the clock was advanced.
*/ */
static inline void clock_was_set(void) { } static inline void clock_was_set(void) { }
static inline void hrtimer_peek_ahead_timers(void) { }
static inline void hres_timers_resume(void) { } static inline void hres_timers_resume(void) { }
...@@ -264,6 +340,10 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) ...@@ -264,6 +340,10 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
extern ktime_t ktime_get(void); extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void); extern ktime_t ktime_get_real(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
/* Exported timer functions: */ /* Exported timer functions: */
/* Initialize timers: */ /* Initialize timers: */
...@@ -288,12 +368,25 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } ...@@ -288,12 +368,25 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
/* Basic timer operations: */ /* Basic timer operations: */
extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode); const enum hrtimer_mode mode);
extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long range_ns, const enum hrtimer_mode mode);
extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer);
/*
 * (Re)start a timer using the soft/hard expiry window that was previously
 * stored in it (via hrtimer_set_expires*()).  The slack handed to
 * hrtimer_start_range_ns() is recomputed as hard - soft in nanoseconds,
 * so the original range is preserved across cancel/restart cycles.
 * Replaces the old open-coded hrtimer_start(timer, timer->expires, mode).
 */
static inline int hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
{
unsigned long delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
return hrtimer_start_range_ns(timer, soft, delta, mode);
}
static inline int hrtimer_restart(struct hrtimer *timer) static inline int hrtimer_restart(struct hrtimer *timer)
{ {
return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
} }
/* Query timers: */ /* Query timers: */
...@@ -350,6 +443,10 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); ...@@ -350,6 +443,10 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
struct task_struct *tsk); struct task_struct *tsk);
extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
const enum hrtimer_mode mode);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */ /* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void); extern void hrtimer_run_queues(void);
extern void hrtimer_run_pending(void); extern void hrtimer_run_pending(void);
......
...@@ -170,6 +170,7 @@ extern struct group_info init_groups; ...@@ -170,6 +170,7 @@ extern struct group_info init_groups;
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \ .fs_excl = ATOMIC_INIT(0), \
.pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \ .pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
......
...@@ -114,11 +114,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset) ...@@ -114,11 +114,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
extern int do_select(int n, fd_set_bits *fds, s64 *timeout); extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
s64 *timeout); struct timespec *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, s64 *timeout); fd_set __user *exp, struct timespec *end_time);
extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
#endif /* KERNEL */ #endif /* KERNEL */
......
...@@ -78,4 +78,11 @@ ...@@ -78,4 +78,11 @@
#define PR_GET_SECUREBITS 27 #define PR_GET_SECUREBITS 27
#define PR_SET_SECUREBITS 28 #define PR_SET_SECUREBITS 28
/*
* Get/set the timerslack as used by poll/select/nanosleep
* A value of 0 means "use default"
*/
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30
#endif /* _LINUX_PRCTL_H */ #endif /* _LINUX_PRCTL_H */
...@@ -1345,6 +1345,12 @@ struct task_struct { ...@@ -1345,6 +1345,12 @@ struct task_struct {
int latency_record_count; int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT]; struct latency_record latency_record[LT_SAVECOUNT];
#endif #endif
/*
* time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
}; };
/* /*
......
...@@ -38,6 +38,14 @@ struct restart_block { ...@@ -38,6 +38,14 @@ struct restart_block {
#endif #endif
u64 expires; u64 expires;
} nanosleep; } nanosleep;
/* For poll */
struct {
struct pollfd __user *ufds;
int nfds;
int has_timeout;
unsigned long tv_sec;
unsigned long tv_nsec;
} poll;
}; };
}; };
......
...@@ -40,6 +40,8 @@ extern struct timezone sys_tz; ...@@ -40,6 +40,8 @@ extern struct timezone sys_tz;
#define NSEC_PER_SEC 1000000000L #define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000L #define FSEC_PER_SEC 1000000000000000L
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
static inline int timespec_equal(const struct timespec *a, static inline int timespec_equal(const struct timespec *a,
const struct timespec *b) const struct timespec *b)
{ {
...@@ -74,6 +76,8 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon, ...@@ -74,6 +76,8 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
const unsigned int min, const unsigned int sec); const unsigned int min, const unsigned int sec);
extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
extern struct timespec timespec_add_safe(const struct timespec lhs,
const struct timespec rhs);
/* /*
* sub = lhs - rhs, in normalized form * sub = lhs - rhs, in normalized form
......
...@@ -1018,6 +1018,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, ...@@ -1018,6 +1018,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->prev_utime = cputime_zero; p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero; p->prev_stime = cputime_zero;
p->default_timer_slack_ns = current->timer_slack_ns;
#ifdef CONFIG_DETECT_SOFTLOCKUP #ifdef CONFIG_DETECT_SOFTLOCKUP
p->last_switch_count = 0; p->last_switch_count = 0;
p->last_switch_timestamp = 0; p->last_switch_timestamp = 0;
......
...@@ -1296,13 +1296,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1296,13 +1296,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
if (!abs_time) if (!abs_time)
schedule(); schedule();
else { else {
unsigned long slack;
slack = current->timer_slack_ns;
if (rt_task(current))
slack = 0;
hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS); HRTIMER_MODE_ABS);
hrtimer_init_sleeper(&t, current); hrtimer_init_sleeper(&t, current);
t.timer.expires = *abs_time; hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
hrtimer_start(&t.timer, t.timer.expires, hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
HRTIMER_MODE_ABS);
if (!hrtimer_active(&t.timer)) if (!hrtimer_active(&t.timer))
t.task = NULL; t.task = NULL;
...@@ -1404,7 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1404,7 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
HRTIMER_MODE_ABS); HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current); hrtimer_init_sleeper(to, current);
to->timer.expires = *time; hrtimer_set_expires(&to->timer, *time);
} }
q.pi_state = NULL; q.pi_state = NULL;
......
This diff is collapsed.
...@@ -639,7 +639,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) ...@@ -639,7 +639,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv); timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
remaining = ktime_sub(timer->expires, now); remaining = ktime_sub(hrtimer_get_expires(timer), now);
/* Return 0 only, when the timer is expired and not pending */ /* Return 0 only, when the timer is expired and not pending */
if (remaining.tv64 <= 0) { if (remaining.tv64 <= 0) {
/* /*
...@@ -733,7 +733,7 @@ common_timer_set(struct k_itimer *timr, int flags, ...@@ -733,7 +733,7 @@ common_timer_set(struct k_itimer *timr, int flags,
hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
timr->it.real.timer.function = posix_timer_fn; timr->it.real.timer.function = posix_timer_fn;
timer->expires = timespec_to_ktime(new_setting->it_value); hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
/* Convert interval */ /* Convert interval */
timr->it.real.interval = timespec_to_ktime(new_setting->it_interval); timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
...@@ -742,14 +742,12 @@ common_timer_set(struct k_itimer *timr, int flags, ...@@ -742,14 +742,12 @@ common_timer_set(struct k_itimer *timr, int flags,
if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) { if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
/* Setup correct expiry time for relative timers */ /* Setup correct expiry time for relative timers */
if (mode == HRTIMER_MODE_REL) { if (mode == HRTIMER_MODE_REL) {
timer->expires = hrtimer_add_expires(timer, timer->base->get_time());
ktime_add_safe(timer->expires,
timer->base->get_time());
} }
return 0; return 0;
} }
hrtimer_start(timer, timer->expires, mode); hrtimer_start_expires(timer, mode);
return 0; return 0;
} }
......
...@@ -631,8 +631,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, ...@@ -631,8 +631,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
/* Setup the timer, when timeout != NULL */ /* Setup the timer, when timeout != NULL */
if (unlikely(timeout)) { if (unlikely(timeout)) {
hrtimer_start(&timeout->timer, timeout->timer.expires, hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
HRTIMER_MODE_ABS);
if (!hrtimer_active(&timeout->timer)) if (!hrtimer_active(&timeout->timer))
timeout->task = NULL; timeout->task = NULL;
} }
......
...@@ -227,9 +227,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) ...@@ -227,9 +227,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
now = hrtimer_cb_get_time(&rt_b->rt_period_timer); now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
hrtimer_start(&rt_b->rt_period_timer, hrtimer_start_expires(&rt_b->rt_period_timer,
rt_b->rt_period_timer.expires, HRTIMER_MODE_ABS);
HRTIMER_MODE_ABS);
} }
spin_unlock(&rt_b->rt_runtime_lock); spin_unlock(&rt_b->rt_runtime_lock);
} }
...@@ -1071,7 +1070,7 @@ static void hrtick_start(struct rq *rq, u64 delay) ...@@ -1071,7 +1070,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
struct hrtimer *timer = &rq->hrtick_timer; struct hrtimer *timer = &rq->hrtick_timer;
ktime_t time = ktime_add_ns(timer->base->get_time(), delay); ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
timer->expires = time; hrtimer_set_expires(timer, time);
if (rq == this_rq()) { if (rq == this_rq()) {
hrtimer_restart(timer); hrtimer_restart(timer);
......
...@@ -1716,6 +1716,16 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, ...@@ -1716,6 +1716,16 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
case PR_SET_TSC: case PR_SET_TSC:
error = SET_TSC_CTL(arg2); error = SET_TSC_CTL(arg2);
break; break;
case PR_GET_TIMERSLACK:
error = current->timer_slack_ns;
break;
case PR_SET_TIMERSLACK:
if (arg2 <= 0)
current->timer_slack_ns =
current->default_timer_slack_ns;
else
current->timer_slack_ns = arg2;
break;
default: default:
error = -EINVAL; error = -EINVAL;
break; break;
......
...@@ -669,3 +669,21 @@ EXPORT_SYMBOL(get_jiffies_64); ...@@ -669,3 +669,21 @@ EXPORT_SYMBOL(get_jiffies_64);
#endif #endif
EXPORT_SYMBOL(jiffies); EXPORT_SYMBOL(jiffies);
/*
* Add two timespec values and do a safety check for overflow.
* It's assumed that both values are valid (>= 0)
*/
struct timespec timespec_add_safe(const struct timespec lhs,
const struct timespec rhs)
{
struct timespec res;
set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
res.tv_sec = TIME_T_MAX;
return res;
}
...@@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) ...@@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
time_state = TIME_OOP; time_state = TIME_OOP;
printk(KERN_NOTICE "Clock: " printk(KERN_NOTICE "Clock: "
"inserting leap second 23:59:60 UTC\n"); "inserting leap second 23:59:60 UTC\n");
leap_timer.expires = ktime_add_ns(leap_timer.expires, hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
NSEC_PER_SEC);
res = HRTIMER_RESTART; res = HRTIMER_RESTART;
break; break;
case TIME_DEL: case TIME_DEL:
......
...@@ -300,7 +300,7 @@ void tick_nohz_stop_sched_tick(int inidle) ...@@ -300,7 +300,7 @@ void tick_nohz_stop_sched_tick(int inidle)
goto out; goto out;
} }
ts->idle_tick = ts->sched_timer.expires; ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1; ts->tick_stopped = 1;
ts->idle_jiffies = last_jiffies; ts->idle_jiffies = last_jiffies;
rcu_enter_nohz(); rcu_enter_nohz();
...@@ -380,21 +380,21 @@ ktime_t tick_nohz_get_sleep_length(void) ...@@ -380,21 +380,21 @@ ktime_t tick_nohz_get_sleep_length(void)
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{ {
hrtimer_cancel(&ts->sched_timer); hrtimer_cancel(&ts->sched_timer);
ts->sched_timer.expires = ts->idle_tick; hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
while (1) { while (1) {
/* Forward the time to expire in the future */ /* Forward the time to expire in the future */
hrtimer_forward(&ts->sched_timer, now, tick_period); hrtimer_forward(&ts->sched_timer, now, tick_period);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, hrtimer_start_expires(&ts->sched_timer,
ts->sched_timer.expires,
HRTIMER_MODE_ABS); HRTIMER_MODE_ABS);
/* Check, if the timer was already in the past */ /* Check, if the timer was already in the past */
if (hrtimer_active(&ts->sched_timer)) if (hrtimer_active(&ts->sched_timer))
break; break;
} else { } else {
if (!tick_program_event(ts->sched_timer.expires, 0)) if (!tick_program_event(
hrtimer_get_expires(&ts->sched_timer), 0))
break; break;
} }
/* Update jiffies and reread time */ /* Update jiffies and reread time */
...@@ -456,14 +456,16 @@ void tick_nohz_restart_sched_tick(void) ...@@ -456,14 +456,16 @@ void tick_nohz_restart_sched_tick(void)
*/ */
ts->tick_stopped = 0; ts->tick_stopped = 0;
ts->idle_exittime = now; ts->idle_exittime = now;
tick_nohz_restart(ts, now); tick_nohz_restart(ts, now);
local_irq_enable(); local_irq_enable();
} }
static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{ {
hrtimer_forward(&ts->sched_timer, now, tick_period); hrtimer_forward(&ts->sched_timer, now, tick_period);
return tick_program_event(ts->sched_timer.expires, 0); return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
} }
/* /*
...@@ -542,7 +544,7 @@ static void tick_nohz_switch_to_nohz(void) ...@@ -542,7 +544,7 @@ static void tick_nohz_switch_to_nohz(void)
next = tick_init_jiffy_update(); next = tick_init_jiffy_update();
for (;;) { for (;;) {
ts->sched_timer.expires = next; hrtimer_set_expires(&ts->sched_timer, next);
if (!tick_program_event(next, 0)) if (!tick_program_event(next, 0))
break; break;
next = ktime_add(next, tick_period); next = ktime_add(next, tick_period);
...@@ -577,7 +579,7 @@ static void tick_nohz_kick_tick(int cpu) ...@@ -577,7 +579,7 @@ static void tick_nohz_kick_tick(int cpu)
* already reached or less/equal than the tick period. * already reached or less/equal than the tick period.
*/ */
now = ktime_get(); now = ktime_get();
delta = ktime_sub(ts->sched_timer.expires, now); delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
if (delta.tv64 <= tick_period.tv64) if (delta.tv64 <= tick_period.tv64)
return; return;
...@@ -678,16 +680,15 @@ void tick_setup_sched_timer(void) ...@@ -678,16 +680,15 @@ void tick_setup_sched_timer(void)
ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
/* Get the next period (per cpu) */ /* Get the next period (per cpu) */
ts->sched_timer.expires = tick_init_jiffy_update(); hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
offset = ktime_to_ns(tick_period) >> 1; offset = ktime_to_ns(tick_period) >> 1;
do_div(offset, num_possible_cpus()); do_div(offset, num_possible_cpus());
offset *= smp_processor_id(); offset *= smp_processor_id();
ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset); hrtimer_add_expires_ns(&ts->sched_timer, offset);
for (;;) { for (;;) {
hrtimer_forward(&ts->sched_timer, now, tick_period); hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_start(&ts->sched_timer, ts->sched_timer.expires, hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
HRTIMER_MODE_ABS);
/* Check, if the timer was already in the past */ /* Check, if the timer was already in the past */
if (hrtimer_active(&ts->sched_timer)) if (hrtimer_active(&ts->sched_timer))
break; break;
......
...@@ -66,9 +66,11 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer, ...@@ -66,9 +66,11 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid); SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
#endif #endif
SEQ_printf(m, "\n"); SEQ_printf(m, "\n");
SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n", SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
(unsigned long long)ktime_to_ns(timer->expires), (unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
(long long)(ktime_to_ns(timer->expires) - now)); (unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)),
(long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),
(long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));
} }
static void static void
......
...@@ -545,9 +545,10 @@ static void cbq_ovl_delay(struct cbq_class *cl) ...@@ -545,9 +545,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
expires = ktime_set(0, 0); expires = ktime_set(0, 0);
expires = ktime_add_ns(expires, PSCHED_US2NS(sched)); expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
if (hrtimer_try_to_cancel(&q->delay_timer) && if (hrtimer_try_to_cancel(&q->delay_timer) &&
ktime_to_ns(ktime_sub(q->delay_timer.expires, ktime_to_ns(ktime_sub(
expires)) > 0) hrtimer_get_expires(&q->delay_timer),
q->delay_timer.expires = expires; expires)) > 0)
hrtimer_set_expires(&q->delay_timer, expires);
hrtimer_restart(&q->delay_timer); hrtimer_restart(&q->delay_timer);
cl->delayed = 1; cl->delayed = 1;
cl->xstats.overactions++; cl->xstats.overactions++;
......
...@@ -34,7 +34,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle) ...@@ -34,7 +34,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
chip->thalf = 0; chip->thalf = 0;
if (!atomic_read(&chip->timer_active)) if (!atomic_read(&chip->timer_active))
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
hrtimer_forward(&chip->timer, chip->timer.expires, hrtimer_forward(&chip->timer, hrtimer_get_expires(&chip->timer),
ktime_set(0, chip->ns_rem)); ktime_set(0, chip->ns_rem));
return HRTIMER_RESTART; return HRTIMER_RESTART;
} }
...@@ -118,7 +118,8 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle) ...@@ -118,7 +118,8 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
chip->ns_rem = PCSP_PERIOD_NS(); chip->ns_rem = PCSP_PERIOD_NS();
ns = (chip->thalf ? PCSP_CALC_NS(timer_cnt) : chip->ns_rem); ns = (chip->thalf ? PCSP_CALC_NS(timer_cnt) : chip->ns_rem);
chip->ns_rem -= ns; chip->ns_rem -= ns;
hrtimer_forward(&chip->timer, chip->timer.expires, ktime_set(0, ns)); hrtimer_forward(&chip->timer, hrtimer_get_expires(&chip->timer),
ktime_set(0, ns));
return HRTIMER_RESTART; return HRTIMER_RESTART;
exit_nr_unlock2: exit_nr_unlock2:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment