Commit 4961b6e1 authored by Thomas Gleixner

sched: core: Use hrtimer_start[_expires]()

hrtimer_start() now enforces a timer interrupt when an already expired
timer is enqueued.

Get rid of the __hrtimer_start_range_ns() invocations and the loops
around them.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20150414203502.531131739@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 3497d206
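
Background note (not part of the patch): the start_bandwidth_timer() simplification below leans on hrtimer_forward_now(), which folds the explicit hrtimer_cb_get_time() + hrtimer_forward() pair into one call. A rough sketch of that helper, approximating its in-tree definition in include/linux/hrtimer.h:

        /*
         * Sketch only, approximating the in-tree helper: advance the timer's
         * expiry past the timer base's current time in whole multiples of
         * 'interval' and return the number of overruns.
         */
        static inline u64 hrtimer_forward_now(struct hrtimer *timer,
                                              ktime_t interval)
        {
                return hrtimer_forward(timer, timer->base->get_time(), interval);
        }
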
@@ -92,22 +92,11 @@
 
 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
-        unsigned long delta;
-        ktime_t soft, hard, now;
-
-        for (;;) {
-                if (hrtimer_active(period_timer))
-                        break;
-
-                now = hrtimer_cb_get_time(period_timer);
-                hrtimer_forward(period_timer, now, period);
-
-                soft = hrtimer_get_softexpires(period_timer);
-                hard = hrtimer_get_expires(period_timer);
-                delta = ktime_to_ns(ktime_sub(hard, soft));
-                __hrtimer_start_range_ns(period_timer, soft, delta,
-                                         HRTIMER_MODE_ABS_PINNED, 0);
-        }
+        if (hrtimer_active(period_timer))
+                return;
+
+        hrtimer_forward_now(period_timer, period);
+        hrtimer_start_expires(period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 DEFINE_MUTEX(sched_domains_mutex);

@@ -355,12 +344,11 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 #ifdef CONFIG_SMP
 
-static int __hrtick_restart(struct rq *rq)
+static void __hrtick_restart(struct rq *rq)
 {
         struct hrtimer *timer = &rq->hrtick_timer;
-        ktime_t time = hrtimer_get_softexpires(timer);
 
-        return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
+        hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*

@@ -440,8 +428,8 @@ void hrtick_start(struct rq *rq, u64 delay)
          * doesn't make sense. Rely on vruntime for fairness.
          */
         delay = max_t(u64, delay, 10000LL);
-        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-                                 HRTIMER_MODE_REL_PINNED, 0);
+        hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+                      HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)

@@ -3850,7 +3850,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
  * Are we near the end of the current quota period?
  *
  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
- * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
+ * hrtimer base being cleared by hrtimer_start. In the case of
  * migrate_hrtimers, base is never cleared, so we are fine.
  */
 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)