Commit 303e967f authored by Thomas Gleixner, committed by Linus Torvalds

[PATCH] hrtimers; add state tracking

Reintroduce ktimers feature "optimized away" by the ktimers review process:
multiple hrtimer states to enable the running of hrtimers without holding the
cpu-base-lock.

(The "optimized" rbtree hack carried only 2 states worth of information and we
need 4 for high resolution timers and dynamic ticks.)

No functional changes.

Build-fixes-from: Andrew Morton <akpm@osdl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3c8aa39d
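
To make the commit message concrete, here is a minimal, stand-alone C sketch (not part of the patch) that enumerates the four states referred to above. The values and names simply mirror the HRTIMER_STATE_* defines and the comment block added in the header hunk below; everything else (the state_name() helper, the main() driver) is illustrative only.

/* Stand-alone illustration: how the two state bits combine into the
 * four states mentioned in the commit message.
 */
#include <stdio.h>

#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01
#define HRTIMER_STATE_CALLBACK	0x02

static const char *state_name(unsigned long state)
{
	switch (state) {
	case HRTIMER_STATE_INACTIVE:
		return "inactive";
	case HRTIMER_STATE_ENQUEUED:
		return "enqueued into rbtree";
	case HRTIMER_STATE_CALLBACK:
		return "callback function running";
	case HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_CALLBACK:
		return "callback running and enqueued (requeued on another CPU)";
	default:
		return "invalid";
	}
}

int main(void)
{
	unsigned long s;

	for (s = 0x00; s <= 0x03; s++)
		printf("0x%02lx  %s\n", s, state_name(s));
	return 0;
}
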
@@ -40,6 +40,34 @@ enum hrtimer_restart {
 	HRTIMER_RESTART,	/* Timer must be restarted */
 };
 
+/*
+ * Bit values to track state of the timer
+ *
+ * Possible states:
+ *
+ * 0x00		inactive
+ * 0x01		enqueued into rbtree
+ * 0x02		callback function running
+ * 0x03		callback function running and enqueued
+ *		(was requeued on another CPU)
+ *
+ * The "callback function running and enqueued" status is only possible on
+ * SMP. It happens for example when a posix timer expired and the callback
+ * queued a signal. Between dropping the lock which protects the posix timer
+ * and reacquiring the base lock of the hrtimer, another CPU can deliver the
+ * signal and rearm the timer. We have to preserve the callback running state,
+ * as otherwise the timer could be removed before the softirq code finishes
+ * the handling of the timer.
+ *
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
+ * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
+ *
+ * All state transitions are protected by cpu_base->lock.
+ */
+#define HRTIMER_STATE_INACTIVE	0x00
+#define HRTIMER_STATE_ENQUEUED	0x01
+#define HRTIMER_STATE_CALLBACK	0x02
+
 /**
  * struct hrtimer - the basic hrtimer structure
  * @node:	red black tree node for time ordered insertion
@@ -48,6 +76,7 @@ enum hrtimer_restart {
  *		which the timer is based.
  * @function:	timer expiry callback function
  * @base:	pointer to the timer base (per cpu and per clock)
+ * @state:	state information (See bit values above)
  *
  * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE()
  */
@@ -56,6 +85,7 @@ struct hrtimer {
 	ktime_t				expires;
 	enum hrtimer_restart		(*function)(struct hrtimer *);
 	struct hrtimer_clock_base	*base;
+	unsigned long			state;
 };
 
 /**
@@ -141,9 +171,13 @@ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
 extern ktime_t hrtimer_get_next_event(void);
 #endif
 
+/*
+ * A timer is active when it is enqueued into the rbtree or the callback
+ * function is running.
+ */
 static inline int hrtimer_active(const struct hrtimer *timer)
 {
-	return rb_parent(&timer->node) != &timer->node;
+	return timer->state != HRTIMER_STATE_INACTIVE;
 }
 
 /* Forward a hrtimer so it expires after now: */
...
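
The header change swaps the old rbtree-parent trick for an explicit state word, so the activity checks become simple bit tests. The following stand-alone sketch (not part of the patch; struct fake_hrtimer and the lowercase helper names are illustrative stand-ins for the real hrtimer helpers) shows how the three predicates used by the patch read the same state field, and that a timer in state 0x03 still counts as active.

/* Stand-alone illustration of the state predicates. */
#include <assert.h>

#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01
#define HRTIMER_STATE_CALLBACK	0x02

struct fake_hrtimer { unsigned long state; };

static int active(const struct fake_hrtimer *t)
{
	return t->state != HRTIMER_STATE_INACTIVE;
}

static int queued(const struct fake_hrtimer *t)
{
	return t->state & HRTIMER_STATE_ENQUEUED;
}

static int callback_running(const struct fake_hrtimer *t)
{
	return t->state & HRTIMER_STATE_CALLBACK;
}

int main(void)
{
	/* requeued on another CPU while its callback runs: state 0x03 */
	struct fake_hrtimer t = { HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_CALLBACK };

	assert(active(&t) && queued(&t) && callback_running(&t));

	t.state = HRTIMER_STATE_CALLBACK;	/* dequeued, callback still running */
	assert(active(&t) && !queued(&t));
	return 0;
}
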
@@ -149,6 +149,23 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 		ktime_add(xtim, tomono);
 }
 
+/*
+ * Helper function to check whether the timer is on one of the queues
+ */
+static inline int hrtimer_is_queued(struct hrtimer *timer)
+{
+	return timer->state & HRTIMER_STATE_ENQUEUED;
+}
+
+/*
+ * Helper function to check whether the timer is running the callback
+ * function
+ */
+static inline int hrtimer_callback_running(struct hrtimer *timer)
+{
+	return timer->state & HRTIMER_STATE_CALLBACK;
+}
+
 /*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
@@ -390,6 +407,11 @@ static void enqueue_hrtimer(struct hrtimer *timer,
 	 */
 	rb_link_node(&timer->node, parent, link);
 	rb_insert_color(&timer->node, &base->active);
+	/*
+	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
+	 * state of a possibly running callback.
+	 */
+	timer->state |= HRTIMER_STATE_ENQUEUED;
 
 	if (!base->first || timer->expires.tv64 <
 		rb_entry(base->first, struct hrtimer, node)->expires.tv64)
@@ -402,7 +424,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
  * Caller must hold the base lock.
  */
 static void __remove_hrtimer(struct hrtimer *timer,
-			     struct hrtimer_clock_base *base)
+			     struct hrtimer_clock_base *base,
+			     unsigned long newstate)
 {
 	/*
 	 * Remove the timer from the rbtree and replace the
@@ -411,7 +434,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 	if (base->first == &timer->node)
 		base->first = rb_next(&timer->node);
 	rb_erase(&timer->node, &base->active);
-	rb_set_parent(&timer->node, &timer->node);
+	timer->state = newstate;
 }
 
 /*
@@ -420,8 +443,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
 static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 {
-	if (hrtimer_active(timer)) {
-		__remove_hrtimer(timer, base);
+	if (hrtimer_is_queued(timer)) {
+		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE);
 		return 1;
 	}
 	return 0;
@@ -493,7 +516,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
 	base = lock_hrtimer_base(timer, &flags);
 
-	if (base->cpu_base->curr_timer != timer)
+	if (!hrtimer_callback_running(timer))
 		ret = remove_hrtimer(timer, base);
 
 	unlock_hrtimer_base(timer, &flags);
@@ -598,7 +621,6 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 		clock_id = CLOCK_MONOTONIC;
 
 	timer->base = &cpu_base->clock_base[clock_id];
-	rb_set_parent(&timer->node, &timer->node);
 }
 EXPORT_SYMBOL_GPL(hrtimer_init);
@@ -649,13 +671,14 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
 		fn = timer->function;
 		set_curr_timer(cpu_base, timer);
-		__remove_hrtimer(timer, base);
+		__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK);
 		spin_unlock_irq(&cpu_base->lock);
 
 		restart = fn(timer);
 
 		spin_lock_irq(&cpu_base->lock);
 
+		timer->state &= ~HRTIMER_STATE_CALLBACK;
 		if (restart != HRTIMER_NORESTART) {
 			BUG_ON(hrtimer_active(timer));
 			enqueue_hrtimer(timer, base);
@@ -826,7 +849,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 	while ((node = rb_first(&old_base->active))) {
 		timer = rb_entry(node, struct hrtimer, node);
-		__remove_hrtimer(timer, old_base);
+		BUG_ON(timer->state & HRTIMER_STATE_CALLBACK);
+		__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE);
 		timer->base = new_base;
 		enqueue_hrtimer(timer, new_base);
 	}
...
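
Taken together, the second set of hunks implements a small state machine around the callback: expiry moves the timer from ENQUEUED to CALLBACK, the callback runs with the base lock dropped, a requeue from another CPU merely ORs ENQUEUED back in, and only then is CALLBACK cleared, so the rearm cannot be lost. Below is a stand-alone sketch of that sequence (not part of the patch; the lock itself is omitted and struct fake_timer, enqueue() and remove_for_callback() are illustrative names, not the real kernel functions).

/* Stand-alone illustration of the state transitions around a callback. */
#include <assert.h>

#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01
#define HRTIMER_STATE_CALLBACK	0x02

struct fake_timer { unsigned long state; };

static void enqueue(struct fake_timer *t)
{
	t->state |= HRTIMER_STATE_ENQUEUED;	/* preserve a running CALLBACK bit */
}

static void remove_for_callback(struct fake_timer *t)
{
	t->state = HRTIMER_STATE_CALLBACK;	/* like __remove_hrtimer(..., HRTIMER_STATE_CALLBACK) */
}

int main(void)
{
	struct fake_timer t = { HRTIMER_STATE_INACTIVE };

	enqueue(&t);				/* armed: 0x01 */
	remove_for_callback(&t);		/* expiry: 0x02, callback runs without the lock */

	enqueue(&t);				/* another CPU rearms it meanwhile: 0x03 */
	assert(t.state == (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_CALLBACK));

	t.state &= ~HRTIMER_STATE_CALLBACK;	/* softirq finishes the callback */
	assert(t.state == HRTIMER_STATE_ENQUEUED);	/* still enqueued, rearm not lost */
	return 0;
}
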