Commit 70b4cf84 authored by Anna-Maria Behnsen, committed by Thomas Gleixner

timers: Split out "get next timer interrupt" functionality

The functionality for getting the next timer interrupt in
get_next_timer_interrupt() is split into a separate function
fetch_next_timer_interrupt() to be usable by other call sites.

This is preparatory work for the conversion of the NOHZ timer
placement to a pull at expiry time model. No functional change.
Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240221090548.36600-13-anna-maria@linutronix.de
parent 21927fc8
...@@ -2033,30 +2033,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base, ...@@ -2033,30 +2033,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
return base->next_expiry; return base->next_expiry;
} }
static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
bool *idle) struct timer_base *base_local,
struct timer_base *base_global,
struct timer_events *tevt)
{ {
struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
unsigned long nextevt, nextevt_local, nextevt_global; unsigned long nextevt, nextevt_local, nextevt_global;
struct timer_base *base_local, *base_global;
bool local_first; bool local_first;
u64 expires;
/*
* Pretend that there is no timer pending if the cpu is offline.
* Possible pending timers will be migrated later to an active cpu.
*/
if (cpu_is_offline(smp_processor_id())) {
if (idle)
*idle = true;
return tevt.local;
}
base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
raw_spin_lock(&base_local->lock);
raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
nextevt_local = next_timer_interrupt(base_local, basej); nextevt_local = next_timer_interrupt(base_local, basej);
nextevt_global = next_timer_interrupt(base_global, basej); nextevt_global = next_timer_interrupt(base_global, basej);
...@@ -2074,8 +2057,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, ...@@ -2074,8 +2057,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
/* If we missed a tick already, force 0 delta */ /* If we missed a tick already, force 0 delta */
if (time_before(nextevt, basej)) if (time_before(nextevt, basej))
nextevt = basej; nextevt = basej;
tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC; tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
goto forward; return nextevt;
} }
/* /*
...@@ -2085,12 +2068,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, ...@@ -2085,12 +2068,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
* ignored. If the global queue is empty, nothing to do either. * ignored. If the global queue is empty, nothing to do either.
*/ */
if (!local_first && base_global->timers_pending) if (!local_first && base_global->timers_pending)
tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC; tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
if (base_local->timers_pending) if (base_local->timers_pending)
tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC; tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
return nextevt;
}
static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
bool *idle)
{
struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
struct timer_base *base_local, *base_global;
unsigned long nextevt;
u64 expires;
/*
* Pretend that there is no timer pending if the cpu is offline.
* Possible pending timers will be migrated later to an active cpu.
*/
if (cpu_is_offline(smp_processor_id())) {
if (idle)
*idle = true;
return tevt.local;
}
base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
raw_spin_lock(&base_local->lock);
raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
base_global, &tevt);
forward:
/* /*
* We have a fresh next event. Check whether we can forward the * We have a fresh next event. Check whether we can forward the
* base. * base.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment