Commit cea92e84 authored by Linus Torvalds

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Thomas Gleixner:
 "A pile of fixes for long standing issues with the timer wheel and the
  NOHZ code:

   - Prevent timer base confusion across the nohz switch, which can
     cause unlocked access and data corruption

   - Reinitialize the stale base clock on cpu hotplug to prevent subtle
     side effects including rollovers on 32bit

   - Prevent an interrupt storm when the timer softirq is already
     pending caused by tick_nohz_stop_sched_tick()

   - Move the timer start tracepoint to a place where it actually makes
     sense

   - Add documentation to timerqueue functions as they caused confusion
     several times now"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timerqueue: Document return values of timerqueue_add/del()
  timers: Invoke timer_start_debug() where it makes sense
  nohz: Prevent a timer interrupt storm in tick_nohz_stop_sched_tick()
  timers: Reinitialize per cpu bases on hotplug
  timers: Use deferrable base independent of base::nohz_active
parents 8d517bdf 9f4533cd
@@ -86,7 +86,7 @@ enum cpuhp_state {
 	CPUHP_MM_ZSWP_POOL_PREPARE,
 	CPUHP_KVM_PPC_BOOK3S_PREPARE,
 	CPUHP_ZCOMP_PREPARE,
-	CPUHP_TIMERS_DEAD,
+	CPUHP_TIMERS_PREPARE,
 	CPUHP_MIPS_SOC_PREPARE,
 	CPUHP_BP_PREPARE_DYN,
 	CPUHP_BP_PREPARE_DYN_END	= CPUHP_BP_PREPARE_DYN + 20,

@@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j);
 unsigned long round_jiffies_up_relative(unsigned long j);
 
 #ifdef CONFIG_HOTPLUG_CPU
+int timers_prepare_cpu(unsigned int cpu);
 int timers_dead_cpu(unsigned int cpu);
 #else
+#define timers_prepare_cpu	NULL
 #define timers_dead_cpu		NULL
 #endif
 
 #endif

@@ -1277,9 +1277,9 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 	 * before blk_mq_queue_reinit_notify() from notify_dead(),
 	 * otherwise a RCU stall occurs.
 	 */
-	[CPUHP_TIMERS_DEAD] = {
+	[CPUHP_TIMERS_PREPARE] = {
 		.name			= "timers:dead",
-		.startup.single		= NULL,
+		.startup.single		= timers_prepare_cpu,
 		.teardown.single	= timers_dead_cpu,
 	},
 	/* Kicks the plugged cpu into life */

@@ -650,6 +650,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 	ts->next_tick = 0;
 }
 
+static inline bool local_timer_softirq_pending(void)
+{
+	return local_softirq_pending() & TIMER_SOFTIRQ;
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 					  ktime_t now, int cpu)
 {
@@ -666,8 +671,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	} while (read_seqretry(&jiffies_lock, seq));
 	ts->last_jiffies = basejiff;
 
-	if (rcu_needs_cpu(basemono, &next_rcu) ||
-	    arch_needs_cpu() || irq_work_needs_cpu()) {
+	/*
+	 * Keep the periodic tick when RCU, the architecture or irq_work
+	 * requests it.
+	 * Aside from that, check whether the local timer softirq is
+	 * pending. If so, it's a bad idea to call get_next_timer_interrupt()
+	 * because there is an already expired timer, so it will request
+	 * immediate expiry, which rearms the hardware timer with a
+	 * minimal delta that brings us back to this place
+	 * immediately. Lather, rinse and repeat...
+	 */
+	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
+	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
 		next_tick = basemono + TICK_NSEC;
 	} else {
 		/*

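The comment added above describes the feedback loop this change breaks: an already pending timer softirq means get_next_timer_interrupt() would report an expired timer and rearm the hardware with a minimal delta, over and over. A loose userspace-only sketch of that decision (not kernel code; the softirq numbers, pending mask and TICK_NSEC value below are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ };     /* softirq numbers, illustrative only */
#define BIT(nr)         (1u << (nr))
#define TICK_NSEC       1000000ULL                      /* assume a 1 ms tick for the sketch */

static uint32_t softirq_pending;                        /* stands in for local_softirq_pending() */

static bool timer_softirq_is_pending(void)
{
        return softirq_pending & BIT(TIMER_SOFTIRQ);    /* bit test on the pending mask */
}

static uint64_t pick_next_event(uint64_t basemono, uint64_t next_timer)
{
        /* Keep the periodic tick when the timer softirq is already pending,
         * instead of rearming with the (already expired) next timer. */
        if (timer_softirq_is_pending())
                return basemono + TICK_NSEC;
        return next_timer;
}

int main(void)
{
        softirq_pending |= BIT(TIMER_SOFTIRQ);
        printf("next event: %llu ns\n",
               (unsigned long long)pick_next_event(0, 10));     /* 1000000, not 10 */
        return 0;
}

With the pending bit set, the sketch programs the next event a full tick period out instead of honoring the already expired "next timer", which is the effect the new local_timer_softirq_pending() check has in tick_nohz_stop_sched_tick().
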
@@ -823,11 +823,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
 	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
 
 	/*
-	 * If the timer is deferrable and nohz is active then we need to use
-	 * the deferrable base.
+	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
+	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-	    (tflags & TIMER_DEFERRABLE))
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
 		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
 	return base;
 }
@@ -837,11 +836,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
 	/*
-	 * If the timer is deferrable and nohz is active then we need to use
-	 * the deferrable base.
+	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
+	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-	    (tflags & TIMER_DEFERRABLE))
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
 		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
 	return base;
 }

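Both hunks above make the base selection a pure function of the build configuration and the timer's own flags; base->nohz_active no longer influences where a timer is queued. A minimal userspace model of that selection (the TIMER_DEFERRABLE bit value is reproduced here only for the sketch):

#include <stdio.h>

#define CONFIG_NO_HZ_COMMON     1               /* assume NO_HZ_COMMON is enabled */
#define TIMER_DEFERRABLE        0x00080000u     /* flag bit, copied for the sketch */

enum { BASE_STD, BASE_DEF, NR_BASES };

/* After the change, a deferrable timer always lands on the deferrable base
 * when NO_HZ_COMMON is built in, regardless of whether nohz is active yet. */
static int pick_base(unsigned int tflags)
{
        if (CONFIG_NO_HZ_COMMON && (tflags & TIMER_DEFERRABLE))
                return BASE_DEF;
        return BASE_STD;
}

int main(void)
{
        printf("deferrable timer -> base %d\n", pick_base(TIMER_DEFERRABLE));   /* 1 (BASE_DEF) */
        printf("regular timer    -> base %d\n", pick_base(0));                  /* 0 (BASE_STD) */
        return 0;
}

The run_timer_softirq() hunk below applies the same criterion on the expiry side, so the deferrable wheel is processed whenever NO_HZ_COMMON is enabled rather than only once nohz_active is set.
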
@@ -1009,8 +1007,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
 		goto out_unlock;
 
-	debug_activate(timer, expires);
-
 	new_base = get_target_base(base, timer->flags);
 
 	if (base != new_base) {
@@ -1034,6 +1030,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 		}
 	}
 
+	debug_activate(timer, expires);
+
 	timer->expires = expires;
 	/*
 	 * If 'idx' was calculated above and the base time did not advance
@@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 	base->must_forward_clk = false;
 
 	__run_timers(base);
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 }
 
@@ -1855,6 +1853,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
 	}
 }
 
+int timers_prepare_cpu(unsigned int cpu)
+{
+	struct timer_base *base;
+	int b;
+
+	for (b = 0; b < NR_BASES; b++) {
+		base = per_cpu_ptr(&timer_bases[b], cpu);
+		base->clk = jiffies;
+		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+		base->is_idle = false;
+		base->must_forward_clk = true;
+	}
+	return 0;
+}
+
 int timers_dead_cpu(unsigned int cpu)
 {
 	struct timer_base *old_base;

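timers_prepare_cpu() re-seeds base->clk (and next_expiry) with the current jiffies before the CPU is brought back, which is what the "stale base clock" bullet in the merge description refers to: the wheel derives bucket positions from the distance between an expiry and base->clk, so a clock frozen while the CPU was offline inflates that distance or wraps it on 32-bit. A toy userspace illustration with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t jiffies   = 0x00001000u;       /* current time, shortly after a 32-bit rollover */
        uint32_t stale_clk = 0xfff00000u;       /* base->clk left behind when the CPU went down */
        uint32_t expires   = jiffies + 10;      /* a timer meant to fire in 10 jiffies */

        printf("distance with stale clk:     %u jiffies\n", (unsigned)(expires - stale_clk)); /* ~1 million */
        printf("distance with clk = jiffies: %u jiffies\n", (unsigned)(expires - jiffies));   /* 10 */

        /* Re-seeding clk in the prepare callback keeps the distance at the
         * intended 10 jiffies instead of a wrapped, million-jiffy value. */
        return 0;
}
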
@@ -33,8 +33,9 @@
  * @head: head of timerqueue
  * @node: timer node to be added
  *
- * Adds the timer node to the timerqueue, sorted by the
- * node's expires value.
+ * Adds the timer node to the timerqueue, sorted by the node's expires
+ * value. Returns true if the newly added timer is the first expiring timer in
+ * the queue.
  */
 bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 {
@@ -70,7 +71,8 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
  * @head: head of timerqueue
  * @node: timer node to be removed
  *
- * Removes the timer node from the timerqueue.
+ * Removes the timer node from the timerqueue. Returns true if the queue is
+ * not empty after the remove.
  */
 bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 {

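The documented return values are what callers key off to decide whether hardware needs reprogramming: only an insertion that becomes the new head changes the earliest deadline. A toy, array-backed queue (not the kernel's rb-tree implementation) with the same return semantics:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

struct toy_queue {
        unsigned long long expires[MAX_NODES];
        int count;
};

/* Returns true when the inserted deadline became the earliest one. */
static bool toy_add(struct toy_queue *q, unsigned long long expires)
{
        int i = q->count++;                     /* no bounds checking, toy code */

        /* insertion sort keeps index 0 as the earliest deadline */
        while (i > 0 && q->expires[i - 1] > expires) {
                q->expires[i] = q->expires[i - 1];
                i--;
        }
        q->expires[i] = expires;
        return i == 0;
}

/* Returns true when the queue is still non-empty after the removal. */
static bool toy_del_first(struct toy_queue *q)
{
        for (int i = 1; i < q->count; i++)
                q->expires[i - 1] = q->expires[i];
        q->count--;
        return q->count > 0;
}

int main(void)
{
        struct toy_queue q = { .count = 0 };

        printf("add 100 -> new head? %d\n", toy_add(&q, 100));          /* 1 */
        printf("add 200 -> new head? %d\n", toy_add(&q, 200));          /* 0 */
        printf("add  50 -> new head? %d\n", toy_add(&q, 50));           /* 1 */
        printf("del head -> non-empty? %d\n", toy_del_first(&q));       /* 1 */
        return 0;
}

This is, for instance, how hrtimer enqueue decides whether the clock event device needs to be reprogrammed: only when the newly added timer is reported as the first to expire.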