Commit cb3cb673 authored by Ingo Molnar

Merge branch 'WIP.core/rcu' into core/rcu, to pick up two x86/entry dependencies

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 806f04e9 07325d4a
include/linux/hardirq.h
@@ -2,31 +2,28 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
+#include <linux/context_tracking_state.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
 #include <asm/hardirq.h>
 
-
 extern void synchronize_irq(unsigned int irq);
 extern bool synchronize_hardirq(unsigned int irq);
 
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
+#else
+static inline void __rcu_irq_enter_check_tick(void) { }
+#endif
 
-static inline void rcu_nmi_exit(void)
-{
-}
-
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
+static __always_inline void rcu_irq_enter_check_tick(void)
+{
+	if (context_tracking_enabled())
+		__rcu_irq_enter_check_tick();
+}
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
  * because NMI handlers may not preempt and the ops are
@@ -65,6 +62,14 @@ extern void irq_exit(void);
 #define arch_nmi_exit()		do { } while (0)
 #endif
 
+#ifdef CONFIG_TINY_RCU
+static inline void rcu_nmi_enter(void) { }
+static inline void rcu_nmi_exit(void) { }
+#else
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#endif
+
 /*
  * NMI vs Tracing
  * --------------
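Note on usage: the new rcu_irq_enter_check_tick() wrapper above is intended for the non-NMI interrupt entry path, once RCU is already watching (this is what the x86/entry work being merged here consumes). A minimal sketch of such a call site follows; the function name irqentry_enter_from_kernel() is an assumption made purely for illustration, not part of this commit:

	#include <linux/hardirq.h>

	// Hypothetical non-NMI irq entry path; the name is assumed for
	// illustration only.
	static void irqentry_enter_from_kernel(void)
	{
		// RCU is already watching here, so it is safe to ask it to
		// force the scheduler tick on if a nohz_full CPU owes a
		// quiescent state.  This is a no-op unless CONFIG_NO_HZ_FULL
		// is enabled and context tracking is active.
		rcu_irq_enter_check_tick();
	}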
include/linux/rcutiny.h
@@ -72,6 +72,7 @@ static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
 static inline void rcu_irq_exit_preempt(void) { }
+static inline void rcu_irq_exit_check_preempt(void) { }
 static inline void exit_rcu(void) { }
 static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
include/linux/rcutree.h
@@ -51,6 +51,12 @@ void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
 
+#ifdef CONFIG_PROVE_RCU
+void rcu_irq_exit_check_preempt(void);
+#else
+static inline void rcu_irq_exit_check_preempt(void) { }
+#endif
+
 void exit_rcu(void);
 void rcu_scheduler_starting(void);
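Note on usage: rcu_irq_exit_check_preempt() declared above does real validation only under CONFIG_PROVE_RCU and is meant for irq-exit paths that are about to schedule. A sketch of a plausible caller; irqentry_exit_cond_resched() is an assumed name for illustration, not part of this commit:

	#include <linux/preempt.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	// Hypothetical irq-exit path; the name is assumed for
	// illustration only.
	static void irqentry_exit_cond_resched(void)
	{
		if (!preempt_count()) {
			// No-op unless CONFIG_PROVE_RCU: complains via lockdep
			// if RCU's dynticks state says scheduling is unsafe.
			rcu_irq_exit_check_preempt();
			if (need_resched())
				preempt_schedule_irq();
		}
	}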
kernel/rcu/tree.c
@@ -778,6 +778,24 @@ void rcu_irq_exit_preempt(void)
"RCU in extended quiescent state!"); "RCU in extended quiescent state!");
} }
#ifdef CONFIG_PROVE_RCU
/**
* rcu_irq_exit_check_preempt - Validate that scheduling is possible
*/
void rcu_irq_exit_check_preempt(void)
{
lockdep_assert_irqs_disabled();
RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
"RCU dynticks_nesting counter underflow/zero!");
RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
DYNTICK_IRQ_NONIDLE,
"Bad RCU dynticks_nmi_nesting counter\n");
RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
"RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */
/* /*
* Wrapper for rcu_irq_exit() where interrupts are enabled. * Wrapper for rcu_irq_exit() where interrupts are enabled.
* *
@@ -861,6 +879,67 @@ void noinstr rcu_user_exit(void)
 {
 	rcu_eqs_exit(1);
 }
+
+/**
+ * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
+ *
+ * The scheduler tick is not normally enabled when CPUs enter the kernel
+ * from nohz_full userspace execution.  After all, nohz_full userspace
+ * execution is an RCU quiescent state and the time executing in the kernel
+ * is quite short.  Except of course when it isn't.  And it is not hard to
+ * cause a large system to spend tens of seconds or even minutes looping
+ * in the kernel, which can cause a number of problems, including RCU CPU
+ * stall warnings.
+ *
+ * Therefore, if a nohz_full CPU fails to report a quiescent state
+ * in a timely manner, the RCU grace-period kthread sets that CPU's
+ * ->rcu_urgent_qs flag with the expectation that the next interrupt or
+ * exception will invoke this function, which will turn on the scheduler
+ * tick, which will enable RCU to detect that CPU's quiescent states,
+ * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
+ * The tick will be disabled once a quiescent state is reported for
+ * this CPU.
+ *
+ * Of course, in carefully tuned systems, there might never be an
+ * interrupt or exception.  In that case, the RCU grace-period kthread
+ * will eventually cause one to happen.  However, in less carefully
+ * controlled environments, this function allows RCU to get what it
+ * needs without creating otherwise useless interruptions.
+ */
+void __rcu_irq_enter_check_tick(void)
+{
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
+	// Enabling the tick is unsafe in NMI handlers.
+	if (WARN_ON_ONCE(in_nmi()))
+		return;
+
+	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
+
+	if (!tick_nohz_full_cpu(rdp->cpu) ||
+	    !READ_ONCE(rdp->rcu_urgent_qs) ||
+	    READ_ONCE(rdp->rcu_forced_tick)) {
+		// RCU doesn't need nohz_full help from this CPU, or it is
+		// already getting that help.
+		return;
+	}
+
+	// We get here only when not in an extended quiescent state and
+	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
+	// already watching and (2) The fact that we are in an interrupt
+	// handler and that the rcu_node lock is an irq-disabled lock
+	// prevents self-deadlock.  So we can safely recheck under the lock.
+	// Note that the nohz_full state currently cannot change.
+	raw_spin_lock_rcu_node(rdp->mynode);
+	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+		// A nohz_full CPU is in the kernel and RCU needs a
+		// quiescent state.  Turn on the tick!
+		WRITE_ONCE(rdp->rcu_forced_tick, true);
+		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+	}
+	raw_spin_unlock_rcu_node(rdp->mynode);
+}
+
 #endif /* CONFIG_NO_HZ_FULL */
 
 /**
@@ -907,26 +986,7 @@ noinstr void rcu_nmi_enter(void)
 		incby = 1;
 	} else if (!in_nmi()) {
 		instrumentation_begin();
-		if (tick_nohz_full_cpu(rdp->cpu) &&
-		    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-		    READ_ONCE(rdp->rcu_urgent_qs) &&
-		    !READ_ONCE(rdp->rcu_forced_tick)) {
-			// We get here only if we had already exited the
-			// extended quiescent state and this was an
-			// interrupt (not an NMI).  Therefore, (1) RCU is
-			// already watching and (2) The fact that we are in
-			// an interrupt handler and that the rcu_node lock
-			// is an irq-disabled lock prevents self-deadlock.
-			// So we can safely recheck under the lock.
-			raw_spin_lock_rcu_node(rdp->mynode);
-			if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-				// A nohz_full CPU is in the kernel and RCU
-				// needs a quiescent state.  Turn on the tick!
-				WRITE_ONCE(rdp->rcu_forced_tick, true);
-				tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
-			}
-			raw_spin_unlock_rcu_node(rdp->mynode);
-		}
+		rcu_irq_enter_check_tick();
 		instrumentation_end();
 	}
 	instrumentation_begin();
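For context: the docbook comment above notes that the tick will be disabled again once a quiescent state is reported for the CPU. That happens on the quiescent-state side of kernel/rcu/tree.c, which is not shown in this diff; roughly like the following paraphrased sketch (details may differ from the tree at this commit):

	// Paraphrased from tree.c: drop the forced-tick dependency once
	// this CPU has reported a quiescent state.
	static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
	{
		WRITE_ONCE(rdp->rcu_urgent_qs, false);
		WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
		if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
			// Clear the TICK_DEP_BIT_RCU dependency set by
			// __rcu_irq_enter_check_tick(), letting the tick stop.
			tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
			WRITE_ONCE(rdp->rcu_forced_tick, false);
		}
	}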