Commit 5b72f964 authored by Paul E. McKenney

rcu: Complain if blocking in preemptible RCU read-side critical section

Although preemptible RCU allows its read-side critical sections to be
preempted, general blocking is forbidden.  The reason for this is that
excessive preemption times can be handled by CONFIG_RCU_BOOST=y, but a
voluntarily blocked task doesn't care how high you boost its priority.
Because preemptible RCU is a global mechanism, one ill-behaved reader
hurts everyone.  Hence the prohibition against general blocking in
RCU-preempt read-side critical sections.  Preemption yes, blocking no.
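
To make the distinction concrete, here is a minimal illustrative sketch (not part of this commit; the structure and function names are hypothetical). Being preempted anywhere inside the critical section is tolerated, and CONFIG_RCU_BOOST=y can recover from it, but the commented-out msleep() is exactly the kind of voluntary blocking that is prohibited:

	#include <linux/rcupdate.h>
	#include <linux/delay.h>

	/* Hypothetical shared structure, for illustration only. */
	struct my_state {
		int value;
	};

	static struct my_state __rcu *statep;

	static int read_shared_state(void)
	{
		struct my_state *s;
		int val = 0;

		rcu_read_lock();	/* Enter RCU-preempt read-side critical section. */
		s = rcu_dereference(statep);
		if (s)
			val = s->value;	/* Involuntary preemption here is tolerated. */
		/* msleep(10); */	/* Voluntary blocking here is forbidden. */
		rcu_read_unlock();
		return val;
	}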

This commit enforces this prohibition.

There is a special exception for the -rt patchset (which the -rt developers
kindly volunteered to implement):  It is OK to block (as opposed to merely being
preempted) within an RCU-preempt read-side critical section, but only if
the blocking is subject to priority inheritance.  This exception permits
CONFIG_RCU_BOOST=y to get -rt RCU readers out of trouble.

Why doesn't this exception also apply to mainline's rt_mutex?  Because
of the possibility that someone does general blocking while holding
an rt_mutex.  Yes, the priority boosting will affect the rt_mutex,
but it won't help with the task doing general blocking while holding
that rt_mutex.
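
For illustration only (again with hypothetical names, not code from this commit), this is the pattern that defeats boosting through a mainline rt_mutex: priority inheritance can boost the lock holder on behalf of a higher-priority waiter, but the boost accomplishes nothing once the holder has voluntarily gone to sleep on something unrelated:

	#include <linux/rtmutex.h>
	#include <linux/wait.h>

	/* Hypothetical lock, wait queue, and condition, for illustration only. */
	static DEFINE_RT_MUTEX(my_rtmutex);
	static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
	static bool my_condition;

	static void ill_behaved_holder(void)
	{
		rt_mutex_lock(&my_rtmutex);
		/*
		 * A higher-priority waiter on my_rtmutex can boost this
		 * task's priority, but the boost does not help here: the
		 * task is not starved of CPU, it is voluntarily asleep
		 * until my_condition becomes true.
		 */
		wait_event(my_waitq, my_condition);
		rt_mutex_unlock(&my_rtmutex);
	}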
Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 881ec9d2
@@ -478,7 +478,7 @@ void rcu_note_context_switch(bool preempt)
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs();
-	rcu_preempt_note_context_switch();
+	rcu_preempt_note_context_switch(preempt);
 	/* Load rcu_urgent_qs before other flags. */
 	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
 		goto out;
...
@@ -477,7 +477,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-static void rcu_preempt_note_context_switch(void);
+static void rcu_preempt_note_context_switch(bool preempt);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
...
@@ -286,12 +286,13 @@ static void rcu_preempt_qs(void)
  *
  * Caller must disable interrupts.
  */
-static void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(bool preempt)
 {
 	struct task_struct *t = current;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
+	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
 	if (t->rcu_read_lock_nesting > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {
...
@@ -738,7 +739,7 @@ static void __init rcu_bootup_announce(void)
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
-static void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(bool preempt)
 {
 }
...