Commit 19822e3e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'rcu-urgent.2022.12.17a' of...

Merge tag 'rcu-urgent.2022.12.17a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull RCU fix from Paul McKenney:
 "This fixes a lockdep false positive in synchronize_rcu() that can
  otherwise occur during early boot.

  The fix simply avoids invoking lockdep if the scheduler has not yet
  been initialized, that is, during that portion of boot when interrupts
  are disabled"

* tag 'rcu-urgent.2022.12.17a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  rcu: Don't assert interrupts enabled too early in boot
parents b6bb9676 3f6c3d29
...@@ -1362,7 +1362,7 @@ static void rcu_poll_gp_seq_start(unsigned long *snap) ...@@ -1362,7 +1362,7 @@ static void rcu_poll_gp_seq_start(unsigned long *snap)
{ {
struct rcu_node *rnp = rcu_get_root(); struct rcu_node *rnp = rcu_get_root();
if (rcu_init_invoked()) if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
raw_lockdep_assert_held_rcu_node(rnp); raw_lockdep_assert_held_rcu_node(rnp);
// If RCU was idle, note beginning of GP. // If RCU was idle, note beginning of GP.
...@@ -1378,7 +1378,7 @@ static void rcu_poll_gp_seq_end(unsigned long *snap) ...@@ -1378,7 +1378,7 @@ static void rcu_poll_gp_seq_end(unsigned long *snap)
{ {
struct rcu_node *rnp = rcu_get_root(); struct rcu_node *rnp = rcu_get_root();
if (rcu_init_invoked()) if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
raw_lockdep_assert_held_rcu_node(rnp); raw_lockdep_assert_held_rcu_node(rnp);
// If the previously noted GP is still in effect, record the // If the previously noted GP is still in effect, record the
...@@ -1401,7 +1401,8 @@ static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap) ...@@ -1401,7 +1401,8 @@ static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
struct rcu_node *rnp = rcu_get_root(); struct rcu_node *rnp = rcu_get_root();
if (rcu_init_invoked()) { if (rcu_init_invoked()) {
lockdep_assert_irqs_enabled(); if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
lockdep_assert_irqs_enabled();
raw_spin_lock_irqsave_rcu_node(rnp, flags); raw_spin_lock_irqsave_rcu_node(rnp, flags);
} }
rcu_poll_gp_seq_start(snap); rcu_poll_gp_seq_start(snap);
...@@ -1417,7 +1418,8 @@ static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap) ...@@ -1417,7 +1418,8 @@ static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
struct rcu_node *rnp = rcu_get_root(); struct rcu_node *rnp = rcu_get_root();
if (rcu_init_invoked()) { if (rcu_init_invoked()) {
lockdep_assert_irqs_enabled(); if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
lockdep_assert_irqs_enabled();
raw_spin_lock_irqsave_rcu_node(rnp, flags); raw_spin_lock_irqsave_rcu_node(rnp, flags);
} }
rcu_poll_gp_seq_end(snap); rcu_poll_gp_seq_end(snap);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment