Commit 52f2b34f authored by Ingo Molnar

Merge branch 'for-mingo' of...

Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu

Pull RCU fix from Paul E. McKenney:

 "This additional v4.18 pull request contains a single commit that fell
  through the cracks:

      Provide early rcu_cpu_starting() callback for the benefit of the
      x86/mtrr code, which needs RCU to be available on incoming CPUs
      earlier than has been the case in the past."
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 13a55319 f64c6013
@@ -46,6 +46,7 @@
 #include <linux/pci.h>
 #include <linux/smp.h>
 #include <linux/syscore_ops.h>
+#include <linux/rcupdate.h>
 
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
...
@@ -793,6 +794,9 @@ void mtrr_ap_init(void)
 
 	if (!use_intel() || mtrr_aps_delayed_init)
 		return;
+
+	rcu_cpu_starting(smp_processor_id());
+
 	/*
 	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
 	 * changed, but this routine will be called in cpu boot time,
...
@@ -108,7 +108,6 @@ void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
 
 #ifdef CONFIG_RCU_STALL_COMMON
...
@@ -132,5 +132,6 @@ static inline void rcu_all_qs(void) { barrier(); }
 #define rcutree_offline_cpu NULL
 #define rcutree_dead_cpu NULL
 #define rcutree_dying_cpu NULL
+static inline void rcu_cpu_starting(unsigned int cpu) { }
 
 #endif /* __LINUX_RCUTINY_H */
@@ -101,5 +101,6 @@ int rcutree_online_cpu(unsigned int cpu);
 int rcutree_offline_cpu(unsigned int cpu);
 int rcutree_dead_cpu(unsigned int cpu);
 int rcutree_dying_cpu(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
 
 #endif /* __LINUX_RCUTREE_H */
@@ -3665,6 +3665,8 @@ int rcutree_dead_cpu(unsigned int cpu)
 	return 0;
 }
 
+static DEFINE_PER_CPU(int, rcu_cpu_started);
+
 /*
  * Mark the specified CPU as being online so that subsequent grace periods
  * (both expedited and normal) will wait on it. Note that this means that
...
@@ -3686,6 +3688,11 @@ void rcu_cpu_starting(unsigned int cpu)
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 
+	if (per_cpu(rcu_cpu_started, cpu))
+		return;
+
+	per_cpu(rcu_cpu_started, cpu) = 1;
+
 	for_each_rcu_flavor(rsp) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
 		rnp = rdp->mynode;
...
@@ -3742,6 +3749,8 @@ void rcu_report_dead(unsigned int cpu)
 	preempt_enable();
 	for_each_rcu_flavor(rsp)
 		rcu_cleanup_dying_idle_cpu(cpu, rsp);
+
+	per_cpu(rcu_cpu_started, cpu) = 0;
 }
 
 /* Migrate the dead CPU's callbacks to the current CPU. */
...
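For readers skimming the diff, the heart of the fix is an idempotence guard: rcu_cpu_starting() may now run twice per online cycle, once early from mtrr_ap_init() and once from the normal CPU-hotplug path, so a per-CPU flag turns every call after the first into a no-op until rcu_report_dead() clears it. Below is a minimal, self-contained userspace C sketch of that same pattern; the names (cpu_starting(), cpu_dead(), NR_CPUS, cpu_started[]) are illustrative stand-ins, not kernel API, and a plain array stands in for real per-CPU storage.

    #include <stdio.h>

    #define NR_CPUS 4

    /* Stand-in for the patch's DEFINE_PER_CPU(int, rcu_cpu_started). */
    static int cpu_started[NR_CPUS];

    /*
     * Idempotent bring-up notification: safe to call from an early site
     * (mtrr_ap_init() in the patch) and again from the regular hotplug
     * path; only the first call per online cycle does real work.
     */
    static void cpu_starting(unsigned int cpu)
    {
            if (cpu_started[cpu])
                    return;         /* later calls are no-ops */
            cpu_started[cpu] = 1;

            printf("CPU %u: one-time online bookkeeping runs here\n", cpu);
    }

    /*
     * Teardown clears the flag so the next online cycle runs the
     * bookkeeping again (mirrors the rcu_report_dead() hunk above).
     */
    static void cpu_dead(unsigned int cpu)
    {
            cpu_started[cpu] = 0;
    }

    int main(void)
    {
            cpu_starting(1);        /* early caller, e.g. MTRR setup */
            cpu_starting(1);        /* normal hotplug path: no-op now */
            cpu_dead(1);            /* going offline resets the flag */
            cpu_starting(1);        /* next online cycle works again */
            return 0;
    }

The design choice here is to make the callback tolerant of duplicate invocation rather than make every caller coordinate about who notifies first, which keeps the early x86/mtrr call site trivial.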