Commit ce5215c1 authored by Paul E. McKenney

rcu/nocb: Use separate flag to indicate offloaded ->cblist

RCU callback processing currently uses rcu_is_nocb_cpu() to determine
whether or not the current CPU's callbacks are to be offloaded.
This works, but it is not so good for cache locality.  Plus use of
->cblist for offloaded callbacks will greatly increase the frequency
of these checks.  This commit therefore adds a ->offloaded flag to the
rcu_segcblist structure to provide a more flexible and cache-friendly
means of checking for callback offloading.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
parent 1bb5f9b9
...@@ -71,6 +71,7 @@ struct rcu_segcblist { ...@@ -71,6 +71,7 @@ struct rcu_segcblist {
long len; long len;
long len_lazy; long len_lazy;
u8 enabled; u8 enabled;
u8 offloaded;
}; };
#define RCU_SEGCBLIST_INITIALIZER(n) \ #define RCU_SEGCBLIST_INITIALIZER(n) \
......
...@@ -73,6 +73,18 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) ...@@ -73,6 +73,18 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
rsclp->enabled = 0; rsclp->enabled = 0;
} }
/*
 * Mark the specified rcu_segcblist structure as offloaded, so that
 * subsequent checks of ->offloaded report true.  The structure must be
 * empty at this point: all three emptiness/count checks below warn (once)
 * if any callbacks are present when offloading is enabled.
 */
void rcu_segcblist_offload(struct rcu_segcblist *rsclp)
{
	/* A non-empty list here indicates a caller bug; warn but proceed. */
	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
	WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
	rsclp->offloaded = 1;
}
/* /*
* Does the specified rcu_segcblist structure contain callbacks that * Does the specified rcu_segcblist structure contain callbacks that
* are ready to be invoked? * are ready to be invoked?
......
...@@ -66,6 +66,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) ...@@ -66,6 +66,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
return rsclp->enabled; return rsclp->enabled;
} }
/*
 * Report whether callbacks for the specified rcu_segcblist structure
 * have been marked as offloaded (see rcu_segcblist_offload()).
 */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	return rsclp->offloaded != 0;
}
/* /*
* Are all segments following the specified segment of the specified * Are all segments following the specified segment of the specified
* rcu_segcblist structure empty of callbacks? (The specified * rcu_segcblist structure empty of callbacks? (The specified
...@@ -78,6 +84,7 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) ...@@ -78,6 +84,7 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
void rcu_segcblist_init(struct rcu_segcblist *rsclp); void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp); void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp); bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp); bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp); struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
......
...@@ -2858,10 +2858,11 @@ void rcu_barrier(void) ...@@ -2858,10 +2858,11 @@ void rcu_barrier(void)
* corresponding CPU's preceding callbacks have been invoked. * corresponding CPU's preceding callbacks have been invoked.
*/ */
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
continue;
rdp = per_cpu_ptr(&rcu_data, cpu); rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_is_nocb_cpu(cpu)) { if (!cpu_online(cpu) &&
!rcu_segcblist_is_offloaded(&rdp->cblist))
continue;
if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
if (!rcu_nocb_cpu_needs_barrier(cpu)) { if (!rcu_nocb_cpu_needs_barrier(cpu)) {
rcu_barrier_trace(TPS("OfflineNoCB"), cpu, rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
rcu_state.barrier_sequence); rcu_state.barrier_sequence);
...@@ -3155,7 +3156,8 @@ void rcutree_migrate_callbacks(int cpu) ...@@ -3155,7 +3156,8 @@ void rcutree_migrate_callbacks(int cpu)
struct rcu_node *rnp_root = rcu_get_root(); struct rcu_node *rnp_root = rcu_get_root();
bool needwake; bool needwake;
if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist)) if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
rcu_segcblist_empty(&rdp->cblist))
return; /* No callbacks to migrate. */ return; /* No callbacks to migrate. */
local_irq_save(flags); local_irq_save(flags);
......
...@@ -1382,7 +1382,7 @@ static void rcu_prepare_for_idle(void) ...@@ -1382,7 +1382,7 @@ static void rcu_prepare_for_idle(void)
int tne; int tne;
lockdep_assert_irqs_disabled(); lockdep_assert_irqs_disabled();
if (rcu_is_nocb_cpu(smp_processor_id())) if (rcu_segcblist_is_offloaded(&rdp->cblist))
return; return;
/* Handle nohz enablement switches conservatively. */ /* Handle nohz enablement switches conservatively. */
...@@ -1431,8 +1431,10 @@ static void rcu_prepare_for_idle(void) ...@@ -1431,8 +1431,10 @@ static void rcu_prepare_for_idle(void)
*/ */
static void rcu_cleanup_after_idle(void) static void rcu_cleanup_after_idle(void)
{ {
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
lockdep_assert_irqs_disabled(); lockdep_assert_irqs_disabled();
if (rcu_is_nocb_cpu(smp_processor_id())) if (rcu_segcblist_is_offloaded(&rdp->cblist))
return; return;
if (rcu_try_advance_all_cbs()) if (rcu_try_advance_all_cbs())
invoke_rcu_core(); invoke_rcu_core();
...@@ -1694,7 +1696,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, ...@@ -1694,7 +1696,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy, unsigned long flags) bool lazy, unsigned long flags)
{ {
if (!rcu_is_nocb_cpu(rdp->cpu)) if (!rcu_segcblist_is_offloaded(&rdp->cblist))
return false; return false;
__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags); __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
if (__is_kfree_rcu_offset((unsigned long)rhp->func)) if (__is_kfree_rcu_offset((unsigned long)rhp->func))
...@@ -1729,7 +1731,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp, ...@@ -1729,7 +1731,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
unsigned long flags) unsigned long flags)
{ {
lockdep_assert_irqs_disabled(); lockdep_assert_irqs_disabled();
if (!rcu_is_nocb_cpu(smp_processor_id())) if (!rcu_segcblist_is_offloaded(&my_rdp->cblist))
return false; /* Not NOCBs CPU, caller must migrate CBs. */ return false; /* Not NOCBs CPU, caller must migrate CBs. */
__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist), __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
rcu_segcblist_tail(&rdp->cblist), rcu_segcblist_tail(&rdp->cblist),
...@@ -2192,6 +2194,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp) ...@@ -2192,6 +2194,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
} }
rcu_segcblist_init(&rdp->cblist); rcu_segcblist_init(&rdp->cblist);
rcu_segcblist_disable(&rdp->cblist); rcu_segcblist_disable(&rdp->cblist);
rcu_segcblist_offload(&rdp->cblist);
return true; return true;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment