Commit 20652ed6 authored by Ingo Molnar

Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu

Pull RCU fixes from Paul E. McKenney:

"This series adds a pair of commits that move function definitions
 from include/linux/rcu_segcblist.h to new kernel/rcu/rcu_segcblist.h
 and kernel/rcu/rcu_segcblist.c files, thus greatly decreasing the
 size of the externally visible include/linux/rcu_segcblist.h file."
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents b5fe223a 933dfbd7
@@ -573,6 +573,9 @@ config RCU_STALL_COMMON
 	  the tiny variants to disable RCU CPU stall warnings, while
 	  making these warnings mandatory for the tree variants.
 
+config RCU_NEED_SEGCBLIST
+	def_bool ( TREE_RCU || PREEMPT_RCU || TINY_SRCU || TREE_SRCU )
+
 config CONTEXT_TRACKING
 	bool
@@ -12,3 +12,4 @@ obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
 obj-$(CONFIG_TINY_RCU) += tiny.o
+obj-$(CONFIG_RCU_NEED_SEGCBLIST) += rcu_segcblist.o
/*
* RCU segmented callback lists, internal-to-rcu header file
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can access it online at
* http://www.gnu.org/licenses/gpl-2.0.html.
*
* Copyright IBM Corporation, 2017
*
* Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
#include <linux/rcu_segcblist.h>

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
	rclp->len_lazy--;
}
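For context, this lazy-count adjustment is meant to be paired with rcu_cblist_dequeue(). A minimal sketch of the intended calling pattern, modeled on the rcu_do_batch() loop in tree.c (treating __rcu_reclaim()'s return value as "this callback was lazy" is an assumption of this sketch):

	while ((rhp = rcu_cblist_dequeue(&rcl))) {
		debug_rcu_head_unqueue(rhp);
		/* Assumed: __rcu_reclaim() returns true for lazy (kfree) CBs. */
		if (__rcu_reclaim(rsp->name, rhp))
			rcu_cblist_dequeued_lazy(&rcl);
	}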
/*
 * Interim function to return rcu_cblist head pointer.  Longer term, the
 * rcu_cblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp)
{
	return rclp->head;
}

/*
 * Interim function to return rcu_cblist tail pointer.  Longer term, the
 * rcu_cblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp)
{
	WARN_ON_ONCE(!rclp->head);
	return rclp->tail;
}
void rcu_cblist_init(struct rcu_cblist *rclp);
long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
/*
* Is the specified rcu_segcblist structure empty?
*
* But careful! The fact that the ->head field is NULL does not
* necessarily imply that there are no callbacks associated with
* this structure. When callbacks are being invoked, they are
* removed as a group. If callback invocation must be preempted,
* the remaining callbacks will be added back to the list. Either
* way, the counts are updated later.
*
* So it is often the case that rcu_segcblist_n_cbs() should be used
* instead.
*/
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !rsclp->head;
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
	return READ_ONCE(rsclp->len);
}
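To make the caveat above concrete: a drain-completion test should gate on the count rather than the head pointer. A minimal sketch using a hypothetical helper (the name rcu_segcblist_fully_drained() is not part of this patch):

	/*
	 * Hypothetical helper: ->head can be NULL while extracted callbacks
	 * are still being invoked, so "fully drained" must use the count,
	 * which is updated only after invocation completes.
	 */
	static inline bool rcu_segcblist_fully_drained(struct rcu_segcblist *rsclp)
	{
		return !rcu_segcblist_n_cbs(rsclp);
	}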
/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len_lazy;
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len - rsclp->len_lazy;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline or callback-offloaded CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return !!rsclp->tails[RCU_NEXT_TAIL];
}
/*
* Are all segments following the specified segment of the specified
* rcu_segcblist structure empty of callbacks? (The specified
* segment might well contain callbacks.)
*/
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !*rsclp->tails[seg];
}
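As an illustration of how the segment predicates compose, here is a hedged sketch of a hypothetical helper (not part of this patch) built from rcu_segcblist_empty() and rcu_segcblist_restempty(); RCU_DONE_TAIL is the real segment index from include/linux/rcu_segcblist.h:

	/*
	 * Hypothetical helper: true if callbacks are present and every one
	 * of them has already passed through its grace period, that is,
	 * all callbacks live in the done segment.
	 */
	static inline bool rcu_segcblist_all_done(struct rcu_segcblist *rsclp)
	{
		return !rcu_segcblist_empty(rsclp) &&
		       rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
	}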
/*
 * Interim function to return rcu_segcblist head pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp)
{
	return rsclp->head;
}

/*
 * Interim function to return rcu_segcblist tail pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(rcu_segcblist_empty(rsclp));
	return rsclp->tails[RCU_NEXT_TAIL];
}
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_dequeue(struct rcu_segcblist *rsclp);
void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp,
				    unsigned long seq);
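Taken together, the functions declared above implement the callback lifecycle driven from tree.c. A simplified sketch of that flow, with locking, tracing, and the rdp/rnp/rsp plumbing elided (rcu_cbs_completed() here stands for tree.c's helper that computes a future grace-period number; treat the exact call sites as assumptions of this sketch):

	/* call_rcu() path: queue a new callback, noting whether it is lazy. */
	rcu_segcblist_enqueue(&rdp->cblist, rhp, lazy);

	/* Grace period ended: advance callbacks whose GP number has elapsed. */
	rcu_segcblist_advance(&rdp->cblist, rnp->completed);

	/* Assign not-yet-assigned callbacks to a future grace period. */
	needwake = rcu_segcblist_accelerate(&rdp->cblist,
					    rcu_cbs_completed(rsp, rnp));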
@@ -30,6 +30,7 @@
 #include <linux/srcu.h>
 #include <linux/rcu_node_tree.h>
+#include "rcu_segcblist.h"
 #include "rcu.h"
 
 static int init_srcu_struct_fields(struct srcu_struct *sp)
@@ -38,6 +38,7 @@
 #include <linux/srcu.h>
 #include "rcu.h"
+#include "rcu_segcblist.h"
 
 ulong exp_holdoff = 25 * 1000; /* Holdoff (ns) for auto-expediting. */
 module_param(exp_holdoff, ulong, 0444);
@@ -2633,9 +2633,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 		return;
 
 	/* Do the accounting first. */
-	rdp->n_cbs_adopted += rcu_cblist_n_cbs(&rsp->orphan_done);
-	if (rcu_cblist_n_lazy_cbs(&rsp->orphan_done) !=
-	    rcu_cblist_n_cbs(&rsp->orphan_done))
+	rdp->n_cbs_adopted += rsp->orphan_done.len;
+	if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
 		rcu_idle_count_callbacks_posted();
 	rcu_segcblist_insert_count(&rdp->cblist, &rsp->orphan_done);
@@ -2647,9 +2646,9 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 	/* First adopt the ready-to-invoke callbacks, then the done ones. */
 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rsp->orphan_done);
-	WARN_ON_ONCE(!rcu_cblist_empty(&rsp->orphan_done));
+	WARN_ON_ONCE(rsp->orphan_done.head);
 	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
-	WARN_ON_ONCE(!rcu_cblist_empty(&rsp->orphan_pend));
+	WARN_ON_ONCE(rsp->orphan_pend.head);
 	WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) !=
 		     !rcu_segcblist_n_cbs(&rdp->cblist));
 }
@@ -2792,17 +2791,16 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		 * Stop only if limit reached and CPU has something to do.
 		 * Note: The rcl structure counts down from zero.
 		 */
-		if (-rcu_cblist_n_cbs(&rcl) >= bl &&
+		if (-rcl.len >= bl &&
 		    (need_resched() ||
 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
 			break;
 	}
 
 	local_irq_save(flags);
-	count = -rcu_cblist_n_cbs(&rcl);
-	trace_rcu_batch_end(rsp->name, count, !rcu_cblist_empty(&rcl),
-			    need_resched(), is_idle_task(current),
-			    rcu_is_callbacks_kthread());
+	count = -rcl.len;
+	trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
+			    is_idle_task(current), rcu_is_callbacks_kthread());
 
 	/* Update counts and requeue any remaining callbacks. */
 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
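The "counts down from zero" note is the key to this hunk: rcu_cblist_init() zeroes ->len, rcu_segcblist_extract_done_cbs() moves callbacks without moving counts, and each rcu_cblist_dequeue() decrements ->len, so -rcl.len is the number of callbacks invoked. A minimal sketch of that arithmetic, simplified from rcu_do_batch() (lazy accounting and the batch-limit check omitted):

	rcu_cblist_init(&rcl);				/* ->len == 0 */
	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
	while ((rhp = rcu_cblist_dequeue(&rcl)))	/* ->len: 0, -1, -2, ... */
		__rcu_reclaim(rsp->name, rhp);
	count = -rcl.len;				/* callbacks invoked */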
@@ -30,9 +30,10 @@
 #include <linux/seqlock.h>
 #include <linux/swait.h>
 #include <linux/stop_machine.h>
-#include <linux/rcu_segcblist.h>
 #include <linux/rcu_node_tree.h>
 
+#include "rcu_segcblist.h"
+
 /*
  * Dynticks per-CPU state.
  */
@@ -1934,20 +1934,20 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 						     struct rcu_data *rdp,
 						     unsigned long flags)
 {
-	long ql = rcu_cblist_n_cbs(&rsp->orphan_done);
-	long qll = rcu_cblist_n_lazy_cbs(&rsp->orphan_done);
+	long ql = rsp->orphan_done.len;
+	long qll = rsp->orphan_done.len_lazy;
 
 	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
 	if (!rcu_is_nocb_cpu(smp_processor_id()))
 		return false;
 
 	/* First, enqueue the donelist, if any.  This preserves CB ordering. */
-	if (!rcu_cblist_empty(&rsp->orphan_done)) {
+	if (rsp->orphan_done.head) {
 		__call_rcu_nocb_enqueue(rdp, rcu_cblist_head(&rsp->orphan_done),
 					rcu_cblist_tail(&rsp->orphan_done),
 					ql, qll, flags);
 	}
-	if (!rcu_cblist_empty(&rsp->orphan_pend)) {
+	if (rsp->orphan_pend.head) {
 		__call_rcu_nocb_enqueue(rdp, rcu_cblist_head(&rsp->orphan_pend),
 					rcu_cblist_tail(&rsp->orphan_pend),
 					ql, qll, flags);
@@ -277,8 +277,8 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 		   rsp->n_force_qs, rsp->n_force_qs_ngp,
 		   rsp->n_force_qs - rsp->n_force_qs_ngp,
 		   READ_ONCE(rsp->n_force_qs_lh),
-		   rcu_cblist_n_lazy_cbs(&rsp->orphan_done),
-		   rcu_cblist_n_cbs(&rsp->orphan_done));
+		   rsp->orphan_done.len_lazy,
+		   rsp->orphan_done.len);
 	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
 		if (rnp->level != level) {
 			seq_puts(m, "\n");