Commit ab3d681e authored by Linus Torvalds

Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RCU updates from Ingo Molnar:
 "The major changes:

  - Simplify RCU's grace-period and callback processing based on the new
    numbering for callbacks.

  - Removal of TINY_PREEMPT_RCU in favor of TREE_PREEMPT_RCU for
    single-CPU low-latency systems.

  - SRCU-related changes and fixes.

  - Miscellaneous fixes, including converting a few remaining printk()
    calls to pr_*().

  - Documentation updates"

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
  rcu: Shrink TINY_RCU by reworking CPU-stall ifdefs
  rcu: Shrink TINY_RCU by moving exit_rcu()
  rcu: Remove TINY_PREEMPT_RCU tracing documentation
  rcu: Consolidate rcutiny_plugin.h ifdefs
  rcu: Remove rcu_preempt_note_context_switch()
  rcu: Remove the CONFIG_TINY_RCU ifdefs in rcutiny.h
  rcu: Remove check_cpu_stall_preempt()
  rcu: Simplify RCU_TINY RCU callback invocation
  rcu: Remove rcu_preempt_process_callbacks()
  rcu: Remove rcu_preempt_remove_callbacks()
  rcu: Remove rcu_preempt_check_callbacks()
  rcu: Remove show_tiny_preempt_stats()
  rcu: Remove TINY_PREEMPT_RCU
  powerpc,kvm: fix imbalance srcu_read_[un]lock()
  rcu: Remove srcu_read_lock_raw() and srcu_read_unlock_raw().
  rcu: Apply Dave Jones's NOCB Kconfig help feedback
  rcu: Merge adjacent identical ifdefs
  rcu: Drive quiescent-state-forcing delay from HZ
  rcu: Remove "Experimental" flags
  kthread: Add kworker kthreads to OS-jitter documentation
  ...
parents 0c46d68d b1fe9987
@@ -354,12 +354,6 @@ over a rather long period of time, but improvements are always welcome!
using RCU rather than SRCU, because RCU is almost always faster
and easier to use than is SRCU.
If you need to enter your read-side critical section in a
hardirq or exception handler, and then exit that same read-side
critical section in the task that was interrupted, then you need
to use srcu_read_lock_raw() and srcu_read_unlock_raw(), which avoid
the lockdep checking that would otherwise make this practice illegal.
Also unlike other forms of RCU, explicit initialization
and cleanup is required via init_srcu_struct() and
cleanup_srcu_struct(). These are passed a "struct srcu_struct"
...
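To make the init/cleanup rules above concrete, here is a minimal sketch of an SRCU user; the domain and function names are invented for illustration, but init_srcu_struct(), srcu_read_lock()/srcu_read_unlock(), synchronize_srcu(), and cleanup_srcu_struct() are the real SRCU entry points.

static struct srcu_struct example_srcu;          /* hypothetical SRCU domain */

static int example_init(void)
{
	return init_srcu_struct(&example_srcu);  /* explicit initialization */
}

static void example_reader(void)
{
	int idx = srcu_read_lock(&example_srcu); /* SRCU readers may block */
	/* ... access data via srcu_dereference() here ... */
	srcu_read_unlock(&example_srcu, idx);
}

static void example_exit(void)
{
	synchronize_srcu(&example_srcu);         /* wait for pre-existing readers */
	cleanup_srcu_struct(&example_srcu);      /* explicit cleanup */
}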
@@ -182,12 +182,6 @@ torture_type The type of RCU to test, with string values as follows:
"srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
synchronize_srcu_expedited().
"srcu_raw": srcu_read_lock_raw(), srcu_read_unlock_raw(),
and call_srcu().
"srcu_raw_sync": srcu_read_lock_raw(), srcu_read_unlock_raw(),
and synchronize_srcu().
"sched": preempt_disable(), preempt_enable(), and
call_rcu_sched().
...
@@ -530,113 +530,21 @@ o "nos" counts the number of times we balked for other
reasons, e.g., the grace period ended first.
CONFIG_TINY_RCU and CONFIG_TINY_PREEMPT_RCU debugfs Files and Formats
CONFIG_TINY_RCU debugfs Files and Formats
These implementations of RCU provides a single debugfs file under the
top-level directory RCU, namely rcu/rcudata, which displays fields in
rcu_bh_ctrlblk, rcu_sched_ctrlblk and, for CONFIG_TINY_PREEMPT_RCU,
rcu_preempt_ctrlblk.
rcu_bh_ctrlblk and rcu_sched_ctrlblk.
The output of "cat rcu/rcudata" is as follows:
rcu_preempt: qlen=24 gp=1097669 g197/p197/c197 tasks=...
ttb=. btg=no ntb=184 neb=0 nnb=183 j=01f7 bt=0274
normal balk: nt=1097669 gt=0 bt=371 b=0 ny=25073378 nos=0
exp balk: bt=0 nos=0
rcu_sched: qlen: 0
rcu_bh: qlen: 0
This is split into rcu_preempt, rcu_sched, and rcu_bh sections, with the
rcu_preempt section appearing only in CONFIG_TINY_PREEMPT_RCU builds.
The last three lines of the rcu_preempt section appear only in
CONFIG_RCU_BOOST kernel builds. The fields are as follows:
This is split into rcu_sched and rcu_bh sections. The field is as
follows:
o "qlen" is the number of RCU callbacks currently waiting either
for an RCU grace period or waiting to be invoked. This is the
only field present for rcu_sched and rcu_bh, due to the
short-circuiting of grace period in those two cases.
o "gp" is the number of grace periods that have completed.
o "g197/p197/c197" displays the grace-period state, with the
"g" number being the number of grace periods that have started
(mod 256), the "p" number being the number of grace periods
that the CPU has responded to (also mod 256), and the "c"
number being the number of grace periods that have completed
(once again mod 256).
Why have both "gp" and "g"? Because the data flowing into
"gp" is only present in a CONFIG_RCU_TRACE kernel.
o "tasks" is a set of bits. The first bit is "T" if there are
currently tasks that have recently blocked within an RCU
read-side critical section, the second bit is "N" if any of the
aforementioned tasks are blocking the current RCU grace period,
and the third bit is "E" if any of the aforementioned tasks are
blocking the current expedited grace period. Each bit is "."
if the corresponding condition does not hold.
o "ttb" is a single bit. It is "B" if any of the blocked tasks
need to be priority boosted and "." otherwise.
o "btg" indicates whether boosting has been carried out during
the current grace period, with "exp" indicating that boosting
is in progress for an expedited grace period, "no" indicating
that boosting has not yet started for a normal grace period,
"begun" indicating that boosting has bebug for a normal grace
period, and "done" indicating that boosting has completed for
a normal grace period.
o "ntb" is the total number of tasks subjected to RCU priority boosting
periods since boot.
o "neb" is the number of expedited grace periods that have had
to resort to RCU priority boosting since boot.
o "nnb" is the number of normal grace periods that have had
to resort to RCU priority boosting since boot.
o "j" is the low-order 16 bits of the jiffies counter in hexadecimal.
o "bt" is the low-order 16 bits of the value that the jiffies counter
will have at the next time that boosting is scheduled to begin.
o In the line beginning with "normal balk", the fields are as follows:
o "nt" is the number of times that the system balked from
boosting because there were no blocked tasks to boost.
Note that the system will balk from boosting even if the
grace period is overdue when the currently running task
is looping within an RCU read-side critical section.
There is no point in boosting in this case, because
boosting a running task won't make it run any faster.
o "gt" is the number of times that the system balked
from boosting because, although there were blocked tasks,
none of them were preventing the current grace period
from completing.
o "bt" is the number of times that the system balked
from boosting because boosting was already in progress.
o "b" is the number of times that the system balked from
boosting because boosting had already completed for
the grace period in question.
o "ny" is the number of times that the system balked from
boosting because it was not yet time to start boosting
the grace period in question.
o "nos" is the number of times that the system balked from
boosting for inexplicable ("not otherwise specified")
reasons. This can actually happen due to races involving
increments of the jiffies counter.
o In the line beginning with "exp balk", the fields are as follows:
o "bt" is the number of times that the system balked from
boosting because there were no blocked tasks to boost.
o "nos" is the number of times that the system balked from
boosting for inexplicable ("not otherwise specified")
reasons.
@@ -842,9 +842,7 @@ SRCU: Critical sections Grace period Barrier
srcu_read_lock synchronize_srcu srcu_barrier
srcu_read_unlock call_srcu
srcu_read_lock_raw synchronize_srcu_expedited
srcu_read_unlock_raw
srcu_dereference
srcu_dereference synchronize_srcu_expedited
SRCU: Initialization/cleanup
init_srcu_struct
@@ -865,38 +863,32 @@ list can be helpful:
a. Will readers need to block? If so, you need SRCU.
b. Is it necessary to start a read-side critical section in a
hardirq handler or exception handler, and then to complete
this read-side critical section in the task that was
interrupted? If so, you need SRCU's srcu_read_lock_raw() and
srcu_read_unlock_raw() primitives.
c. What about the -rt patchset? If readers would need to block
b. What about the -rt patchset? If readers would need to block
in an non-rt kernel, you need SRCU. If readers would block
in a -rt kernel, but not in a non-rt kernel, SRCU is not
necessary.
d. Do you need to treat NMI handlers, hardirq handlers,
c. Do you need to treat NMI handlers, hardirq handlers,
and code segments with preemption disabled (whether
via preempt_disable(), local_irq_save(), local_bh_disable(),
or some other mechanism) as if they were explicit RCU readers?
If so, RCU-sched is the only choice that will work for you.
e. Do you need RCU grace periods to complete even in the face
d. Do you need RCU grace periods to complete even in the face
of softirq monopolization of one or more of the CPUs? For
example, is your code subject to network-based denial-of-service
attacks? If so, you need RCU-bh.
f. Is your workload too update-intensive for normal use of
e. Is your workload too update-intensive for normal use of
RCU, but inappropriate for other synchronization mechanisms?
If so, consider SLAB_DESTROY_BY_RCU. But please be careful!
g. Do you need read-side critical sections that are respected
f. Do you need read-side critical sections that are respected
even though they are in the middle of the idle loop, during
user-mode execution, or on an offlined CPU? If so, SRCU is the
only choice that will work for you.
h. Otherwise, use RCU.
g. Otherwise, use RCU.
Of course, this all assumes that you have determined that RCU is in fact
the right tool for your job.
...
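For reference, the classic (non-SRCU, non-sched, non-bh) pattern that the final item of the checklist above points at looks roughly like the sketch below. The gp_data structure and the two functions are invented for the example; rcu_read_lock(), rcu_dereference(), rcu_assign_pointer(), and synchronize_rcu() are the standard API (assumes <linux/rcupdate.h> and <linux/slab.h>, and that updaters are serialized by some other lock).

struct gp_data {                        /* hypothetical RCU-protected object */
	int value;
};
static struct gp_data __rcu *gp;

static int example_reader(void)
{
	struct gp_data *p;
	int v = -1;

	rcu_read_lock();                /* readers must not block in this flavor */
	p = rcu_dereference(gp);
	if (p)
		v = p->value;
	rcu_read_unlock();
	return v;
}

static void example_update(struct gp_data *newp)
{
	struct gp_data *oldp = rcu_dereference_protected(gp, 1);

	rcu_assign_pointer(gp, newp);   /* publish the new version */
	synchronize_rcu();              /* wait for pre-existing readers */
	kfree(oldp);                    /* now safe to free the old version */
}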
@@ -157,6 +157,53 @@ RCU_SOFTIRQ: Do at least one of the following:
calls and by forcing both kernel threads and interrupts
to execute elsewhere.
Name: kworker/%u:%d%s (cpu, id, priority)
Purpose: Execute workqueue requests
To reduce its OS jitter, do any of the following:
1. Run your workload at a real-time priority, which will allow
preempting the kworker daemons.
2. Do any of the following needed to avoid jitter that your
application cannot tolerate:
a. Build your kernel with CONFIG_SLUB=y rather than
CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
use of each CPU's workqueues to run its cache_reap()
function.
b. Avoid using oprofile, thus avoiding OS jitter from
wq_sync_buffer().
c. Limit your CPU frequency so that a CPU-frequency
governor is not required, possibly enlisting the aid of
special heatsinks or other cooling technologies. If done
correctly, and if your CPU architecture permits, you should
be able to build your kernel with CONFIG_CPU_FREQ=n to
avoid the CPU-frequency governor periodically running
on each CPU, including cs_dbs_timer() and od_dbs_timer().
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
d. It is not possible to entirely get rid of OS jitter
from vmstat_update() on CONFIG_SMP=y systems, but you
can decrease its frequency by writing a large value to
/proc/sys/vm/stat_interval. The default value is HZ,
for an interval of one second. Of course, larger values
will make your virtual-memory statistics update more
slowly. Of course, you can also run your workload at
a real-time priority, thus preempting vmstat_update().
e. If running on high-end powerpc servers, build with
CONFIG_PPC_RTAS_DAEMON=n. This prevents the RTAS
daemon from running on each CPU every second or so.
(This will require editing Kconfig files and will defeat
this platform's RAS functionality.) This avoids jitter
due to the rtas_event_scan() function.
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
f. If running on Cell Processor, build your kernel with
CBE_CPUFREQ_SPU_GOVERNOR=n to avoid OS jitter from
spu_gov_work().
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
g. If running on PowerMAC, build your kernel with
CONFIG_PMAC_RACKMETER=n to disable the CPU-meter,
avoiding OS jitter from rackmeter_do_timer().
Name: rcuc/%u
Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
To reduce its OS jitter, do at least one of the following:
...
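Item 1 in the new kworker advice above ("run your workload at a real-time priority") can be done from the application itself; a minimal userspace sketch follows, using the standard sched_setscheduler() call. The priority value 50 is an arbitrary example and root (or CAP_SYS_NICE) is assumed.

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 50 }; /* example priority */

	/* Switch the calling task to SCHED_FIFO so it can preempt kworkers. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
		perror("sched_setscheduler");
		return 1;
	}
	/* ... run the latency-sensitive work here ... */
	return 0;
}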
@@ -7,21 +7,59 @@ efficiency and reducing OS jitter. Reducing OS jitter is important for
some types of computationally intensive high-performance computing (HPC)
applications and for real-time applications.
There are two main contexts in which the number of scheduling-clock
interrupts can be reduced compared to the old-school approach of sending
a scheduling-clock interrupt to all CPUs every jiffy whether they need
it or not (CONFIG_HZ_PERIODIC=y or CONFIG_NO_HZ=n for older kernels):
There are three main ways of managing scheduling-clock interrupts
(also known as "scheduling-clock ticks" or simply "ticks"):
1. Idle CPUs (CONFIG_NO_HZ_IDLE=y or CONFIG_NO_HZ=y for older kernels).
1. Never omit scheduling-clock ticks (CONFIG_HZ_PERIODIC=y or
CONFIG_NO_HZ=n for older kernels). You normally will -not-
want to choose this option.
2. CPUs having only one runnable task (CONFIG_NO_HZ_FULL=y).
2. Omit scheduling-clock ticks on idle CPUs (CONFIG_NO_HZ_IDLE=y or
CONFIG_NO_HZ=y for older kernels). This is the most common
approach, and should be the default.
These two cases are described in the following two sections, followed
3. Omit scheduling-clock ticks on CPUs that are either idle or that
have only one runnable task (CONFIG_NO_HZ_FULL=y). Unless you
are running realtime applications or certain types of HPC
workloads, you will normally -not- want this option.
These three cases are described in the following three sections, followed
by a third section on RCU-specific considerations and a fourth and final
section listing known issues.
IDLE CPUs
NEVER OMIT SCHEDULING-CLOCK TICKS
Very old versions of Linux from the 1990s and the very early 2000s
are incapable of omitting scheduling-clock ticks. It turns out that
there are some situations where this old-school approach is still the
right approach, for example, in heavy workloads with lots of tasks
that use short bursts of CPU, where there are very frequent idle
periods, but where these idle periods are also quite short (tens or
hundreds of microseconds). For these types of workloads, scheduling
clock interrupts will normally be delivered anyway because there
will frequently be multiple runnable tasks per CPU. In these cases,
attempting to turn off the scheduling clock interrupt will have no effect
other than increasing the overhead of switching to and from idle and
transitioning between user and kernel execution.
This mode of operation can be selected using CONFIG_HZ_PERIODIC=y (or
CONFIG_NO_HZ=n for older kernels).
However, if you are instead running a light workload with long idle
periods, failing to omit scheduling-clock interrupts will result in
excessive power consumption. This is especially bad on battery-powered
devices, where it results in extremely short battery lifetimes. If you
are running light workloads, you should therefore read the following
section.
In addition, if you are running either a real-time workload or an HPC
workload with short iterations, the scheduling-clock interrupts can
degrade your application's performance. If this describes your workload,
you should read the following two sections.
OMIT SCHEDULING-CLOCK TICKS FOR IDLE CPUs
If a CPU is idle, there is little point in sending it a scheduling-clock
interrupt. After all, the primary purpose of a scheduling-clock interrupt
@@ -59,10 +97,12 @@ By default, CONFIG_NO_HZ_IDLE=y kernels boot with "nohz=on", enabling
dyntick-idle mode.
CPUs WITH ONLY ONE RUNNABLE TASK
OMIT SCHEDULING-CLOCK TICKS FOR CPUs WITH ONLY ONE RUNNABLE TASK
If a CPU has only one runnable task, there is little point in sending it
a scheduling-clock interrupt because there is no other task to switch to.
Note that omitting scheduling-clock ticks for CPUs with only one runnable
task implies also omitting them for idle CPUs.
The CONFIG_NO_HZ_FULL=y Kconfig option causes the kernel to avoid
sending scheduling-clock interrupts to CPUs with a single runnable task,
@@ -238,6 +278,11 @@ o Adaptive-ticks does not do anything unless there is only one
single runnable SCHED_FIFO task and multiple runnable SCHED_OTHER
tasks, even though these interrupts are unnecessary.
And even when there are multiple runnable tasks on a given CPU,
there is little point in interrupting that CPU until the current
running task's timeslice expires, which is almost always way
longer than the time of the next scheduling-clock interrupt.
Better handling of these sorts of situations is future work.
o A reboot is required to reconfigure both adaptive idle and RCU
@@ -268,6 +313,16 @@ o Unless all CPUs are idle, at least one CPU must keep the
scheduling-clock interrupt going in order to support accurate
timekeeping.
o If there are adaptive-ticks CPUs, there will be at least one
CPU keeping the scheduling-clock interrupt going, even if all
CPUs are otherwise idle.
o If there might potentially be some adaptive-ticks CPUs, there
will be at least one CPU keeping the scheduling-clock interrupt
going, even if all CPUs are otherwise idle.
Better handling of this situation is ongoing work.
o Some process-handling operations still require the occasional
scheduling-clock tick. These operations include calculating CPU
load, maintaining sched average, computing CFS entity vruntime,
computing avenrun, and carrying out load balancing. They are
currently accommodated by scheduling-clock tick every second
or so. On-going work will eliminate the need even for these
infrequent scheduling-clock ticks.
@@ -1864,7 +1864,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
up_out:
up_read(&current->mm->mmap_sem);
goto out;
goto out_srcu;
}
int kvmppc_core_init_vm(struct kvm *kvm)
...
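The one-line change above retargets an error path so that the srcu_read_lock() taken earlier in kvmppc_hv_setup_htab_rma() is released via the out_srcu label instead of being skipped. A hypothetical sketch of the general pattern this restores is below; the helper name is invented and this is not the actual KVM code, but srcu_read_lock()/srcu_read_unlock() on kvm->srcu are the real primitives.

static int example_setup(struct kvm *kvm)
{
	int srcu_idx, err;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	err = example_do_work(kvm);        /* hypothetical helper */
	if (err)
		goto out_srcu;             /* error paths must still unlock */
	/* ... more work under the SRCU read-side lock ... */
out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}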
@@ -128,7 +128,7 @@ extern void synchronize_irq(unsigned int irq);
# define synchronize_irq(irq) barrier()
#endif
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#if defined(CONFIG_TINY_RCU)
static inline void rcu_nmi_enter(void)
{
...
@@ -216,6 +216,7 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/* Internal to kernel */
extern void rcu_init(void);
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
@@ -239,8 +240,6 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */
extern void exit_rcu(void);
/**
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
@@ -277,7 +276,7 @@ void wait_rcu_gp(call_rcu_func_t crf);
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
...
@@ -27,10 +27,6 @@
#include <linux/cache.h>
static inline void rcu_init(void)
{
}
static inline void rcu_barrier_bh(void)
{
wait_rcu_gp(call_rcu_bh);
@@ -41,8 +37,6 @@ static inline void rcu_barrier_sched(void)
wait_rcu_gp(call_rcu_sched);
}
#ifdef CONFIG_TINY_RCU
static inline void synchronize_rcu_expedited(void)
{
synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
@@ -53,17 +47,6 @@ static inline void rcu_barrier(void)
rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
}
#else /* #ifdef CONFIG_TINY_RCU */
void synchronize_rcu_expedited(void);
static inline void rcu_barrier(void)
{
wait_rcu_gp(call_rcu);
}
#endif /* #else #ifdef CONFIG_TINY_RCU */
static inline void synchronize_rcu_bh(void)
{
synchronize_sched();
@@ -85,35 +68,15 @@ static inline void kfree_call_rcu(struct rcu_head *head,
call_rcu(head, func);
}
#ifdef CONFIG_TINY_RCU
static inline void rcu_preempt_note_context_switch(void)
{
}
static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
*delta_jiffies = ULONG_MAX;
return 0;
}
#else /* #ifdef CONFIG_TINY_RCU */
void rcu_preempt_note_context_switch(void);
int rcu_preempt_needs_cpu(void);
static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
*delta_jiffies = ULONG_MAX;
return rcu_preempt_needs_cpu();
}
#endif /* #else #ifdef CONFIG_TINY_RCU */
static inline void rcu_note_context_switch(int cpu)
{
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch();
}
/*
@@ -156,6 +119,10 @@ static inline void rcu_cpu_stall_reset(void)
{
}
static inline void exit_rcu(void)
{
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);
...
@@ -30,7 +30,6 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
extern void rcu_init(void);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
extern void rcu_cpu_stall_reset(void);
@@ -86,6 +85,8 @@ extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
extern void exit_rcu(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
...
@@ -237,47 +237,4 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__srcu_read_unlock(sp, idx);
}
/**
* srcu_read_lock_raw - register a new reader for an SRCU-protected structure.
* @sp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section. Similar to srcu_read_lock(),
* but avoids the RCU-lockdep checking. This means that it is legal to
* use srcu_read_lock_raw() in one context, for example, in an exception
* handler, and then have the matching srcu_read_unlock_raw() in another
* context, for example in the task that took the exception.
*
* However, the entire SRCU read-side critical section must reside within a
* single task. For example, beware of using srcu_read_lock_raw() in
* a device interrupt handler and srcu_read_unlock() in the interrupted
* task: This will not work if interrupts are threaded.
*/
static inline int srcu_read_lock_raw(struct srcu_struct *sp)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __srcu_read_lock(sp);
local_irq_restore(flags);
return ret;
}
/**
* srcu_read_unlock_raw - unregister reader from an SRCU-protected structure.
* @sp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock_raw().
*
* Exit an SRCU read-side critical section without lockdep-RCU checking.
* See srcu_read_lock_raw() for more details.
*/
static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx)
{
unsigned long flags;
local_irq_save(flags);
__srcu_read_unlock(sp, idx);
local_irq_restore(flags);
}
#endif
@@ -473,18 +473,10 @@ config TINY_RCU
is not required. This option greatly reduces the
memory footprint of RCU.
config TINY_PREEMPT_RCU
bool "Preemptible UP-only small-memory-footprint RCU"
depends on PREEMPT && !SMP
help
This option selects the RCU implementation that is designed
for real-time UP systems. This option greatly reduces the
memory footprint of RCU.
endchoice
config PREEMPT_RCU
def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU )
def_bool TREE_PREEMPT_RCU
help
This option enables preemptible-RCU code that is common between
the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
@@ -670,7 +662,7 @@ config RCU_BOOST_DELAY
Accept the default if unsure.
config RCU_NOCB_CPU
bool "Offload RCU callback processing from boot-selected CPUs (EXPERIMENTAL"
bool "Offload RCU callback processing from boot-selected CPUs"
depends on TREE_RCU || TREE_PREEMPT_RCU
default n
help
@@ -696,9 +688,10 @@ choice
prompt "Build-forced no-CBs CPUs"
default RCU_NOCB_CPU_NONE
help
This option allows no-CBs CPUs to be specified at build time.
Additional no-CBs CPUs may be specified by the rcu_nocbs=
boot parameter.
This option allows no-CBs CPUs (whose RCU callbacks are invoked
from kthreads rather than from softirq context) to be specified
at build time. Additional no-CBs CPUs may be specified by
the rcu_nocbs= boot parameter.
config RCU_NOCB_CPU_NONE
bool "No build_forced no-CBs CPUs"
@@ -706,25 +699,40 @@ config RCU_NOCB_CPU_NONE
help
This option does not force any of the CPUs to be no-CBs CPUs.
Only CPUs designated by the rcu_nocbs= boot parameter will be
no-CBs CPUs.
no-CBs CPUs, whose RCU callbacks will be invoked by per-CPU
kthreads whose names begin with "rcuo". All other CPUs will
invoke their own RCU callbacks in softirq context.
Select this option if you want to choose no-CBs CPUs at
boot time, for example, to allow testing of different no-CBs
configurations without having to rebuild the kernel each time.
config RCU_NOCB_CPU_ZERO
bool "CPU 0 is a build_forced no-CBs CPU"
depends on RCU_NOCB_CPU && !NO_HZ_FULL
help
This option forces CPU 0 to be a no-CBs CPU. Additional CPUs
may be designated as no-CBs CPUs using the rcu_nocbs= boot
parameter will be no-CBs CPUs.
This option forces CPU 0 to be a no-CBs CPU, so that its RCU
callbacks are invoked by a per-CPU kthread whose name begins
with "rcuo". Additional CPUs may be designated as no-CBs
CPUs using the rcu_nocbs= boot parameter will be no-CBs CPUs.
All other CPUs will invoke their own RCU callbacks in softirq
context.
Select this if CPU 0 needs to be a no-CBs CPU for real-time
or energy-efficiency reasons.
or energy-efficiency reasons, but the real reason it exists
is to ensure that randconfig testing covers mixed systems.
config RCU_NOCB_CPU_ALL
bool "All CPUs are build_forced no-CBs CPUs"
depends on RCU_NOCB_CPU
help
This option forces all CPUs to be no-CBs CPUs. The rcu_nocbs=
boot parameter will be ignored.
boot parameter will be ignored. All CPUs' RCU callbacks will
be executed in the context of per-CPU rcuo kthreads created for
this purpose. Assuming that the kthreads whose names start with
"rcuo" are bound to "housekeeping" CPUs, this reduces OS jitter
on the remaining CPUs, but might decrease memory locality during
RCU-callback invocation, thus potentially degrading throughput.
Select this if all CPUs need to be no-CBs CPUs for real-time
or energy-efficiency reasons.
...
@@ -104,31 +104,7 @@ void __rcu_read_unlock(void)
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,
* as debug_check_no_locks_held() already does this if lockdep
* is enabled.
*/
void exit_rcu(void)
{
struct task_struct *t = current;
if (likely(list_empty(&current->rcu_node_entry)))
return;
t->rcu_read_lock_nesting = 1;
barrier();
t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
__rcu_read_unlock();
}
#else /* #ifdef CONFIG_PREEMPT_RCU */
void exit_rcu(void)
{
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
@@ -145,9 +121,6 @@ static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
int debug_lockdep_rcu_enabled(void)
{
...
@@ -44,7 +44,6 @@
/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
@@ -205,7 +204,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
*/
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
reset_cpu_stall_ticks(rcp);
RCU_TRACE(reset_cpu_stall_ticks(rcp));
if (rcp->rcucblist != NULL &&
rcp->donetail != rcp->curtail) {
rcp->donetail = rcp->curtail;
@@ -227,7 +226,7 @@ void rcu_sched_qs(int cpu)
local_irq_save(flags);
if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
rcu_qsctr_help(&rcu_bh_ctrlblk))
invoke_rcu_callbacks();
raise_softirq(RCU_SOFTIRQ);
local_irq_restore(flags);
}
@@ -240,7 +239,7 @@ void rcu_bh_qs(int cpu)
local_irq_save(flags);
if (rcu_qsctr_help(&rcu_bh_ctrlblk))
invoke_rcu_callbacks();
raise_softirq(RCU_SOFTIRQ);
local_irq_restore(flags);
}
@@ -252,12 +251,11 @@ void rcu_bh_qs(int cpu)
*/
void rcu_check_callbacks(int cpu, int user)
{
check_cpu_stalls();
RCU_TRACE(check_cpu_stalls());
if (user || rcu_is_cpu_rrupt_from_idle())
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
rcu_preempt_check_callbacks();
}
/*
@@ -278,7 +276,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
ACCESS_ONCE(rcp->rcucblist),
need_resched(),
is_idle_task(current),
rcu_is_callbacks_kthread()));
false));
return;
}
@@ -290,7 +288,6 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
*rcp->donetail = NULL;
if (rcp->curtail == rcp->donetail)
rcp->curtail = &rcp->rcucblist;
rcu_preempt_remove_callbacks(rcp);
rcp->donetail = &rcp->rcucblist;
local_irq_restore(flags);
@@ -309,14 +306,13 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
is_idle_task(current),
rcu_is_callbacks_kthread()));
false));
}
static void rcu_process_callbacks(struct softirq_action *unused)
{
__rcu_process_callbacks(&rcu_sched_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
rcu_preempt_process_callbacks();
}
/*
@@ -382,3 +378,8 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
void rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
@@ -695,44 +695,6 @@ static struct rcu_torture_ops srcu_sync_ops = {
.name = "srcu_sync"
};
static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
{
return srcu_read_lock_raw(&srcu_ctl);
}
static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
{
srcu_read_unlock_raw(&srcu_ctl, idx);
}
static struct rcu_torture_ops srcu_raw_ops = {
.init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock_raw,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock_raw,
.completed = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.call = NULL,
.cb_barrier = NULL,
.stats = srcu_torture_stats,
.name = "srcu_raw"
};
static struct rcu_torture_ops srcu_raw_sync_ops = {
.init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock_raw,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock_raw,
.completed = srcu_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = srcu_torture_synchronize,
.call = NULL,
.cb_barrier = NULL,
.stats = srcu_torture_stats,
.name = "srcu_raw_sync"
};
static void srcu_torture_synchronize_expedited(void)
{
synchronize_srcu_expedited(&srcu_ctl);
@@ -1983,7 +1945,6 @@ rcu_torture_init(void)
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
&rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
&srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
&srcu_raw_ops, &srcu_raw_sync_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
mutex_lock(&fullstop_mutex);
...
@@ -343,12 +343,17 @@ struct rcu_data {
#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
/* to take at least one */
/* scheduling clock irq */
/* before ratting on them. */
#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
/* For jiffies_till_first_fqs and */
/* and jiffies_till_next_fqs. */
#define RCU_JIFFIES_FQS_DIV 256 /* Very large systems need more */
/* delay between bouts of */
/* quiescent-state forcing. */
#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time to take */
/* at least one scheduling clock */
/* irq before ratting on them. */
#define rcu_wait(cond) \
do { \
...
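The new RCU_JIFFIES_TILL_FORCE_QS definition scales the quiescent-state-forcing delay with HZ instead of hard-coding 3 jiffies. A quick worked evaluation of the macro (plain arithmetic, not from the patch) shows the effect:

/* HZ = 100:  1 + (100 > 250)  + (100 > 500)  = 1 jiffy   (~10 ms)  */
/* HZ = 250:  1 + (250 > 250)  + (250 > 500)  = 1 jiffy   (~4 ms)   */
/* HZ = 300:  1 + (300 > 250)  + (300 > 500)  = 2 jiffies (~6.7 ms) */
/* HZ = 1000: 1 + (1000 > 250) + (1000 > 500) = 3 jiffies (3 ms)    */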
@@ -53,38 +53,37 @@ static char __initdata nocb_buf[NR_CPUS * 5];
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
pr_info("\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
pr_info("\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
printk(KERN_INFO
"\tRCU dyntick-idle grace-period acceleration is enabled.\n");
pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
pr_info("\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
pr_info("\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
pr_info("\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
pr_info("\tFour-level hierarchy is enabled.\n");
#endif
if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
if (nr_cpu_ids != NR_CPUS)
printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_NOCB_CPU
#ifndef CONFIG_RCU_NOCB_CPU_NONE
if (!have_rcu_nocb_mask) {
@@ -92,19 +91,19 @@ static void __init rcu_bootup_announce_oddness(void)
have_rcu_nocb_mask = true;
}
#ifdef CONFIG_RCU_NOCB_CPU_ZERO
pr_info("\tExperimental no-CBs CPU 0\n");
pr_info("\tOffload RCU callbacks from CPU 0\n");
cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
pr_info("\tExperimental no-CBs for all CPUs\n");
pr_info("\tOffload RCU callbacks from all CPUs\n");
cpumask_setall(rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
if (have_rcu_nocb_mask) {
cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
if (rcu_nocb_poll)
pr_info("\tExperimental polled no-CBs CPUs.\n");
pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
}
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}
@@ -123,7 +122,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp);
*/
static void __init rcu_bootup_announce(void)
{
printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
pr_info("Preemptible hierarchical RCU implementation.\n");
rcu_bootup_announce_oddness();
}
@@ -490,13 +489,13 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
rnp->level, rnp->grplo, rnp->grphi);
}
static void rcu_print_task_stall_end(void)
{
printk(KERN_CONT "\n");
pr_cont("\n");
}
#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
@@ -526,7 +525,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
t = list_entry(rnp->gp_tasks,
struct task_struct, rcu_node_entry);
list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
printk(KERN_CONT " P%d", t->pid);
pr_cont(" P%d", t->pid);
ndetected++;
}
rcu_print_task_stall_end();
@@ -933,6 +932,24 @@ static void __init __rcu_init_preempt(void)
rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}
/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,
* as debug_check_no_locks_held() already does this if lockdep
* is enabled.
*/
void exit_rcu(void)
{
struct task_struct *t = current;
if (likely(list_empty(&current->rcu_node_entry)))
return;
t->rcu_read_lock_nesting = 1;
barrier();
t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
__rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
static struct rcu_state *rcu_state = &rcu_sched_state;
@@ -942,7 +959,7 @@ static struct rcu_state *rcu_state = &rcu_sched_state;
*/
static void __init rcu_bootup_announce(void)
{
printk(KERN_INFO "Hierarchical RCU implementation.\n");
pr_info("Hierarchical RCU implementation.\n");
rcu_bootup_announce_oddness();
}
@@ -1101,6 +1118,14 @@ static void __init __rcu_init_preempt(void)
{
}
/*
* Because preemptible RCU does not exist, tasks cannot possibly exit
* while in preemptible RCU read-side critical sections.
*/
void exit_rcu(void)
{
}
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
@@ -1629,7 +1654,7 @@ static bool rcu_try_advance_all_cbs(void)
*/
if (rdp->completed != rnp->completed &&
rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
rcu_process_gp_end(rsp, rdp);
note_gp_changes(rsp, rdp);
if (cpu_has_callbacks_ready_to_invoke(rdp))
cbs_ready = true;
@@ -1883,7 +1908,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
printk(KERN_CONT "\n");
pr_cont("\n");
}
/*
@@ -1914,7 +1939,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
ticks_value = rsp->gpnum - rdp->gpnum;
}
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
cpu, ticks_value, ticks_title,
atomic_read(&rdtp->dynticks) & 0xfff,
rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
@@ -1925,7 +1950,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
printk(KERN_ERR "\t");
pr_err("\t");
}
/* Zero ->ticks_this_gp for all flavors of RCU. */
@@ -1948,17 +1973,17 @@ static void increment_cpu_stall_ticks(void)
static void print_cpu_stall_info_begin(void)
{
printk(KERN_CONT " {");
pr_cont(" {");
}
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
printk(KERN_CONT " %d", cpu);
pr_cont(" %d", cpu);
}
static void print_cpu_stall_info_end(void)
{
printk(KERN_CONT "} ");
pr_cont("} ");
}
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
...