Kirill Smelkov / linux

Commit cb3cb673 authored Jun 01, 2020 by Ingo Molnar

    Merge branch 'WIP.core/rcu' into core/rcu, to pick up two x86/entry dependencies

    Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 806f04e9 07325d4a
Changes: 4 changed files with 104 additions and 32 deletions (+104 -32)

  include/linux/hardirq.h   +17 -12
  include/linux/rcutiny.h    +1  -0
  include/linux/rcutree.h    +6  -0
  kernel/rcu/tree.c         +80 -20
include/linux/hardirq.h
@@ -2,31 +2,28 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
+#include <linux/context_tracking_state.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
 #include <asm/hardirq.h>
 
 extern void synchronize_irq(unsigned int irq);
 extern bool synchronize_hardirq(unsigned int irq);
 
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
+#else
+static inline void __rcu_irq_enter_check_tick(void) { }
+#endif
 
-static inline void rcu_nmi_exit(void)
-{
-}
+static __always_inline void rcu_irq_enter_check_tick(void)
+{
+	if (context_tracking_enabled())
+		__rcu_irq_enter_check_tick();
+}
 
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
  * because NMI handlers may not preempt and the ops are
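Note: the new rcu_irq_enter_check_tick() wrapper keeps the common case cheap. It inlines into every caller, and the test it performs, context_tracking_enabled(), is a static-key check, so the out-of-line __rcu_irq_enter_check_tick() is only reached when context tracking is actually in use. A minimal user-space sketch of that fast-path/slow-path split, assuming nothing beyond the standard library (all names below are illustrative stand-ins, not kernel APIs):

	#include <stdbool.h>
	#include <stdio.h>

	static bool tracking_enabled;    /* stand-in for context_tracking_enabled() */

	static void slow_path_check(void)   /* stand-in for __rcu_irq_enter_check_tick() */
	{
		printf("running the out-of-line check\n");
	}

	static inline void fast_path_check(void)   /* stand-in for rcu_irq_enter_check_tick() */
	{
		if (tracking_enabled)       /* the only cost in the common case */
			slow_path_check();
	}

	int main(void)
	{
		fast_path_check();          /* no-op: tracking disabled */
		tracking_enabled = true;
		fast_path_check();          /* now takes the slow path */
		return 0;
	}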
@@ -65,6 +62,14 @@ extern void irq_exit(void);
 #define arch_nmi_exit()		do { } while (0)
 #endif
 
+#ifdef CONFIG_TINY_RCU
+static inline void rcu_nmi_enter(void) { }
+static inline void rcu_nmi_exit(void) { }
+#else
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#endif
+
 /*
  * NMI vs Tracing
  * --------------
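Note: relocating the rcu_nmi_enter()/rcu_nmi_exit() declarations as a CONFIG_TINY_RCU block keeps callers free of #ifdefs: every configuration sees the same names, and the Tiny RCU flavor simply provides empty inlines that compile to nothing. A standalone sketch of that config-stub pattern, under illustrative names (FEATURE_FULL and feature_enter()/feature_exit() are not kernel symbols):

	#include <stdio.h>

	#define FEATURE_FULL 0     /* 1 would select the out-of-line variant,
	                              which then needs a real implementation */

	#if FEATURE_FULL
	extern void feature_enter(void);   /* real implementation lives elsewhere */
	extern void feature_exit(void);
	#else
	static inline void feature_enter(void) { }   /* stubs vanish at compile time */
	static inline void feature_exit(void) { }
	#endif

	int main(void)
	{
		feature_enter();       /* callers never need an #ifdef */
		printf("did some work\n");
		feature_exit();
		return 0;
	}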
include/linux/rcutiny.h
@@ -72,6 +72,7 @@ static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
 static inline void rcu_irq_exit_preempt(void) { }
+static inline void rcu_irq_exit_check_preempt(void) { }
 static inline void exit_rcu(void) { }
 static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
include/linux/rcutree.h
@@ -51,6 +51,12 @@ void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
 
+#ifdef CONFIG_PROVE_RCU
+void rcu_irq_exit_check_preempt(void);
+#else
+static inline void rcu_irq_exit_check_preempt(void) { }
+#endif
+
 void exit_rcu(void);
 
 void rcu_scheduler_starting(void);
kernel/rcu/tree.c
@@ -778,6 +778,24 @@ void rcu_irq_exit_preempt(void)
 			 "RCU in extended quiescent state!");
 }
 
+#ifdef CONFIG_PROVE_RCU
+/**
+ * rcu_irq_exit_check_preempt - Validate that scheduling is possible
+ */
+void rcu_irq_exit_check_preempt(void)
+{
+	lockdep_assert_irqs_disabled();
+
+	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
+			 "RCU dynticks_nesting counter underflow/zero!");
+	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
+			 DYNTICK_IRQ_NONIDLE,
+			 "Bad RCU dynticks_nmi_nesting counter\n");
+	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+			 "RCU in extended quiescent state!");
+}
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 /*
  * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
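Note: rcu_irq_exit_check_preempt() is purely diagnostic. Each RCU_LOCKDEP_WARN() complains when its condition holds while lockdep is active, and the whole function is stubbed out unless CONFIG_PROVE_RCU is set. A rough user-space sketch of that compile-out assertion pattern (MY_PROVE_CHECKS, MY_WARN(), and the counter are illustrative stand-ins; the real macro additionally tests lockdep state and warns only once):

	#include <stdio.h>

	#define MY_PROVE_CHECKS 1       /* stand-in for CONFIG_PROVE_RCU */

	#if MY_PROVE_CHECKS
	#define MY_WARN(cond, msg)                                     \
		do {                                                   \
			if (cond)                                      \
				fprintf(stderr, "warning: %s\n", msg); \
		} while (0)
	#else
	#define MY_WARN(cond, msg) do { } while (0)   /* compiles away entirely */
	#endif

	static int nesting;             /* stand-in for rcu_data.dynticks_nesting */

	static void check_preempt_possible(void)   /* shape of rcu_irq_exit_check_preempt() */
	{
		MY_WARN(nesting <= 0, "nesting counter underflow/zero!");
	}

	int main(void)
	{
		check_preempt_possible();   /* fires: nesting is still 0 */
		nesting = 1;
		check_preempt_possible();   /* silent */
		return 0;
	}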
@@ -861,6 +879,67 @@ void noinstr rcu_user_exit(void)
 {
 	rcu_eqs_exit(1);
 }
+
+/**
+ * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
+ *
+ * The scheduler tick is not normally enabled when CPUs enter the kernel
+ * from nohz_full userspace execution.  After all, nohz_full userspace
+ * execution is an RCU quiescent state and the time executing in the kernel
+ * is quite short.  Except of course when it isn't.  And it is not hard to
+ * cause a large system to spend tens of seconds or even minutes looping
+ * in the kernel, which can cause a number of problems, including RCU CPU
+ * stall warnings.
+ *
+ * Therefore, if a nohz_full CPU fails to report a quiescent state
+ * in a timely manner, the RCU grace-period kthread sets that CPU's
+ * ->rcu_urgent_qs flag with the expectation that the next interrupt or
+ * exception will invoke this function, which will turn on the scheduler
+ * tick, which will enable RCU to detect that CPU's quiescent states,
+ * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
+ * The tick will be disabled once a quiescent state is reported for
+ * this CPU.
+ *
+ * Of course, in carefully tuned systems, there might never be an
+ * interrupt or exception.  In that case, the RCU grace-period kthread
+ * will eventually cause one to happen.  However, in less carefully
+ * controlled environments, this function allows RCU to get what it
+ * needs without creating otherwise useless interruptions.
+ */
+void __rcu_irq_enter_check_tick(void)
+{
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
+	// Enabling the tick is unsafe in NMI handlers.
+	if (WARN_ON_ONCE(in_nmi()))
+		return;
+
+	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
+
+	if (!tick_nohz_full_cpu(rdp->cpu) ||
+	    !READ_ONCE(rdp->rcu_urgent_qs) ||
+	    READ_ONCE(rdp->rcu_forced_tick)) {
+		// RCU doesn't need nohz_full help from this CPU, or it is
+		// already getting that help.
+		return;
+	}
+
+	// We get here only when not in an extended quiescent state and
+	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
+	// already watching and (2) The fact that we are in an interrupt
+	// handler and that the rcu_node lock is an irq-disabled lock
+	// prevents self-deadlock.  So we can safely recheck under the lock.
+	// Note that the nohz_full state currently cannot change.
+	raw_spin_lock_rcu_node(rdp->mynode);
+	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+		// A nohz_full CPU is in the kernel and RCU needs a
+		// quiescent state.  Turn on the tick!
+		WRITE_ONCE(rdp->rcu_forced_tick, true);
+		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+	}
+	raw_spin_unlock_rcu_node(rdp->mynode);
+}
 #endif /* CONFIG_NO_HZ_FULL */
 
 /**
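Note: the body of __rcu_irq_enter_check_tick() follows a classic check-then-lock-then-recheck shape. An unlocked read of ->rcu_urgent_qs and ->rcu_forced_tick filters out the common case, and the rcu_node lock serializes the rare case so the tick dependency is set exactly once. A simplified pthread-based sketch of that pattern (the mutex, flags, and turn_on_tick() below are stand-ins for the kernel's types, not the real rcu_node machinery):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool urgent_qs;      /* stand-in for rdp->rcu_urgent_qs */
	static atomic_bool forced_tick;    /* stand-in for rdp->rcu_forced_tick */

	static void turn_on_tick(void)     /* stand-in for tick_dep_set_cpu() */
	{
		printf("tick forced on\n");
	}

	static void check_tick(void)
	{
		/* Unlocked fast path: nothing urgent, or help already given. */
		if (!atomic_load(&urgent_qs) || atomic_load(&forced_tick))
			return;

		/* Rare slow path: recheck under the lock so the tick
		 * dependency is set exactly once even if callers race here. */
		pthread_mutex_lock(&node_lock);
		if (atomic_load(&urgent_qs) && !atomic_load(&forced_tick)) {
			atomic_store(&forced_tick, true);
			turn_on_tick();
		}
		pthread_mutex_unlock(&node_lock);
	}

	int main(void)
	{
		check_tick();                     /* fast path: nothing to do */
		atomic_store(&urgent_qs, true);   /* grace-period kthread's role */
		check_tick();                     /* slow path: forces the tick */
		check_tick();                     /* fast path: already forced */
		return 0;
	}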
@@ -907,26 +986,7 @@ noinstr void rcu_nmi_enter(void)
 		incby = 1;
 	} else if (!in_nmi()) {
 		instrumentation_begin();
-		if (tick_nohz_full_cpu(rdp->cpu) &&
-		    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-		    READ_ONCE(rdp->rcu_urgent_qs) &&
-		    !READ_ONCE(rdp->rcu_forced_tick)) {
-			// We get here only if we had already exited the
-			// extended quiescent state and this was an
-			// interrupt (not an NMI).  Therefore, (1) RCU is
-			// already watching and (2) The fact that we are in
-			// an interrupt handler and that the rcu_node lock
-			// is an irq-disabled lock prevents self-deadlock.
-			// So we can safely recheck under the lock.
-			raw_spin_lock_rcu_node(rdp->mynode);
-			if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-				// A nohz_full CPU is in the kernel and RCU
-				// needs a quiescent state.  Turn on the tick!
-				WRITE_ONCE(rdp->rcu_forced_tick, true);
-				tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
-			}
-			raw_spin_unlock_rcu_node(rdp->mynode);
-		}
+		rcu_irq_enter_check_tick();
 		instrumentation_end();
 	}
 	instrumentation_begin();
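Note: taken together with the hunk above, this last hunk is a pure refactor. The open-coded nohz_full tick-enabling logic that previously lived inside rcu_nmi_enter() now resides in __rcu_irq_enter_check_tick(), and rcu_nmi_enter() simply calls the rcu_irq_enter_check_tick() wrapper from its instrumentation-safe section. That makes the same check callable from other entry paths as well, which is the x86/entry dependency this merge exists to pick up.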