nexedi / linux

Commit 25e03a74, authored Oct 15, 2013 by Paul E. McKenney

Merge branch 'gp.2013.09.25a' into HEAD

gp.2013.09.25a: Topic branch for grace-period updates.

Parents: 460aebac, 15f5191b

Showing 4 changed files, with 144 additions and 29 deletions:

    include/trace/events/rcu.h   +66  -14
    kernel/rcutiny.c              +1   -1
    kernel/rcutree.c             +46  -10
    kernel/rcutree_plugin.h      +31   -4
include/trace/events/rcu.h:

@@ -39,15 +39,26 @@ TRACE_EVENT(rcu_utilization,
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)

 /*
- * Tracepoint for grace-period events: starting and ending a grace
- * period ("start" and "end", respectively), a CPU noting the start
- * of a new grace period or the end of an old grace period ("cpustart"
- * and "cpuend", respectively), a CPU passing through a quiescent
- * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
- * and "cpuofl", respectively), a CPU being kicked for being too
- * long in dyntick-idle mode ("kick"), a CPU accelerating its new
- * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU
- * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").
+ * Tracepoint for grace-period events.  Takes a string identifying the
+ * RCU flavor, the grace-period number, and a string identifying the
+ * grace-period-related event as follows:
+ *
+ *	"AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL.
+ *	"AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
+ *	"newreq": Request a new grace period.
+ *	"start": Start a grace period.
+ *	"cpustart": CPU first notices a grace-period start.
+ *	"cpuqs": CPU passes through a quiescent state.
+ *	"cpuonl": CPU comes online.
+ *	"cpuofl": CPU goes offline.
+ *	"reqwait": GP kthread sleeps waiting for grace-period request.
+ *	"reqwaitsig": GP kthread awakened by signal from reqwait state.
+ *	"fqswait": GP kthread waiting until time to force quiescent states.
+ *	"fqsstart": GP kthread starts forcing quiescent states.
+ *	"fqsend": GP kthread done forcing quiescent states.
+ *	"fqswaitsig": GP kthread awakened by signal from fqswait state.
+ *	"end": End a grace period.
+ *	"cpuend": CPU first notices a grace-period end.
  */
 TRACE_EVENT(rcu_grace_period,
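For orientation, a minimal userspace sketch of the typical event order for one uneventful grace period, as implied by the descriptions above (forcing-related events repeat as needed; this is an illustration, not kernel code, and the ordering of "fqswait" relative to the per-CPU events is an assumption):

    #include <stdio.h>

    int main(void)
    {
            static const char *const gp_events[] = {
                    "newreq",	/* Someone requests a grace period... */
                    "start",	/* ...and the GP kthread starts one. */
                    "cpustart",	/* Each CPU notices the new GP... */
                    "cpuqs",	/* ...and passes through a quiescent state. */
                    "fqswait",	/* GP kthread waits, forcing QSes if needed. */
                    "end",	/* All quiescent states seen: GP ends. */
                    "cpuend",	/* Each CPU notices the end. */
            };
            for (unsigned i = 0; i < sizeof(gp_events) / sizeof(gp_events[0]); i++)
                    printf("rcu_grace_period: rcu_sched %s\n", gp_events[i]);
            return 0;
    }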
@@ -160,6 +171,46 @@ TRACE_EVENT(rcu_grace_period_init,
 		  __entry->grplo, __entry->grphi, __entry->qsmask)
 );

+/*
+ * Tracepoint for RCU no-CBs CPU callback handoffs.  This event is intended
+ * to assist debugging of these handoffs.
+ *
+ * The first argument is the name of the RCU flavor, and the second is
+ * the number of the offloaded CPU.  The third and final argument is a
+ * string as follows:
+ *
+ *	"WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ *	"WakeOvf": Wake rcuo kthread, CB list is huge.
+ *	"WakeNot": Don't wake rcuo kthread.
+ *	"WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ *	"Poll": Start of new polling cycle for rcu_nocb_poll.
+ *	"Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
+ *	"WokeEmpty": rcuo kthread woke to find empty list.
+ *	"WokeNonEmpty": rcuo kthread woke to find non-empty list.
+ *	"WaitQueue": Enqueue partially done, timed wait for it to complete.
+ *	"WokeQueue": Partial enqueue now complete.
+ */
+TRACE_EVENT(rcu_nocb_wake,
+
+	TP_PROTO(const char *rcuname, int cpu, const char *reason),
+
+	TP_ARGS(rcuname, cpu, reason),
+
+	TP_STRUCT__entry(
+		__field(const char *, rcuname)
+		__field(int, cpu)
+		__field(const char *, reason)
+	),
+
+	TP_fast_assign(
+		__entry->rcuname = rcuname;
+		__entry->cpu = cpu;
+		__entry->reason = reason;
+	),
+
+	TP_printk("%s %d %s",
+		  __entry->rcuname, __entry->cpu, __entry->reason)
+);
+
 /*
  * Tracepoint for tasks blocking within preemptible-RCU read-side
  * critical sections.  Track the type of RCU (which one day might
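Conceptually, a TRACE_EVENT() defines a typed record plus a formatter: TP_STRUCT__entry declares the fields, TP_fast_assign copies the arguments in, and TP_printk renders them. A minimal userspace model of what rcu_nocb_wake captures and prints (the printf stands in for the kernel's ring-buffer machinery; not the macro's actual expansion):

    #include <stdio.h>

    /* Userspace model of the rcu_nocb_wake record (TP_STRUCT__entry). */
    struct rcu_nocb_wake_entry {
            const char *rcuname;	/* RCU flavor name. */
            int cpu;		/* Offloaded CPU number. */
            const char *reason;	/* One of the strings listed above. */
    };

    /* Model of TP_fast_assign: copy the arguments into the record. */
    static void trace_rcu_nocb_wake_model(const char *rcuname, int cpu,
                                          const char *reason)
    {
            struct rcu_nocb_wake_entry e = { rcuname, cpu, reason };

            /* Model of TP_printk("%s %d %s", ...). */
            printf("rcu_nocb_wake: %s %d %s\n", e.rcuname, e.cpu, e.reason);
    }

    int main(void)
    {
            trace_rcu_nocb_wake_model("rcu_sched", 3, "WakeEmpty");
            return 0;
    }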
@@ -540,17 +591,17 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
 TRACE_EVENT(rcu_batch_end,

 	TP_PROTO(const char *rcuname, int callbacks_invoked,
-		 bool cb, bool nr, bool iit, bool risk),
+		 char cb, char nr, char iit, char risk),

 	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),

 	TP_STRUCT__entry(
 		__field(const char *, rcuname)
 		__field(int, callbacks_invoked)
-		__field(bool, cb)
-		__field(bool, nr)
-		__field(bool, iit)
-		__field(bool, risk)
+		__field(char, cb)
+		__field(char, nr)
+		__field(char, iit)
+		__field(char, risk)
 	),

 	TP_fast_assign(
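The four flags summarize end-of-batch conditions (in the kernel/rcutiny.c hunk below they are fed from the callback list, need_resched(), and is_idle_task()). Storing them as char rather than bool keeps the record compact while letting the formatter print them directly. A runnable illustration of printing such flags as one compact field; the 'C'/'S'/'I'/'R' glyphs follow the rcu_batch_end TP_printk in later upstream trees, so treat them as an assumption, since this hunk ends before the TP_printk section:

    #include <stdio.h>

    int main(void)
    {
            char cb = 1, nr = 0, iit = 1, risk = 0;

            /* One character per flag, '.' when the flag is clear. */
            printf("idle=%c%c%c%c\n",
                   cb ? 'C' : '.', nr ? 'S' : '.',
                   iit ? 'I' : '.', risk ? 'R' : '.');
            return 0;	/* Prints "idle=C.I." */
    }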
@@ -656,6 +707,7 @@ TRACE_EVENT(rcu_barrier,
 #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
 				      level, grplo, grphi, event) \
 				      do { } while (0)
+#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
kernel/rcutiny.c:

@@ -275,7 +275,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	if (&rcp->rcucblist == rcp->donetail) {
 		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
 		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
-					      ACCESS_ONCE(rcp->rcucblist),
+					      !!ACCESS_ONCE(rcp->rcucblist),
 					      need_resched(),
 					      is_idle_task(current),
 					      false));
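This one-character fix matters because of the bool-to-char change above: passing a pointer to a bool parameter implicitly collapses it to 0 or 1, but passing it to a char parameter would truncate to the pointer's low byte, which can be 0 even for a non-NULL pointer. The !! idiom restores the explicit 0-or-1 collapse. A minimal demonstration:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            /* A non-NULL "list head" whose low byte happens to be zero. */
            void *rcucblist = (void *)0x1000;

            bool as_bool = rcucblist;		/* bool conversion: 1. */
            char as_char = (char)(long)rcucblist;	/* low-byte truncation: 0! */
            char normalized = !!rcucblist;		/* !! restores 0-or-1: 1. */

            printf("bool=%d char=%d !!=%d\n", as_bool, as_char, normalized);
            return 0;
    }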
kernel/rcutree.c:

@@ -1326,7 +1326,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 }

 /*
- * Initialize a new grace period.
+ * Initialize a new grace period.  Return 0 if no grace period required.
  */
 static int rcu_gp_init(struct rcu_state *rsp)
 {
@@ -1335,10 +1335,18 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
+	if (rsp->gp_flags == 0) {
+		/* Spurious wakeup, tell caller to go back to sleep.  */
+		raw_spin_unlock_irq(&rnp->lock);
+		return 0;
+	}
 	rsp->gp_flags = 0; /* Clear all flags: New grace period. */

-	if (rcu_gp_in_progress(rsp)) {
-		/* Grace period already in progress, don't start another.  */
+	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
+		/*
+		 * Grace period already in progress, don't start another.
+		 * Not supposed to be able to happen.
+		 */
 		raw_spin_unlock_irq(&rnp->lock);
 		return 0;
 	}
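The new gp_flags == 0 check handles spurious wakeups: the GP kthread can leave its wait without a real grace-period request, so rcu_gp_init() rechecks the flag under the lock and returns 0 to send the caller back to sleep. The same recheck-under-lock pattern in userspace terms (a minimal sketch, not kernel code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int gp_flags;		/* Model of rsp->gp_flags. */

    /* Model of rcu_gp_init(): return false if the wakeup was spurious. */
    static bool gp_init_model(void)
    {
            pthread_mutex_lock(&lock);
            if (gp_flags == 0) {
                    /* Spurious wakeup, tell caller to go back to sleep. */
                    pthread_mutex_unlock(&lock);
                    return false;
            }
            gp_flags = 0;	/* Clear all flags: new grace period. */
            pthread_mutex_unlock(&lock);
            return true;
    }

    int main(void)
    {
            printf("no request: %d\n", gp_init_model());	/* 0: spurious. */
            gp_flags = 0x1;					/* Request a GP. */
            printf("request:    %d\n", gp_init_model());	/* 1: proceed.  */
            return 0;
    }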
@@ -1481,8 +1489,12 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
 	rcu_advance_cbs(rsp, rnp, rdp);  /* Reduce false positives below. */
-	if (cpu_needs_another_gp(rsp, rdp))
+	if (cpu_needs_another_gp(rsp, rdp)) {
 		rsp->gp_flags = RCU_GP_FLAG_INIT;
+		trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+				       TPS("newreq"));
+	}
 	raw_spin_unlock_irq(&rnp->lock);
 }
@@ -1492,6 +1504,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 static int __noreturn rcu_gp_kthread(void *arg)
 {
 	int fqs_state;
+	int gf;
 	unsigned long j;
 	int ret;
 	struct rcu_state *rsp = arg;
@@ -1501,14 +1514,19 @@ static int __noreturn rcu_gp_kthread(void *arg)
 		/* Handle grace-period start. */
 		for (;;) {
+			trace_rcu_grace_period(rsp->name,
+					       ACCESS_ONCE(rsp->gpnum),
+					       TPS("reqwait"));
 			wait_event_interruptible(rsp->gp_wq,
-						 rsp->gp_flags &
+						 ACCESS_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
-			if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
-			    rcu_gp_init(rsp))
+			if (rcu_gp_init(rsp))
 				break;
 			cond_resched();
 			flush_signals(current);
+			trace_rcu_grace_period(rsp->name,
+					       ACCESS_ONCE(rsp->gpnum),
+					       TPS("reqwaitsig"));
 		}

 		/* Handle quiescent-state forcing. */
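Two related changes here: the redundant gp_flags pre-check is dropped from the if, since rcu_gp_init() now rechecks under the lock, and the wait condition reads gp_flags through ACCESS_ONCE() so the compiler must reload it from memory on every evaluation instead of caching it in a register. The macro below is the kernel's definition from include/linux/compiler.h of this era, shown with a small runnable illustration:

    #include <stdio.h>

    /* The kernel's definition of this era, from include/linux/compiler.h. */
    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    #define RCU_GP_FLAG_INIT 0x1	/* Stand-in for the kernel's flag bit. */

    static unsigned int gp_flags;	/* Model of rsp->gp_flags, set elsewhere. */

    int main(void)
    {
            gp_flags = RCU_GP_FLAG_INIT;

            /*
             * The volatile cast forces a fresh load of gp_flags on every
             * evaluation, so a wait-loop condition cannot be hoisted into
             * a register and miss another thread's update.
             */
            if (ACCESS_ONCE(gp_flags) & RCU_GP_FLAG_INIT)
                    printf("grace-period request visible\n");
            return 0;
    }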
@@ -1518,10 +1536,16 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			j = HZ;
 			jiffies_till_first_fqs = HZ;
 		}
+		ret = 0;
 		for (;;) {
-			rsp->jiffies_force_qs = jiffies + j;
+			if (!ret)
+				rsp->jiffies_force_qs = jiffies + j;
+			trace_rcu_grace_period(rsp->name,
+					       ACCESS_ONCE(rsp->gpnum),
+					       TPS("fqswait"));
 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
-					(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
+					((gf = ACCESS_ONCE(rsp->gp_flags)) &
+					 RCU_GP_FLAG_FQS) ||
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
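Two details worth spelling out. First, wait_event_interruptible_timeout() returns 0 only when the timeout elapsed, so the new "if (!ret)" re-arms the forcing deadline only after a genuine timeout; spurious or signal-driven wakeups no longer push the deadline out. Second, gp_flags is sampled exactly once into the local gf, so the forcing decision in the next hunk tests one consistent snapshot rather than racing rereads. A minimal sketch of the snapshot idiom:

    #include <stdio.h>

    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
    #define RCU_GP_FLAG_FQS 0x2	/* Stand-in for the kernel's flag bit. */

    static unsigned int gp_flags;	/* Model of rsp->gp_flags. */

    int main(void)
    {
            unsigned int gf;

            gp_flags = RCU_GP_FLAG_FQS;

            gf = ACCESS_ONCE(gp_flags);	/* Sample the shared word once... */
            if (gf & RCU_GP_FLAG_FQS)	/* ...and test only the snapshot, so */
                    printf("forcing requested\n");	/* later tests cannot disagree. */
            return 0;
    }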
@@ -1530,13 +1554,23 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			    !rcu_preempt_blocked_readers_cgp(rnp))
 				break;
 			/* If time for quiescent-state forcing, do it. */
-			if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
+			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
+			    (gf & RCU_GP_FLAG_FQS)) {
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqsstart"));
 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqsend"));
 				cond_resched();
 			} else {
 				/* Deal with stray signal. */
 				cond_resched();
 				flush_signals(current);
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqswaitsig"));
 			}
 			j = jiffies_till_next_fqs;
 			if (j > HZ) {
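Replacing the "ret == 0" test with ULONG_CMP_GE() means "deadline reached" is now computed from the jiffies clock itself rather than from how the wait happened to return. ULONG_CMP_GE() is the kernel's wraparound-safe comparison for jiffies-style counters, defined in include/linux/rcupdate.h; a runnable illustration of why plain >= is not enough:

    #include <limits.h>
    #include <stdio.h>

    /* Wraparound-safe "a is at or past b", from include/linux/rcupdate.h. */
    #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

    int main(void)
    {
            unsigned long now = ULONG_MAX - 2;	/* jiffies just before wrap. */
            unsigned long deadline = now + 5;	/* Deadline lands past the wrap. */

            printf("plain >= : %d\n", (int)(now >= deadline));	   /* 1: wrong. */
            printf("wrap-safe: %d\n", (int)ULONG_CMP_GE(now, deadline)); /* 0: right. */
            return 0;
    }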
@@ -1584,6 +1618,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
+	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+			       TPS("newreq"));

 	/*
 	 * We can't do wakeups while holding the rnp->lock, as that
kernel/rcutree_plugin.h:

@@ -2113,15 +2113,22 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 	/* If we are not being polled and there is a kthread, awaken it ... */
 	t = ACCESS_ONCE(rdp->nocb_kthread);
-	if (rcu_nocb_poll || !t)
+	if (rcu_nocb_poll || !t) {
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+				    TPS("WakeNotPoll"));
 		return;
+	}
 	len = atomic_long_read(&rdp->nocb_q_count);
 	if (old_rhpp == &rdp->nocb_head) {
 		wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
 		rdp->qlen_last_fqs_check = 0;
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeEmpty"));
 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
 		wake_up_process(t); /* ... or if many callbacks queued. */
 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
+	} else {
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
 	}
 	return;
 }
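With these additions, every outcome of the enqueue-side wake policy is traced: skip the wakeup while polling or before the kthread exists, wake on the first callback to an empty list, wake on overflow past qhimark, otherwise do nothing. A standalone model of that decision structure (names mirror the kernel's rdp->qlen_last_fqs_check and the qhimark module parameter; the threshold values are illustrative):

    #include <limits.h>
    #include <stdio.h>

    static long qhimark = 10000;	/* Model of the qhimark parameter. */
    static long qlen_last_fqs_check;

    static const char *nocb_wake_policy(int polling, int have_kthread,
                                        int queue_was_empty, long len)
    {
            if (polling || !have_kthread)
                    return "WakeNotPoll";	/* Kthread polls (or isn't up yet). */
            if (queue_was_empty) {
                    qlen_last_fqs_check = 0;
                    return "WakeEmpty";	/* First callback on an empty list. */
            }
            if (len > qlen_last_fqs_check + qhimark) {
                    qlen_last_fqs_check = LONG_MAX / 2;
                    return "WakeOvf";	/* Callback list is huge. */
            }
            return "WakeNot";		/* No wakeup needed this time. */
    }

    int main(void)
    {
            printf("%s\n", nocb_wake_policy(0, 1, 1, 1));	/* WakeEmpty */
            printf("%s\n", nocb_wake_policy(0, 1, 0, 20000));	/* WakeOvf */
            printf("%s\n", nocb_wake_policy(0, 1, 0, 5));	/* WakeNot */
            return 0;
    }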
@@ -2145,10 +2152,12 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
 					 (unsigned long)rhp->func,
-					 rdp->qlen_lazy, rdp->qlen);
+					 -atomic_long_read(&rdp->nocb_q_count_lazy),
+					 -atomic_long_read(&rdp->nocb_q_count));
 	else
 		trace_rcu_callback(rdp->rsp->name, rhp,
-				   rdp->qlen_lazy, rdp->qlen);
+				   -atomic_long_read(&rdp->nocb_q_count_lazy),
+				   -atomic_long_read(&rdp->nocb_q_count));
 	return 1;
 }
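The unary minus on the no-CBs queue counts appears deliberate: passing negated lengths lets a trace reader tell offloaded (no-CBs) queues apart from the regular rdp->qlen values while preserving the magnitude. That reading is an interpretation rather than something the diff states; a hypothetical decoder along those lines:

    #include <stdio.h>

    /* Hypothetical decoder: negative queue lengths in these trace events
     * would mark no-CBs (offloaded) CPUs; magnitude is still the count. */
    static void decode_qlen(long qlen)
    {
            printf("%s queue, %ld callbacks\n",
                   qlen < 0 ? "offloaded" : "regular",
                   qlen < 0 ? -qlen : qlen);
    }

    int main(void)
    {
            decode_qlen(-42);	/* As traced by __call_rcu_nocb() above. */
            decode_qlen(7);
            return 0;
    }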
@@ -2226,6 +2235,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 static int rcu_nocb_kthread(void *arg)
 {
 	int c, cl;
+	bool firsttime = 1;
 	struct rcu_head *list;
 	struct rcu_head *next;
 	struct rcu_head **tail;
@@ -2234,14 +2244,27 @@ static int rcu_nocb_kthread(void *arg)
 	/* Each pass through this loop invokes one batch of callbacks */
 	for (;;) {
 		/* If not polling, wait for next batch of callbacks. */
-		if (!rcu_nocb_poll)
+		if (!rcu_nocb_poll) {
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+		} else if (firsttime) {
+			firsttime = 0;
+			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+					    TPS("Poll"));
+		}
 		list = ACCESS_ONCE(rdp->nocb_head);
 		if (!list) {
+			if (!rcu_nocb_poll)
+				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+						    TPS("WokeEmpty"));
 			schedule_timeout_interruptible(1);
 			flush_signals(current);
 			continue;
 		}
+		firsttime = 1;
+		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+				    TPS("WokeNonEmpty"));

 		/*
 		 * Extract queued callbacks, update counts, and wait
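The firsttime flag exists to rate-limit tracing: in polling mode the loop would otherwise emit "Poll" on every pass, so it is traced once per polling cycle and re-armed only after a non-empty batch. A condensed model of one pass of the loop, reduced to its tracing decisions (a sketch, not the kernel's code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool rcu_nocb_poll;	/* Model of the rcu_nocb_poll boot parameter. */

    static void kthread_pass(bool have_callbacks, bool *firsttime)
    {
            if (!rcu_nocb_poll) {
                    puts("Sleep");		/* Would block in wait_event_interruptible(). */
            } else if (*firsttime) {
                    *firsttime = false;
                    puts("Poll");		/* Traced once per polling cycle only. */
            }
            if (!have_callbacks) {
                    if (!rcu_nocb_poll)
                            puts("WokeEmpty");
                    return;			/* Kernel: timed sleep, then continue. */
            }
            *firsttime = true;		/* Non-empty pass starts a new cycle. */
            puts("WokeNonEmpty");
    }

    int main(void)
    {
            bool firsttime = true;

            rcu_nocb_poll = true;
            kthread_pass(false, &firsttime);	/* Poll */
            kthread_pass(false, &firsttime);	/* (silent: firsttime now false) */
            kthread_pass(true, &firsttime);	/* WokeNonEmpty */
            return 0;
    }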
@@ -2262,7 +2285,11 @@ static int rcu_nocb_kthread(void *arg)
 			next = list->next;
 			/* Wait for enqueuing to complete, if needed. */
 			while (next == NULL && &list->next != tail) {
+				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+						    TPS("WaitQueue"));
 				schedule_timeout_interruptible(1);
+				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+						    TPS("WokeQueue"));
 				next = list->next;
 			}
 			debug_rcu_head_unqueue(list);
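The condition "next == NULL && &list->next != tail" detects a half-finished enqueue: the enqueuer has claimed the tail slot but has not yet linked the node, so the kthread does a timed wait ("WaitQueue") and re-reads list->next afterward ("WokeQueue"). A simplified sketch of the two-step, exchange-based enqueue ordering that creates this window (modelled on the kernel's no-CBs enqueue, which is not shown in this diff, so treat the details as an assumption; GCC/Clang atomic builtins stand in for the kernel's xchg()):

    #include <stddef.h>

    struct rcu_head {
            struct rcu_head *next;
    };

    static struct rcu_head *nocb_head;
    static struct rcu_head **nocb_tail = &nocb_head;

    static void enqueue_model(struct rcu_head *rhp)
    {
            struct rcu_head **old_tail;

            rhp->next = NULL;
            /* Step 1: atomically swing the tail pointer to the new node. */
            old_tail = __atomic_exchange_n(&nocb_tail, &rhp->next,
                                           __ATOMIC_ACQ_REL);
            /*
             * Between steps 1 and 2, a consumer can observe next == NULL
             * while &list->next != tail: the node is promised but not yet
             * linked, which is exactly the "WaitQueue" window above.
             */
            /* Step 2: link the new node into the old tail's ->next slot. */
            __atomic_store_n(old_tail, rhp, __ATOMIC_RELEASE);
    }

    int main(void)
    {
            struct rcu_head a, b;

            enqueue_model(&a);
            enqueue_model(&b);
            return nocb_head == &a && a.next == &b ? 0 : 1;
    }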