Commit 467386fb
Authored Dec 04, 2015 by Ingo Molnar
Parents: 525628c7, ecf7d01c

Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Showing 4 changed files with 44 additions and 14 deletions (+44, -14):
kernel/sched/core.c     +30  -6
kernel/sched/cputime.c   +3  -0
kernel/sched/sched.h     +3  -0
kernel/sched/wait.c      +8  -8
kernel/sched/core.c

@@ -1957,6 +1957,25 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                 goto stat;
 
 #ifdef CONFIG_SMP
+        /*
+         * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+         * possible to, falsely, observe p->on_cpu == 0.
+         *
+         * One must be running (->on_cpu == 1) in order to remove oneself
+         * from the runqueue.
+         *
+         *  [S] ->on_cpu = 1;           [L] ->on_rq
+         *      UNLOCK rq->lock
+         *                              RMB
+         *      LOCK rq->lock
+         *  [S] ->on_rq = 0;            [L] ->on_cpu
+         *
+         * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+         * from the consecutive calls to schedule(); the first switching to our
+         * task, the second putting it to sleep.
+         */
+        smp_rmb();
+
         /*
          * If the owning (remote) cpu is still in the middle of schedule() with
          * this task as prev, wait until its done referencing the task.
@@ -1964,7 +1983,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         while (p->on_cpu)
                 cpu_relax();
         /*
-         * Pairs with the smp_wmb() in finish_lock_switch().
+         * Combined with the control dependency above, we have an effective
+         * smp_load_acquire() without the need for full barriers.
+         *
+         * Pairs with the smp_store_release() in finish_lock_switch().
+         *
+         * This ensures that tasks getting woken will be fully ordered against
+         * their previous state and preserve Program Order.
         */
         smp_rmb();
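Note on the two barriers above: the spin on p->on_cpu plus the following smp_rmb() together act as an acquire against the smp_store_release() in finish_lock_switch(). The same pairing can be sketched in portable C11 atomics; this is an illustrative analogue, not kernel code, and every identifier below is invented:

        #include <stdatomic.h>

        static _Atomic int on_cpu = 1;  /* stands in for prev->on_cpu */
        static int prev_state;          /* stands in for prev->state  */

        /* Releasing side, like finish_lock_switch(): publish prev_state,
         * then release-store on_cpu = 0 so the earlier write cannot be
         * reordered past the store. */
        static void switch_out(void)
        {
                prev_state = 42;
                atomic_store_explicit(&on_cpu, 0, memory_order_release);
        }

        /* Acquiring side, like try_to_wake_up(): spin until on_cpu == 0,
         * then an acquire fence upgrades the control dependency into an
         * effective load-acquire, so prev_state is seen as written. */
        static int wake(void)
        {
                while (atomic_load_explicit(&on_cpu, memory_order_relaxed))
                        ;                                   /* cpu_relax() */
                atomic_thread_fence(memory_order_acquire);  /* ~ smp_rmb() */
                return prev_state;                          /* always 42   */
        }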
@@ -2050,7 +2075,6 @@ static void try_to_wake_up_local(struct task_struct *p)
  */
 int wake_up_process(struct task_struct *p)
 {
-        WARN_ON(task_is_stopped_or_traced(p));
         return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -5858,13 +5882,13 @@ static int init_rootdomain(struct root_domain *rd)
 {
         memset(rd, 0, sizeof(*rd));
 
-        if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
                 goto out;
-        if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
                 goto free_span;
-        if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
                 goto free_online;
-        if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+        if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
                 goto free_dlo_mask;
 
         init_dl_bw(&rd->dl_bw);
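On the alloc → zalloc change above: with CONFIG_CPUMASK_OFFSTACK=y the cpumasks live in separately allocated bitmaps, so the memset() of *rd zeroes only the pointers, not the bitmaps they will point at, and alloc_cpumask_var() hands back uninitialized memory. zalloc_cpumask_var() is the same allocation with the bitmap cleared. A minimal sketch of the pattern; the helper below is hypothetical, not from this commit:

        #include <linux/cpumask.h>
        #include <linux/errno.h>
        #include <linux/gfp.h>

        /* Hypothetical helper, for illustration only. */
        static int example_init_mask(cpumask_var_t *mask)
        {
                /*
                 * alloc_cpumask_var(mask, GFP_KERNEL) would leave the bitmap
                 * uninitialized under CONFIG_CPUMASK_OFFSTACK, so a later
                 * cpumask_test_cpu() could see garbage bits.  Clearing it at
                 * allocation time avoids that:
                 */
                if (!zalloc_cpumask_var(mask, GFP_KERNEL))
                        return -ENOMEM;
                return 0;
        }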
kernel/sched/cputime.c

@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
         unsigned int seq;
         cputime_t gtime;
 
+        if (!context_tracking_is_enabled())
+                return t->gtime;
+
         do {
                 seq = read_seqbegin(&t->vtime_seqlock);
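The early return added above is safe because, with context tracking disabled, no vtime writer updates t->gtime behind the seqlock, so the accumulated value can be read directly. The loop it skips follows the standard seqlock read-side pattern, sketched here as a standalone helper with invented names:

        #include <linux/seqlock.h>
        #include <linux/types.h>

        /* Illustrative seqlock reader: retry the read section whenever a
         * writer raced with it, so the caller sees a consistent value. */
        static u64 read_stable(seqlock_t *lock, const u64 *value)
        {
                unsigned int seq;
                u64 v;

                do {
                        seq = read_seqbegin(lock);  /* snapshot write sequence */
                        v = *value;                 /* read the protected data */
                } while (read_seqretry(lock, seq)); /* writer ran: try again   */

                return v;
        }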
kernel/sched/sched.h

@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
          * We must ensure this doesn't happen until the switch is completely
          * finished.
          *
+         * In particular, the load of prev->state in finish_task_switch() must
+         * happen before this.
+         *
          * Pairs with the control dependency and rmb in try_to_wake_up().
          */
         smp_store_release(&prev->on_cpu, 0);
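The guarantee the new comment leans on is the release semantics of smp_store_release(): every access that precedes it in program order, including the load of prev->state, is ordered before the store becomes visible to the acquiring side in try_to_wake_up(). Reduced to its two essential steps (a sketch only; the real functions do more work in between):

        /* Fragment, extracted for illustration: */
        static void finish_switch_fragment(struct task_struct *prev)
        {
                long state = READ_ONCE(prev->state);  /* ordered before ...   */
                (void)state;
                smp_store_release(&prev->on_cpu, 0);  /* ... this store lands */
        }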
kernel/sched/wait.c

@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t);
 
 __sched int bit_wait(struct wait_bit_key *word)
 {
-        if (signal_pending_state(current->state, current))
-                return 1;
         schedule();
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait);
 
 __sched int bit_wait_io(struct wait_bit_key *word)
 {
-        if (signal_pending_state(current->state, current))
-                return 1;
         io_schedule();
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);

@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io);
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
         unsigned long now = READ_ONCE(jiffies);
-        if (signal_pending_state(current->state, current))
-                return 1;
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         schedule_timeout(word->timeout - now);
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);

@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
         unsigned long now = READ_ONCE(jiffies);
-        if (signal_pending_state(current->state, current))
-                return 1;
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         io_schedule_timeout(word->timeout - now);
+        if (signal_pending(current))
+                return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
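The net effect of the four wait.c changes: the signal check moves from before the sleep (where it returned 1) to after it (returning -EINTR), so a signal delivered while sleeping surfaces as an error. Callers see this through the bit-wait machinery, which propagates a non-zero return from the action function. A hedged usage sketch; 'flags' and MY_BIT below are invented:

        #include <linux/sched.h>
        #include <linux/wait.h>

        #define MY_BIT  0       /* hypothetical bit number in 'flags' */

        static unsigned long flags;

        /* Sleeps until MY_BIT is cleared; wait_on_bit() uses bit_wait()
         * as its action, so a signal now surfaces here as -EINTR. */
        static int wait_for_my_bit(void)
        {
                int err;

                err = wait_on_bit(&flags, MY_BIT, TASK_INTERRUPTIBLE);
                if (err)
                        return err;     /* -EINTR if interrupted by a signal */
                return 0;
        }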