Commit 0334a4e2
authored Jun 19, 2002 by Linus Torvalds

Merge ssh://master.kernel.org//home/mingo/bk-sched/
into home.transmeta.com:/home/torvalds/v2.5/linux

parents 68c14a04 4f4eb77b

Showing 2 changed files with 53 additions and 23 deletions
include/linux/sched.h	+28 -0
kernel/sched.c		+25 -23

include/linux/sched.h
@@ -863,6 +863,34 @@ static inline void recalc_sigpending(void)
 	clear_thread_flag(TIF_SIGPENDING);
 }
 
+/*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+ */
+#ifdef CONFIG_SMP
+
+static inline unsigned int task_cpu(struct task_struct *p)
+{
+	return p->thread_info->cpu;
+}
+
+static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	p->thread_info->cpu = cpu;
+}
+
+#else
+
+static inline unsigned int task_cpu(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* __KERNEL__ */
 
 #endif
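
The effect of the new pair is that generic scheduler code can touch a task's CPU without #ifdef CONFIG_SMP: on UP builds task_cpu() folds to the constant 0 and set_task_cpu() compiles away entirely. A minimal usage sketch follows; demo_task_is_local() is a hypothetical helper for illustration, not part of this commit (smp_processor_id() likewise evaluates to 0 on UP):

	/* Hypothetical helper, not from this commit: true when p is
	 * assigned to the CPU executing this code. On UP both sides are
	 * the constant 0, so the test folds to 1 at compile time. */
	static inline int demo_task_is_local(struct task_struct *p)
	{
		return task_cpu(p) == smp_processor_id();
	}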

kernel/sched.c
@@ -148,7 +148,7 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
 #define cpu_rq(cpu)		(runqueues + (cpu))
 #define this_rq()		cpu_rq(smp_processor_id())
-#define task_rq(p)		cpu_rq((p)->thread_info->cpu)
+#define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
@@ -284,8 +284,8 @@ static inline void resched_task(task_t *p)
 	need_resched = test_and_set_tsk_thread_flag(p, TIF_NEED_RESCHED);
 	nrpolling |= test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
-	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
-		smp_send_reschedule(p->thread_info->cpu);
+	if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
+		smp_send_reschedule(task_cpu(p));
 	preempt_enable();
 #else
 	set_tsk_need_resched(p);
@@ -366,10 +366,10 @@ static int try_to_wake_up(task_t * p, int sync)
 	 * currently. Do not violate hard affinity.
 	 */
 	if (unlikely(sync && (rq->curr != p) &&
-			(p->thread_info->cpu != smp_processor_id()) &&
+			(task_cpu(p) != smp_processor_id()) &&
 			(p->cpus_allowed & (1UL << smp_processor_id())))) {
-		p->thread_info->cpu = smp_processor_id();
+		set_task_cpu(p, smp_processor_id());
 		task_rq_unlock(rq, &flags);
 		goto repeat_lock_task;
 	}
@@ -409,7 +409,7 @@ void wake_up_forked_process(task_t * p)
 		p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
 		p->prio = effective_prio(p);
 	}
-	p->thread_info->cpu = smp_processor_id();
+	set_task_cpu(p, smp_processor_id());
 	activate_task(p, rq);
 	rq_unlock(rq);
@@ -663,7 +663,7 @@ static void load_balance(runqueue_t *this_rq, int idle)
 	 */
 	dequeue_task(next, array);
 	busiest->nr_running--;
-	next->thread_info->cpu = this_cpu;
+	set_task_cpu(next, this_cpu);
 	this_rq->nr_running++;
 	enqueue_task(next, this_rq->active);
 	if (next->prio < current->prio)
@@ -821,7 +821,7 @@ asmlinkage void schedule(void)
 	spin_lock_irq(&rq->lock);
 
 	/*
-	 * if entering off a kernel preemption go straight
+	 * if entering off of a kernel preemption go straight
 	 * to picking the next task.
 	 */
 	if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
@@ -906,7 +906,7 @@ asmlinkage void preempt_schedule(void)
 	schedule();
 	ti->preempt_count = 0;
 
-	/* we can miss a preemption opportunity between schedule and now */
+	/* we could miss a preemption opportunity between schedule and now */
 	barrier();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
@@ -1630,7 +1630,7 @@ static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
 void __init init_idle(task_t *idle, int cpu)
 {
-	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(idle->thread_info->cpu);
+	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
 	unsigned long flags;
 
 	__save_flags(flags);
@@ -1642,7 +1642,7 @@ void __init init_idle(task_t *idle, int cpu)
 	idle->array = NULL;
 	idle->prio = MAX_PRIO;
 	idle->state = TASK_RUNNING;
-	idle->thread_info->cpu = cpu;
+	set_task_cpu(idle, cpu);
 	double_rq_unlock(idle_rq, rq);
 	set_tsk_need_resched(idle);
 	__restore_flags(flags);
@@ -1751,7 +1751,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	 * Can the task run on the task's current CPU? If not then
 	 * migrate the process off to a proper CPU.
 	 */
-	if (new_mask & (1UL << p->thread_info->cpu)) {
+	if (new_mask & (1UL << task_cpu(p))) {
 		task_rq_unlock(rq, &flags);
 		goto out;
 	}
@@ -1760,7 +1760,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->array && (p != rq->curr)) {
-		p->thread_info->cpu = __ffs(p->cpus_allowed);
+		set_task_cpu(p, __ffs(p->cpus_allowed));
 		task_rq_unlock(rq, &flags);
 		goto out;
 	}
@@ -1775,6 +1775,8 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
 	preempt_enable();
 }
 
+static __initdata int master_migration_thread;
+
 static int migration_thread(void * bind_cpu)
 {
 	int cpu = (int) (long) bind_cpu;
@@ -1786,14 +1788,12 @@ static int migration_thread(void * bind_cpu)
 	sigfillset(&current->blocked);
 	set_fs(KERNEL_DS);
 
-	/* FIXME: First CPU may not be zero, but this crap code
-	   vanishes with hotplug cpu patch anyway. --RR */
 	/*
-	 * The first migration thread is started on CPU #0. This one can
-	 * migrate the other migration threads to their destination CPUs.
+	 * The first migration thread is started on the boot CPU, it
+	 * migrates the other migration threads to their destination CPUs.
 	 */
-	if (cpu != 0) {
-		while (!cpu_rq(0)->migration_thread)
+	if (cpu != master_migration_thread) {
+		while (!cpu_rq(master_migration_thread)->migration_thread)
 			yield();
 		set_cpus_allowed(current, 1UL << cpu);
 	}
@@ -1829,18 +1829,18 @@ static int migration_thread(void * bind_cpu)
 		cpu_dest = __ffs(p->cpus_allowed);
 		rq_dest = cpu_rq(cpu_dest);
 
 repeat:
-		cpu_src = p->thread_info->cpu;
+		cpu_src = task_cpu(p);
 		rq_src = cpu_rq(cpu_src);
 
 		local_irq_save(flags);
 		double_rq_lock(rq_src, rq_dest);
-		if (p->thread_info->cpu != cpu_src) {
+		if (task_cpu(p) != cpu_src) {
 			double_rq_unlock(rq_src, rq_dest);
 			local_irq_restore(flags);
 			goto repeat;
 		}
 		if (rq_src == rq) {
-			p->thread_info->cpu = cpu_dest;
+			set_task_cpu(p, cpu_dest);
 			if (p->array) {
 				deactivate_task(p, rq_src);
 				activate_task(p, rq_dest);
@@ -1857,7 +1857,9 @@ void __init migration_init(void)
 {
 	int cpu;
 
-	current->cpus_allowed = 1UL << 0;
+	master_migration_thread = smp_processor_id();
+	current->cpus_allowed = 1UL << master_migration_thread;
+
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		if (!cpu_online(cpu))
 			continue;
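
Read together, the kernel/sched.c hunks do two things: every open-coded p->thread_info->cpu access now goes through task_cpu()/set_task_cpu(), and the migration-thread bootstrap no longer hard-wires CPU 0 as the boot CPU. migration_init() records smp_processor_id() in master_migration_thread; each non-master thread then spins in yield() until the master's runqueue has a migration thread, and only afterwards calls set_cpus_allowed() to pin itself, because the cross-CPU move is carried out by an already-running migration thread. A condensed, hypothetical sketch of that handshake (names taken from the hunks above, request servicing omitted):

	/* Condensed sketch, not the actual source: startup ordering of
	 * the per-CPU migration threads after this commit. */
	static int migration_thread(void *bind_cpu)
	{
		int cpu = (int)(long)bind_cpu;

		if (cpu != master_migration_thread) {
			/* Wait for the boot CPU's migration thread; it
			 * performs the move that pins us to our CPU. */
			while (!cpu_rq(master_migration_thread)->migration_thread)
				yield();
			set_cpus_allowed(current, 1UL << cpu);
		}
		/* ... publish this task as cpu_rq(cpu)->migration_thread
		 * and begin servicing migration requests ... */
		return 0;
	}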