Commit 5a983302
authored Feb 22, 2003 by Paul Mackerras
Merge samba.org:/home/paulus/kernel/linux-2.5
into samba.org:/home/paulus/kernel/for-linus-ppc
parents 3627be6d e58d8e30
Showing 6 changed files with 26 additions and 14 deletions.
arch/i386/kernel/entry.S      +2 -1
arch/i386/kernel/process.c    +6 -1
include/asm-i386/system.h     +8 -10
include/linux/init_task.h     +1 -0
kernel/fork.c                 +6 -1
kernel/sched.c                +3 -1
arch/i386/kernel/entry.S
@@ -173,9 +173,10 @@ ENTRY(lcall27)
 
 ENTRY(ret_from_fork)
-	# NOTE: this function takes a parameter but it's unused on x86.
+	pushl %eax
 	call schedule_tail
 	GET_THREAD_INFO(%ebp)
+	popl %eax
 	jmp syscall_exit
 
 /*
arch/i386/kernel/process.c
@@ -423,8 +423,12 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
  * so the performance issues may eventually be a valid point.
  * More important, however, is the fact that this allows us much
  * more flexibility.
+ *
+ * The return value (in %eax) will be the "prev" task after
+ * the task-switch, and shows up in ret_from_fork in entry.S,
+ * for example.
  */
-void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
 				 *next = &next_p->thread;
@@ -495,6 +499,7 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		 */
 		tss->bitmap = INVALID_IO_BITMAP_OFFSET;
 	}
+	return prev_p;
 }
 
 asmlinkage int sys_fork(struct pt_regs regs)
include/asm-i386/system.h
@@ -9,26 +9,24 @@
 #ifdef __KERNEL__
 
 struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
-extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
 
 #define switch_to(prev,next,last) do {					\
+	unsigned long esi,edi;						\
 	asm volatile("pushfl\n\t"					\
-		     "pushl %%esi\n\t"					\
-		     "pushl %%edi\n\t"					\
 		     "pushl %%ebp\n\t"					\
 		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %2,%%esp\n\t"	/* restore ESP */	\
+		     "movl %5,%%esp\n\t"	/* restore ESP */	\
 		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %3\n\t"		/* restore EIP */	\
+		     "pushl %6\n\t"		/* restore EIP */	\
 		     "jmp __switch_to\n"				\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"					\
-		     "popl %%edi\n\t"					\
-		     "popl %%esi\n\t"					\
-		     "popfl\n\t"					\
-		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip)	\
+		     "popfl"						\
+		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
+		      "=a" (last),"=S" (esi),"=D" (edi)			\
 		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
-		      "a" (prev), "d" (next));				\
+		      "2" (prev), "d" (next));				\
 } while (0)
 
 #define _set_base(addr,base) do { unsigned long __pr; \
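The subtle part of the new switch_to() is how "last" gets its value: __switch_to() now returns the previous task in %eax, the "=a" (last) output constraint reads that register back after the call, and the matching constraint "2" (prev) feeds prev in through the very same register before the jump. The following stand-alone user-space sketch (ordinary GCC inline asm on x86; the variable names are made up for illustration and are not from this commit) shows the same output/matching-constraint pattern in isolation:

/* Stand-alone demonstration of the "=a"/"N" matching-constraint trick
 * used by the updated switch_to() macro.  Build with: gcc -O2 demo.c */
#include <stdio.h>

int main(void)
{
	unsigned int in = 41, out;

	/* "=a" (out) ties the output to %eax; the matching constraint
	 * "0" (in) loads `in` into that same register before the asm runs.
	 * switch_to() does the equivalent with "=a" (last) and "2" (prev):
	 * prev goes in via %eax, and whatever __switch_to() leaves in %eax
	 * comes back out as last. */
	asm volatile("incl %0" : "=a" (out) : "0" (in));

	printf("%u\n", out);	/* prints 42 */
	return 0;
}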
include/linux/init_task.h
@@ -62,6 +62,7 @@
 { \
 	.state = 0, \
 	.thread_info = &init_thread_info, \
+	.usage = ATOMIC_INIT(2), \
 	.flags = 0, \
 	.lock_depth = -1, \
 	.prio = MAX_PRIO-20, \
kernel/fork.c
@@ -74,6 +74,9 @@ int nr_processes(void)
 
 void __put_task_struct(struct task_struct *tsk)
 {
+	WARN_ON(!(tsk->state & (TASK_DEAD | TASK_ZOMBIE)));
+	WARN_ON(atomic_read(&tsk->usage));
+
 	if (tsk != current) {
 		free_thread_info(tsk->thread_info);
 		kmem_cache_free(task_struct_cachep,tsk);
@@ -217,7 +220,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	*tsk = *orig;
 	tsk->thread_info = ti;
 	ti->task = tsk;
-	atomic_set(&tsk->usage,1);
+
+	/* One for us, one for whoever does the "release_task()" (usually parent) */
+	atomic_set(&tsk->usage,2);
 	return tsk;
 }
kernel/sched.c
@@ -581,6 +581,8 @@ static inline void finish_task_switch(task_t *prev)
 	finish_arch_switch(rq, prev);
 	if (mm)
 		mmdrop(mm);
+	if (prev->state & (TASK_DEAD | TASK_ZOMBIE))
+		put_task_struct(prev);
 }
 
 /**
@@ -1185,7 +1187,7 @@ asmlinkage void schedule(void)
 	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
-	if (likely(current->state != TASK_ZOMBIE)) {
+	if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
 		if (unlikely(in_atomic())) {
 			printk(KERN_ERR "bad: scheduling while atomic!\n");
 			dump_stack();
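Taken together, the fork.c and sched.c hunks establish a simple hand-off of task_struct references: dup_task_struct() starts the usage count at 2 (one reference held by the task itself, one for whoever eventually calls release_task()), and finish_task_switch() drops the task's own reference only after the scheduler has switched away from a dead or zombie task, so neither the structure nor its kernel stack can be freed while another CPU is still running on them. The following is a rough user-space model of that counting discipline (plain C11, with made-up names such as dup_task() and finish_switch(); an illustrative sketch, not kernel code):

/* User-space model of the two-reference task lifetime introduced here.
 * Build with: gcc -std=c11 -O2 lifetime.c */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	atomic_int usage;
	int dead;
};

static struct task *dup_task(void)
{
	struct task *t = malloc(sizeof(*t));

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_init(&t->usage, 2);
	t->dead = 0;
	return t;
}

static void put_task(struct task *t)
{
	/* Free only when the last reference goes away. */
	if (atomic_fetch_sub(&t->usage, 1) == 1)
		free(t);
}

/* Mirrors finish_task_switch(): once we have switched away from a dead
 * task, drop the reference the task held on itself. */
static void finish_switch(struct task *prev)
{
	if (prev->dead)
		put_task(prev);
}

int main(void)
{
	struct task *t = dup_task();

	t->dead = 1;		/* the task exits */
	finish_switch(t);	/* scheduler's drop: usage 2 -> 1, still valid */
	put_task(t);		/* release-style drop: usage 1 -> 0, freed */
	printf("task freed only after both references were dropped\n");
	return 0;
}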