Commit 5688f16b authored Nov 23, 2007 by Linus Torvalds
Import 2.3.11pre8
parent c0967d8d
Showing 15 changed files with 151 additions and 132 deletions
Documentation/Configure.help          +9   -7
Documentation/kernel-parameters.txt  +31   -4
arch/i386/kernel/ldt.c                +1   -1
arch/i386/kernel/setup.c              +1   -1
drivers/char/lp.c                    +22  -22
fs/binfmt_aout.c                      +1   -1
fs/binfmt_elf.c                       +1   -1
fs/buffer.c                           +2   -2
fs/exec.c                            +26  -32
fs/proc/array.c                       +1   -1
include/asm-i386/pgtable.h            +3   -3
include/linux/sched.h                +11   -4
kernel/exit.c                         +9  -20
kernel/fork.c                        +17   -6
kernel/sched.c                       +16  -27
Documentation/Configure.help

@@ -11945,13 +11945,15 @@ CONFIG_USB_ACM
 Support for user-space parallel port device drivers
 CONFIG_PPDEV
-  Saying Y to this adds support for /dev/parport device nodes.  NB. You
-  have to make them before you can use them:
-	mknod /dev/parport00 c 99 0
-	mknod /dev/parport01 c 99 1
-	mknod /dev/parport10 c 99 16
-	mknod /dev/parport11 c 99 17
-  etc..
+  Saying Y to this adds support for /dev/parport device nodes.  This
+  is needed for programs that want portable access to the parallel
+  port, for instance deviceid (which displays Plug-and-Play device
+  IDs) and vlp (which makes a Linux computer act as though it's a
+  printer).
+
+  This is the parallel port equivalent of SCSI generic support (sg).
+  It is safe to say N to this -- it is not needed for normal printing
+  or parallel port CD-ROM/disk support.
 
 #
 # A couple of things I keep forgetting:
Documentation/kernel-parameters.txt

@@ -19,7 +19,7 @@ restrictions referred to are that the relevant option is valid if:
 	HW	Appropriate hardware is enabled.
 	ISDN	Appropriate ISDN support is enabled.
 	JOY	Appropriate joystick support is enabled.
-	LPT	Printer support is enabled.
+	LP	Printer support is enabled.
 	MCA	MCA bus support is enabled.
 	MDA	The MDA console is enabled.
 	MOUSE	Appropriate mouse support is enabled.
@@ -29,6 +29,7 @@ restrictions referred to are that the relevant option is valid if:
 	PCI	PCI bus support is enabled.
 	PCMCIA	The PCMCIA subsystem is enabled.
 	PNP	Plug & Play support is enabled.
+	PPT	Parallel port support is enabled.
 	PS2	Appropriate PS/2 support is enabled.
 	RAM	RAMdisc support is enabled.
 	SCSI	Appropriate SCSI support is enabled.
@@ -187,7 +188,21 @@ running once the system is up.
 	load_ramdisk=	[RAM]
-	lp=		[LPT]	Parallel Printer.
+	lp=0		[LP]	Specify parallel ports to use, e.g,
+	or lp=port[,port...]	lp=none,parport0 (lp0 not configured, lp1 uses
+	or lp=reset		first parallel port). 'lp=0' disables the printer
+	or lp=auto		driver. 'lp=reset' (which can be specified in
+				addition to the ports) causes attached
+				printers to be reset. Using
+				lp=port1,port2,... specifies the parallel
+				ports to associate lp devices with, starting
+				with lp0. A port specification may be 'none'
+				to skip that lp device, or a parport name such
+				as 'parport0'. Specifying 'lp=auto' instead
+				of a port specification list means that device
+				IDs from each port should be examined, to see
+				if an IEEE 1284-compliant printer is attached;
+				if so, the driver will manage that printer.
 	ltpc=		[HW]
@@ -244,7 +259,19 @@ running once the system is up.
 	panic=
-	parport=	[HW,LP]
+	parport=0	[HW,PPT]	Specify parallel ports. 0
+	or parport=auto			disables. Use 'auto' to force the driver
+	or parport=0xBBB[,IRQ[,DMA]]	to use any IRQ/DMA settings detected
+					(the default is to ignore detected
+					IRQ/DMA settings because of possible
+					conflicts). You can specify the base
+					address, IRQ, and DMA settings; IRQ
+					and DMA should be numbers or 'auto'
+					(for using detected settings on that
+					particular port). Parallel ports are
+					assigned in the order they are
+					specified on the command line,
+					starting with parport0.
 	pas16=		[HW,SCSI]
@@ -262,7 +289,7 @@ running once the system is up.
 	pirq=		[SMP,APIC]
-	plip=		[LP,NET]	Parallel port network link.
+	plip=		[PPT,NET]	Parallel port network link.
 	profile=
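
As a worked example (mine, not part of the patch, but consistent with the syntax documented above): a machine whose first parallel port sits at the conventional LPT1 settings could boot with

	parport=0x378,7 lp=parport0

which registers parport0 at I/O base 0x378 with IRQ 7 and attaches the lp0 printer device to it.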
arch/i386/kernel/ldt.c

@@ -94,7 +94,7 @@ static int write_ldt(void * ptr, unsigned long bytecount, int oldmode)
 	if (!mm->segments)
 		goto out_unlock;
 
-	if (atomic_read(&mm->count) > 1)
+	if (atomic_read(&mm->mm_users) > 1)
 		printk(KERN_WARNING "LDT allocated for cloned task!\n");
 	/*
 	 * Possibly do an SMP cross-call to other CPUs to reload
arch/i386/kernel/setup.c

@@ -1029,7 +1029,7 @@ void cpu_init (void)
 	/*
 	 * set up and load the per-CPU TSS and LDT
 	 */
-	mmget(&init_mm);
+	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 	t->esp0 = current->thread.esp0;
 	set_tss_desc(nr,t);
drivers/char/lp.c

@@ -333,6 +333,9 @@ static ssize_t lp_write(struct file * file, const char * buf,
 	if (copy_from_user (kbuf, buf, copy_size))
 		return -EFAULT;
 
+	if (down_interruptible (&lp_table[minor].port_mutex))
+		return -EINTR;
+
 	/* Claim Parport or sleep until it becomes available
 	 */
 	lp_parport_claim (minor);
@@ -341,10 +344,6 @@ static ssize_t lp_write(struct file * file, const char * buf,
 	parport_negotiate (port, IEEE1284_MODE_COMPAT);
 
 	do {
-		/* Wait until lp_read has finished. */
-		if (down_interruptible (&lp_table[minor].port_mutex))
-			break;
-
 		/* Write the data. */
 		written = parport_write (port, kbuf, copy_size);
 		if (written >= 0) {
@@ -354,8 +353,6 @@ static ssize_t lp_write(struct file * file, const char * buf,
 			retv += written;
 		}
 
-		up (&lp_table[minor].port_mutex);
-
 		if (signal_pending (current)) {
 			if (retv == 0)
 				retv = -EINTR;
@@ -392,6 +389,8 @@ static ssize_t lp_write(struct file * file, const char * buf,
 
 	lp_parport_release (minor);
 
+	up (&lp_table[minor].port_mutex);
+
 	return retv;
 }
@@ -414,29 +413,28 @@ static ssize_t lp_read(struct file * file, char * buf,
 	if (count > LP_BUFFER_SIZE)
 		count = LP_BUFFER_SIZE;
 
+	if (down_interruptible (&lp_table[minor].port_mutex))
+		return -EINTR;
+
 	lp_parport_claim (minor);
 
-	if (!down_interruptible (&lp_table[minor].port_mutex)) {
-		for (;;) {
-			retval = parport_read (port, kbuf, count);
+	for (;;) {
+		retval = parport_read (port, kbuf, count);
 
-			if (retval)
-				break;
+		if (retval)
+			break;
 
-			if (file->f_flags & O_NONBLOCK)
-				break;
+		if (file->f_flags & O_NONBLOCK)
+			break;
 
-			/* Wait for an interrupt. */
-			interruptible_sleep_on_timeout (&lp_table[minor].waitq,
-							LP_TIMEOUT_POLLED);
+		/* Wait for an interrupt. */
+		interruptible_sleep_on_timeout (&lp_table[minor].waitq,
+						LP_TIMEOUT_POLLED);
 
-			if (signal_pending (current)) {
-				retval = -EINTR;
-				break;
-			}
-		}
-		up (&lp_table[minor].port_mutex);
-	}
+		if (signal_pending (current)) {
+			retval = -EINTR;
+			break;
+		}
+	}
 
 	lp_parport_release (minor);
@@ -444,6 +442,8 @@ static ssize_t lp_read(struct file * file, char * buf,
 	if (retval > 0 && copy_to_user (buf, kbuf, retval))
 		retval = -EFAULT;
 
+	up (&lp_table[minor].port_mutex);
+
 	return retval;
 }
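
The drivers/char/lp.c hunks are a pure lock-scoping change: port_mutex used to be taken and dropped inside the transfer loops, so two concurrent writers could in principle interleave chunks of one job; it is now taken once before lp_parport_claim() and released once after lp_parport_release(), bracketing the whole transfer. A minimal user-space sketch of the resulting pattern (illustrative only; a pthread mutex stands in for the kernel semaphore and printf for parport_write()):

#include <pthread.h>
#include <stdio.h>

/* One mutex per device, as lp_table[minor].port_mutex is in the driver. */
static pthread_mutex_t port_mutex = PTHREAD_MUTEX_INITIALIZER;

/* New scheme: the lock brackets the whole transfer, so chunks of two
 * concurrent jobs can no longer interleave on the port. */
static void lp_write_job(const char *name, int nchunks)
{
	pthread_mutex_lock(&port_mutex);	/* old code locked per chunk */
	for (int i = 0; i < nchunks; i++)
		printf("%s chunk %d\n", name, i);	/* parport_write() stand-in */
	pthread_mutex_unlock(&port_mutex);
}

int main(void)
{
	lp_write_job("job-A", 3);
	lp_write_job("job-B", 3);
	return 0;
}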
fs/binfmt_aout.c

@@ -106,7 +106,7 @@ do_aout_core_dump(long signr, struct pt_regs * regs)
 #	define START_STACK(u)	(u.start_stack)
 #endif
 
-	if (!current->dumpable || atomic_read(&current->mm->count) != 1)
+	if (!current->dumpable || atomic_read(&current->mm->mm_users) != 1)
 		return 0;
 	current->dumpable = 0;
fs/binfmt_elf.c

@@ -1060,7 +1060,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs)
 	if (!current->dumpable ||
 	    limit < ELF_EXEC_PAGESIZE ||
-	    atomic_read(&current->mm->count) != 1)
+	    atomic_read(&current->mm->mm_users) != 1)
 		return 0;
 	current->dumpable = 0;
fs/buffer.c

@@ -1977,14 +1977,14 @@ asmlinkage int sys_bdflush(int func, long data)
 		 * to and from bdflush.
 		 */
 		user_mm = current->mm;
-		mmget(user_mm);
+		atomic_inc(&user_mm->mm_count);
 		current->mm = NULL;
 		/* active_mm is still 'user_mm' */
 
 		error = sync_old_buffers();
 
 		current->mm = user_mm;
-		mmput(current->active_mm);
+		mmdrop(current->active_mm);
 		current->active_mm = user_mm;
 
 		goto out;
fs/exec.c

@@ -365,17 +365,10 @@ int read_exec(struct dentry *dentry, unsigned long offset,
 static int exec_mmap(void)
 {
-	struct mm_struct * mm, * old_mm;
+	struct mm_struct * mm, * old_mm, * active_mm;
 
-	/*
-	 * NOTE: This works even if "old_mm" is a lazy
-	 * memory state. If count == 1 at this point,
-	 * we know that we're the only holders of that
-	 * lazy mm, so we can turn it into a real mm.
-	 */
-	old_mm = current->active_mm;
-	if (atomic_read(&old_mm->count) == 1) {
-		current->mm = old_mm;
+	old_mm = current->mm;
+	if (old_mm && atomic_read(&old_mm->mm_users) == 1) {
 		flush_cache_mm(old_mm);
 		mm_release();
 		release_segments(old_mm);
@@ -385,27 +378,28 @@ static int exec_mmap(void)
 	}
 
 	mm = mm_alloc();
-	if (!mm)
-		goto fail_nomem;
-
-	mm->cpu_vm_mask = (1UL << smp_processor_id());
-	mm->total_vm = 0;
-	mm->rss = 0;
-	mm->pgd = pgd_alloc();
-	if (!mm->pgd)
-		goto fail_free;
-
-	current->mm = mm;
-	current->active_mm = mm;
-	SET_PAGE_DIR(current, mm->pgd);
-	activate_context(current);
-	mm_release();
-	mmput(old_mm);
-	return 0;
-
-fail_free:
-	kmem_cache_free(mm_cachep, mm);
-fail_nomem:
+	if (mm) {
+		mm->cpu_vm_mask = (1UL << smp_processor_id());
+		mm->total_vm = 0;
+		mm->rss = 0;
+		mm->pgd = pgd_alloc();
+		if (mm->pgd) {
+			struct mm_struct * active_mm = current->active_mm;
+
+			current->mm = mm;
+			current->active_mm = mm;
+			SET_PAGE_DIR(current, mm->pgd);
+			activate_context(current);
+			mm_release();
+			if (old_mm) {
+				mmput(old_mm);
+				return 0;
+			}
+			mmdrop(active_mm);
+			return 0;
+		}
+		kmem_cache_free(mm_cachep, mm);
+	}
 	return -ENOMEM;
 }
@@ -625,7 +619,7 @@ int prepare_binprm(struct linux_binprm *bprm)
 	if (id_change || cap_raised) {
 		/* We can't suid-execute if we're sharing parts of the executable */
 		/* or if we're being traced (or if suid execs are not allowed)   */
-		/* (current->mm->count > 1 is ok, as we'll get a new mm anyway)  */
+		/* (current->mm->mm_users > 1 is ok, as we'll get a new mm anyway)  */
 		if (IS_NOSUID(inode)
 		    || must_not_trace_exec(current)
 		    || (atomic_read(&current->fs->count) > 1)
fs/proc/array.c

@@ -1115,7 +1115,7 @@ static ssize_t read_maps (int pid, struct file * file, char * buf,
 		goto getlen_out;
 
 	/* Check whether the mmaps could change if we sleep */
-	volatile_task = (p != current || atomic_read(&p->mm->count) > 1);
+	volatile_task = (p != current || atomic_read(&p->mm->mm_users) > 1);
 
 	/* decode f_pos */
 	lineno = *ppos >> MAPS_LINE_SHIFT;
include/asm-i386/pgtable.h

@@ -101,7 +101,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
 static inline void flush_tlb_current_task(void)
 {
 	/* just one copy of this mm? */
-	if (atomic_read(&current->mm->count) == 1)
+	if (atomic_read(&current->mm->mm_users) == 1)
 		local_flush_tlb();	/* and that's us, so.. */
 	else
 		smp_flush_tlb();
@@ -113,7 +113,7 @@ static inline void flush_tlb_current_task(void)
 static inline void flush_tlb_mm(struct mm_struct * mm)
 {
-	if (mm == current->mm && atomic_read(&mm->count) == 1)
+	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 		local_flush_tlb();
 	else
 		smp_flush_tlb();
@@ -122,7 +122,7 @@ static inline void flush_tlb_mm(struct mm_struct * mm)
 static inline void flush_tlb_page(struct vm_area_struct * vma,
 	unsigned long va)
 {
-	if (vma->vm_mm == current->mm && atomic_read(&current->mm->count) == 1)
+	if (vma->vm_mm == current->mm && atomic_read(&current->mm->mm_users) == 1)
 		__flush_tlb_one(va);
 	else
 		smp_flush_tlb();
include/linux/sched.h

@@ -170,7 +170,8 @@ struct mm_struct {
 	struct vm_area_struct * mmap_avl;	/* tree of VMAs */
 	struct vm_area_struct * mmap_cache;	/* last find_vma result */
 	pgd_t * pgd;
-	atomic_t count;
+	atomic_t mm_users;			/* How many users with user space? */
+	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
 	int map_count;				/* number of VMAs */
 	struct semaphore mmap_sem;
 	spinlock_t page_table_lock;
@@ -193,7 +194,7 @@ struct mm_struct {
 #define INIT_MM(name) {					\
 		&init_mmap, NULL, NULL,			\
 		swapper_pg_dir, 			\
-		ATOMIC_INIT(2), 1,			\
+		ATOMIC_INIT(2), ATOMIC_INIT(1), 1,	\
 		__MUTEX_INITIALIZER(name.mmap_sem),	\
 		SPIN_LOCK_UNLOCKED, 			\
 		0,					\
@@ -591,10 +592,16 @@ extern inline int capable(int cap)
  * Routines for handling mm_structs
  */
 extern struct mm_struct * mm_alloc(void);
-static inline void mmget(struct mm_struct * mm)
+
+/* mmdrop drops the mm and the page tables */
+extern inline void FASTCALL(__mmdrop(struct mm_struct *));
+static inline void mmdrop(struct mm_struct * mm)
 {
-	atomic_inc(&mm->count);
+	if (atomic_dec_and_test(&mm->mm_count))
+		__mmdrop(mm);
 }
+
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(void);
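
These sched.h hunks are the heart of the import: the single mm->count is split in two. mm_users counts tasks that actually use the user address space (clone() with CLONE_VM takes one, see kernel/fork.c below), while mm_count counts bare references to the struct mm_struct itself, with all of mm_users together holding exactly one of them; lazy-TLB borrowers such as cpu_init(), sys_bdflush() and the boot idle thread take only an mm_count reference. A self-contained user-space model of the intended lifetime rules (illustrative, not kernel code; plain ints instead of atomic_t):

#include <stdio.h>
#include <stdlib.h>

/* Model of the new scheme; names mirror the kernel's, semantics per the
 * comments in the hunk above. */
struct mm {
	int mm_users;	/* tasks with this user address space */
	int mm_count;	/* references to the struct itself; all
			   mm_users together count as one */
};

static struct mm *mm_alloc(void)
{
	struct mm *mm = calloc(1, sizeof(*mm));
	mm->mm_users = 1;	/* as in mm_alloc() in kernel/fork.c */
	mm->mm_count = 1;
	return mm;
}

static void mmdrop(struct mm *mm)	/* drop a structure reference */
{
	if (--mm->mm_count == 0) {
		printf("__mmdrop: free page tables and the struct\n");
		free(mm);
	}
}

static void mmput(struct mm *mm)	/* drop a user-space reference */
{
	if (--mm->mm_users == 0) {
		printf("mmput: tear down the mappings\n");
		mmdrop(mm);	/* the users' collective mm_count reference */
	}
}

int main(void)
{
	struct mm *mm = mm_alloc();
	mm->mm_users++;	/* clone(CLONE_VM): a second real user */
	mm->mm_count++;	/* a lazy-TLB kernel thread borrows the mm */
	mmput(mm);	/* one thread exits: nothing torn down yet */
	mmput(mm);	/* last user exits: mappings go away ... */
	mmdrop(mm);	/* ... struct survives until the borrower drops it */
	return 0;
}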
kernel/exit.c

@@ -229,31 +229,20 @@ void exit_sighand(struct task_struct *tsk)
 		__exit_sighand(tsk);
 }
 
+/*
+ * Turn us into a lazy TLB process if we
+ * aren't already..
+ */
 static inline void __exit_mm(struct task_struct * tsk)
 {
 	struct mm_struct * mm = tsk->mm;
 
-	/* Lazy TLB process? */
-	if (!mm) {
-		mm = tsk->active_mm;
-		goto drop_mm;
-	}
-
-	/* Set us up to use the kernel mm state */
-	flush_cache_mm(mm);
-	flush_tlb_mm(mm);
-	destroy_context(mm);
-	mm_release();
-
-	/* This turns us into a task with no MM */
-	tsk->mm = NULL;
-drop_mm:
-	mmget(&init_mm);
-	tsk->active_mm = &init_mm;
+	if (mm) {
+		mm_release();
+		atomic_inc(&mm->mm_count);
+		tsk->mm = NULL;
+		mmput(mm);
+	}
 	tsk->swappable = 0;
 	SET_PAGE_DIR(tsk, swapper_pg_dir);
-	mmput(mm);
 }
 
 void exit_mm(struct task_struct * tsk)
kernel/fork.c

@@ -301,7 +301,8 @@ struct mm_struct * mm_alloc(void)
 	if (mm) {
 		memset(mm, 0, sizeof(*mm));
 		init_new_context(mm);
-		atomic_set(&mm->count, 1);
+		atomic_set(&mm->mm_users, 1);
+		atomic_set(&mm->mm_count, 1);
 		init_MUTEX(&mm->mmap_sem);
 		mm->page_table_lock = SPIN_LOCK_UNLOCKED;
 	}
@@ -332,17 +333,27 @@ void mm_release(void)
 	}
 }
 
+/*
+ * Called when the last reference to the mm
+ * is dropped: either by a lazy thread or by
+ * mmput
+ */
+inline void __mmdrop(struct mm_struct *mm)
+{
+	if (mm == &init_mm) BUG();
+	free_page_tables(mm);
+	kmem_cache_free(mm_cachep, mm);
+}
+
 /*
  * Decrement the use count and release all resources for an mm.
  */
 void mmput(struct mm_struct *mm)
 {
-	if (atomic_dec_and_test(&mm->count)) {
-		if (mm == &init_mm) BUG();
+	if (atomic_dec_and_test(&mm->mm_users)) {
 		release_segments(mm);
 		exit_mmap(mm);
-		free_page_tables(mm);
-		kmem_cache_free(mm_cachep, mm);
+		mmdrop(mm);
 	}
 }
@@ -368,7 +379,7 @@ static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 		return 0;
 
 	if (clone_flags & CLONE_VM) {
-		mmget(mm);
+		atomic_inc(&mm->mm_users);
 		goto good_mm;
 	}
kernel/sched.c

@@ -156,8 +156,7 @@ void scheduling_functions_start_here(void) { }
  *	 +1000: realtime process, select this.
  */
 
-static inline int goodness (struct task_struct * prev,
-			    struct task_struct * p, int this_cpu)
+static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
 {
 	int weight;
@@ -190,7 +189,7 @@ static inline int goodness (struct task_struct * prev,
 #endif
 
 	/* .. and a slight advantage to the current MM */
-	if (p->mm == prev->mm)
+	if (p->mm == this_mm)
 		weight += 1;
 	weight += p->priority;
@@ -207,24 +206,22 @@ static inline int goodness (struct task_struct * prev,
  * to care about SCHED_YIELD is when we calculate the previous process'
  * goodness ...
  */
-static inline int prev_goodness (struct task_struct * prev,
-				 struct task_struct * p, int this_cpu)
+static inline int prev_goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
 {
 	if (p->policy & SCHED_YIELD) {
 		p->policy &= ~SCHED_YIELD;
 		return 0;
 	}
-	return goodness(prev, p, this_cpu);
+	return goodness(p, this_cpu, this_mm);
 }
 
 /*
  * the 'goodness value' of replacing a process on a given CPU.
  * positive value means 'replace', zero or negative means 'dont'.
  */
-static inline int preemption_goodness (struct task_struct * prev,
-				       struct task_struct * p, int cpu)
+static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
 {
-	return goodness(prev, p, cpu) - goodness(prev, prev, cpu);
+	return goodness(p, cpu, prev->mm) - goodness(prev, cpu, prev->mm);
 }
@@ -624,11 +621,14 @@ signed long schedule_timeout(signed long timeout)
  */
 static inline void __schedule_tail(struct task_struct *prev)
 {
-	struct mm_struct *mm = NULL;
 	if (!current->active_mm) BUG();
 	if (!prev->mm) {
-		mm = prev->active_mm;
-		prev->active_mm = NULL;
+		struct mm_struct *mm = prev->active_mm;
+		if (mm) {
+			prev->active_mm = NULL;
+			mmdrop(mm);
+		}
 	}
 #ifdef __SMP__
 	if ((prev->state == TASK_RUNNING) &&
@@ -637,16 +637,6 @@ static inline void __schedule_tail(struct task_struct *prev)
 	wmb();
 	prev->has_cpu = 0;
 #endif /* __SMP__ */
 	reacquire_kernel_lock(current);
-
-	/*
-	 * mmput can sleep. As such, we have to wait until
-	 * after we released "prev" back into the scheduler
-	 * pool and until we have re-aquired out locking
-	 * state until we can actually do this.
-	 */
-	if (mm)
-		mmput(mm);
 }
 
 void schedule_tail(struct task_struct *prev)
@@ -732,7 +722,7 @@ asmlinkage void schedule(void)
 	while (tmp != &runqueue_head) {
 		p = list_entry(tmp, struct task_struct, run_list);
 		if (can_schedule(p)) {
-			int weight = goodness(prev, p, this_cpu);
+			int weight = goodness(p, this_cpu, prev->active_mm);
 			if (weight > c)
 				c = weight, next = p;
 		}
@@ -805,14 +795,13 @@ asmlinkage void schedule(void)
 			set_mmu_context(prev, next);
 			if (next->active_mm) BUG();
 			next->active_mm = mm;
-			mmget(mm);
+			atomic_inc(&mm->mm_count);
 		}
 	}
-
 	get_mmu_context(next);
 	switch_to(prev, next, prev);
 	__schedule_tail(prev);
 	return;
 
 same_process:
 	reacquire_kernel_lock(current);
@@ -831,7 +820,7 @@ asmlinkage void schedule(void)
 	goto repeat_schedule;
 
 still_running:
-	c = prev_goodness(prev, prev, this_cpu);
+	c = prev_goodness(prev, this_cpu, prev->active_mm);
 	next = prev;
 	goto still_running_back;
@@ -2055,6 +2044,6 @@ void __init sched_init(void)
 	/*
 	 * The boot idle thread does lazy MMU switching as well:
 	 */
-	mmget(&init_mm);
+	atomic_inc(&init_mm.mm_count);
 }
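
Taken together, the scheduler hunks implement lazy TLB switching on top of the new counters: when the next task is a kernel thread with no mm of its own, it keeps running on the previous task's active_mm and takes only an mm_count reference, and when such a lazy task is later switched away, __schedule_tail() returns that reference with mmdrop(). Because mmdrop() only frees the structure and page tables and never tears down user mappings, it cannot sleep, which is why the deferred mmput() at the end of the old __schedule_tail() could be deleted. A compact user-space model of the hand-off (illustrative only, not kernel code):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct mm { int mm_users, mm_count; };
struct task { struct mm *mm, *active_mm; };

static void mmdrop(struct mm *mm)	/* never sleeps: struct-only teardown */
{
	if (--mm->mm_count == 0)
		free(mm);
}

/* schedule(): next is a kernel thread, so it borrows prev's address space */
static void lend_mm(struct task *prev, struct task *next)
{
	assert(!next->mm);
	next->active_mm = prev->active_mm;
	prev->active_mm->mm_count++;	/* structure reference, not a user */
}

/* __schedule_tail(): a lazy task leaving the CPU returns the borrowed mm */
static void return_mm(struct task *prev)
{
	if (!prev->mm && prev->active_mm) {
		struct mm *mm = prev->active_mm;
		prev->active_mm = NULL;
		mmdrop(mm);
	}
}

int main(void)
{
	struct mm *mm = calloc(1, sizeof(*mm));
	mm->mm_users = 1;
	mm->mm_count = 1;
	struct task user = { mm, mm }, kthread = { NULL, NULL };

	lend_mm(&user, &kthread);	/* user -> kthread: mm_count == 2 */
	return_mm(&kthread);		/* kthread switched away: mm_count == 1 */
	printf("mm_count = %d\n", mm->mm_count);
	mmdrop(mm);			/* drop the model's last reference */
	return 0;
}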