Commit 3192b2dc authored Feb 04, 2002 by Linus Torvalds
v2.4.0.1 -> v2.4.0.2

- ISDN fixes
- VM balancing tuning
parent 43e9282d
Showing 16 changed files with 122 additions and 198 deletions
Makefile                        +1  -1
arch/i386/Makefile              +1  -1
drivers/isdn/hisax/md5sums.asc  +6  -6
drivers/scsi/megaraid.c         +0  -1
drivers/scsi/ppa.c              +1  -1
fs/nfs/flushd.c                 +8  -6
include/linux/sched.h           +0  -1
include/linux/swap.h            +1  -1
kernel/fork.c                   +0  -1
mm/filemap.c                    +7  -4
mm/page_alloc.c                 +17 -29
mm/slab.c                       +1  -1
mm/vmscan.c                     +76 -142
net/ipv4/igmp.c                 +1  -1
net/ipv4/tcp.c                  +1  -1
net/ipv4/tcp_input.c            +1  -1
Makefile

 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 1
-EXTRAVERSION = -pre1
+EXTRAVERSION = -pre2

 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
arch/i386/Makefile

@@ -50,7 +50,7 @@ ifdef CONFIG_M686
 CFLAGS += -march=i686
 endif

-ifdef CONFIG_M686FXSR
+ifdef CONFIG_MPENTIUMIII
 CFLAGS += -march=i686
 endif
drivers/isdn/hisax/md5sums.asc

@@ -10,7 +10,7 @@
 ca7bd9bac39203f3074f3f093948cc3c isac.c
 a2ad619fd404b3149099a2984de9d23c isdnl1.c
 d2a78e407f3d94876deac160c6f9aae6 isdnl2.c
-a109841c2e75b11fc8ef2c8718e24c3e isdnl3.c
+e7932ca7ae39c497c17f13a2e1434fcd isdnl3.c
 afb5f2f4ac296d6de45c856993b161e1 tei.c
 00023e2a482cb86a26ea870577ade5d6 callc.c
 a1834e9b2ec068440cff2e899eff4710 cert.c

@@ -25,9 +25,9 @@ a1834e9b2ec068440cff2e899eff4710 cert.c
 Version: 2.6.3i
 Charset: noconv

-iQCVAwUBOlMTgDpxHvX/mS9tAQFSbgP/W9y6tnnWHTRLGqyr3EY1OHZiQXERkAAu
-hp+Y8PIoX1GgAh4yZ7xhYwUsk6y0z5USdGuhC9ZHh+oZd57lPsJMnhkEZR5BVsYT
-r7jHwelP527+QCLkVUCHIVIWUW0ANzeZBhDV2vefkFb+gWLiZsBhaHssbcKGsMNG
-Ak4xS1ByqsM=
-=lsIJ
+iQCVAwUBOlxeLTpxHvX/mS9tAQH6RwP8DhyvqAnXFV6WIGi16iQ3vKikkPoqnDQs
+GEn5uCW0dPYKlwthD2Grj/JbMYZhOmCFuDxF7ufJnjTSDe/D8XNe2wngxzAiwcIe
+WjCrT8X95cuP3HZHscbFTEinVV0GAnoI0ZEgs5eBDhVHDqILLYMaTFBQaRH3jgXc
+i5VH88jPfUM=
+=qc+J
 -----END PGP SIGNATURE-----
drivers/scsi/megaraid.c

@@ -149,7 +149,6 @@
 #include <linux/version.h>

 #ifdef MODULE
-#include <linux/modversions.h>
 #include <linux/module.h>

 char kernel_version[] = UTS_RELEASE;
drivers/scsi/ppa.c

@@ -222,8 +222,8 @@ int ppa_detect(Scsi_Host_Template * host)
 			printk("    supported by the imm (ZIP Plus) driver. If the\n");
 			printk("    cable is marked with \"AutoDetect\", this is what has\n");
 			printk("    happened.\n");
-			return 0;
+			spin_lock_irq(&io_request_lock);
+			return 0;
 		}
 		try_again = 1;
 		goto retry_entry;
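The fix restores the caller's locking expectation: ppa_detect() drops io_request_lock while probing, so the early "this looks like an imm device" exit has to re-take it before returning, just as the normal path does. A minimal userspace sketch of that invariant using POSIX threads (the lock name and the probing logic are illustrative, not taken from the driver):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t request_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller invokes this with request_lock held and expects it held on return. */
    static int detect_device(int looks_like_other_driver)
    {
        pthread_mutex_unlock(&request_lock);    /* drop the lock for slow probing */

        /* ... slow hardware probing would happen here ... */

        if (looks_like_other_driver) {
            /* Early exit: re-take the lock so the caller's expectation holds. */
            pthread_mutex_lock(&request_lock);
            return 0;
        }

        pthread_mutex_lock(&request_lock);      /* normal exit path */
        return 1;
    }

    int main(void)
    {
        pthread_mutex_lock(&request_lock);
        printf("detect: %d\n", detect_device(1));
        pthread_mutex_unlock(&request_lock);
        return 0;
    }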
fs/nfs/flushd.c

@@ -71,18 +71,17 @@ int nfs_reqlist_init(struct nfs_server *server)
 	int	status = 0;

 	dprintk("NFS: writecache_init\n");

-	/* Create the RPC task */
-	if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
-		return -ENOMEM;
-
 	spin_lock(&nfs_flushd_lock);
 	cache = server->rw_requests;
 	if (cache->task)
 		goto out_unlock;

+	/* Create the RPC task */
+	status = -ENOMEM;
+	task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
+	if (!task)
+		goto out_unlock;
+
 	task->tk_calldata = server;
 	cache->task = task;

@@ -99,6 +98,7 @@ int nfs_reqlist_init(struct nfs_server *server)
 	return 0;
 out_unlock:
 	spin_unlock(&nfs_flushd_lock);
+	rpc_release_task(task);
 	return status;
 }

@@ -195,7 +195,9 @@ void inode_remove_flushd(struct inode *inode)
 	if (*q) {
 		*q = inode->u.nfs_i.hash_next;
 		NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
+		spin_unlock(&nfs_flushd_lock);
+		iput(inode);
 		return;
 	}
 out:
 	spin_unlock(&nfs_flushd_lock);
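The reordering creates the RPC task only after checking, under nfs_flushd_lock, whether a flush daemon already exists for this server, and the error path backs out what was set up. A rough userspace model of that shape, with malloc and a pthread mutex standing in for rpc_new_task() and the kernel spinlock (all names here are illustrative):

    #include <pthread.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Stand-ins for the NFS flush-daemon state; not kernel types. */
    struct task  { int dummy; };
    struct cache { struct task *task; };

    static pthread_mutex_t flushd_lock = PTHREAD_MUTEX_INITIALIZER;

    static int reqlist_init(struct cache *cache)
    {
        struct task *task;
        int status = 0;

        pthread_mutex_lock(&flushd_lock);
        if (cache->task)                /* someone already started the daemon */
            goto out_unlock;

        status = -ENOMEM;
        task = malloc(sizeof(*task));   /* "create the task" only after the check */
        if (!task)
            goto out_unlock;

        cache->task = task;             /* publish while still holding the lock */
        status = 0;

    out_unlock:
        pthread_mutex_unlock(&flushd_lock);
        return status;
    }

    int main(void)
    {
        struct cache c = { 0 };
        return reqlist_init(&c) ? 1 : 0;
    }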
include/linux/sched.h

@@ -219,7 +219,6 @@ struct mm_struct {
 	unsigned long rss, total_vm, locked_vm;
 	unsigned long def_flags;
 	unsigned long cpu_vm_mask;
-	unsigned long swap_cnt;	/* number of pages to swap on next pass */
 	unsigned long swap_address;

 	/* Architecture-specific MM context */
include/linux/swap.h

@@ -107,7 +107,7 @@ extern wait_queue_head_t kreclaimd_wait;
 extern int page_launder(int, int);
 extern int free_shortage(void);
 extern int inactive_shortage(void);
-extern void wakeup_kswapd(int);
+extern void wakeup_kswapd(void);
 extern int try_to_free_pages(unsigned int gfp_mask);

 /* linux/mm/page_io.c */
kernel/fork.c

@@ -134,7 +134,6 @@ static inline int dup_mmap(struct mm_struct * mm)
 	mm->mmap_cache = NULL;
 	mm->map_count = 0;
 	mm->cpu_vm_mask = 0;
-	mm->swap_cnt = 0;
 	mm->swap_address = 0;
 	pprev = &mm->mmap;
 	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
mm/filemap.c

@@ -306,7 +306,7 @@ static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
 	 */
 	age_page_up(page);
 	if (inactive_shortage() > inactive_target / 2 && free_shortage())
-		wakeup_kswapd(0);
+		wakeup_kswapd();
 not_found:
 	return page;
 }

@@ -1835,7 +1835,8 @@ static long madvise_fixup_start(struct vm_area_struct * vma,
 	n->vm_end = end;
 	setup_read_behavior(n, behavior);
 	n->vm_raend = 0;
-	get_file(n->vm_file);
+	if (n->vm_file)
+		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
 	lock_vma_mappings(vma);

@@ -1861,7 +1862,8 @@ static long madvise_fixup_end(struct vm_area_struct * vma,
 	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
 	setup_read_behavior(n, behavior);
 	n->vm_raend = 0;
-	get_file(n->vm_file);
+	if (n->vm_file)
+		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
 	lock_vma_mappings(vma);

@@ -1893,7 +1895,8 @@ static long madvise_fixup_middle(struct vm_area_struct * vma,
 	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
 	left->vm_raend = 0;
 	right->vm_raend = 0;
-	atomic_add(2, &vma->vm_file->f_count);
+	if (vma->vm_file)
+		atomic_add(2, &vma->vm_file->f_count);
 	if (vma->vm_ops && vma->vm_ops->open) {
 		vma->vm_ops->open(left);
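All three madvise fixup helpers now take the extra file reference only when the mapping actually has a backing file; anonymous mappings have vm_file == NULL, and dereferencing it unconditionally would oops. A small sketch of the guarded-refcount idea, using toy struct definitions rather than the kernel's:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative stand-ins for struct file / vm_area_struct; not kernel types. */
    struct file { int f_count; };
    struct vma  { struct file *vm_file; };

    static void get_file(struct file *f)
    {
        f->f_count++;               /* would crash if f were NULL */
    }

    /* Duplicate a VMA's reference on its backing file, if it has one. */
    static void dup_file_ref(struct vma *n)
    {
        if (n->vm_file)             /* anonymous mappings have no file at all */
            get_file(n->vm_file);
    }

    int main(void)
    {
        struct file f = { 1 };
        struct vma file_backed = { &f };
        struct vma anonymous   = { NULL };

        dup_file_ref(&file_backed); /* refcount 1 -> 2 */
        dup_file_ref(&anonymous);   /* safely does nothing */

        printf("f_count = %d\n", f.f_count);
        return 0;
    }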
mm/page_alloc.c

@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>

 int nr_swap_pages;
 int nr_active_pages;

@@ -303,7 +304,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 	 * an inactive page shortage, wake up kswapd.
 	 */
 	if (inactive_shortage() > inactive_target / 2 && free_shortage())
-		wakeup_kswapd(0);
+		wakeup_kswapd();
 	/*
 	 * If we are about to get low on free pages and cleaning
 	 * the inactive_dirty pages would fix the situation,

@@ -379,7 +380,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 	 *   - if we don't have __GFP_IO set, kswapd may be
 	 *     able to free some memory we can't free ourselves
 	 */
-	wakeup_kswapd(0);
+	wakeup_kswapd();
 	if (gfp_mask & __GFP_WAIT) {
 		__set_current_state(TASK_RUNNING);
 		current->policy |= SCHED_YIELD;

@@ -404,7 +405,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 	 * - we're doing a higher-order allocation
 	 * 	--> move pages to the free list until we succeed
 	 * - we're /really/ tight on memory
-	 * 	--> wait on the kswapd waitqueue until memory is freed
+	 * 	--> try to free pages ourselves with page_launder
 	 */
 	if (!(current->flags & PF_MEMALLOC)) {
 		/*

@@ -443,36 +444,23 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 		/*
 		 * When we arrive here, we are really tight on memory.
 		 *
-		 * We wake up kswapd and sleep until kswapd wakes us
-		 * up again. After that we loop back to the start.
-		 *
-		 * We have to do this because something else might eat
-		 * the memory kswapd frees for us and we need to be
-		 * reliable. Note that we don't loop back for higher
-		 * order allocations since it is possible that kswapd
-		 * simply cannot free a large enough contiguous area
-		 * of memory *ever*.
-		 */
-		if ((gfp_mask & (__GFP_WAIT|__GFP_IO)) == (__GFP_WAIT|__GFP_IO)) {
-			wakeup_kswapd(1);
-			memory_pressure++;
-			if (!order)
-				goto try_again;
-		/*
-		 * If __GFP_IO isn't set, we can't wait on kswapd because
-		 * kswapd just might need some IO locks /we/ are holding ...
-		 *
-		 * SUBTLE: The scheduling point above makes sure that
-		 * kswapd does get the chance to free memory we can't
-		 * free ourselves...
+		 * We try to free pages ourselves by:
+		 *	- shrinking the i/d caches.
+		 *	- reclaiming unused memory from the slab caches.
+		 *	- swapping/syncing pages to disk (done by page_launder)
+		 *	- moving clean pages from the inactive dirty list to
+		 *	  the inactive clean list. (done by page_launder)
 		 */
-		} else if (gfp_mask & __GFP_WAIT) {
-			try_to_free_pages(gfp_mask);
-			memory_pressure++;
+		if (gfp_mask & __GFP_WAIT) {
+			shrink_icache_memory(6, gfp_mask);
+			shrink_dcache_memory(6, gfp_mask);
+			kmem_cache_reap(gfp_mask);
+			page_launder(gfp_mask, 1);
 			if (!order)
 				goto try_again;
 		}

 		/*
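With the blocking form of wakeup_kswapd() gone, a __GFP_WAIT allocation that is really tight on memory now reclaims directly (shrink the inode/dentry caches, reap slab, run page_launder) and retries order-0 requests, instead of sleeping until kswapd frees something. A toy model of that retry shape, with stub reclaim functions standing in for the kernel ones named in the hunk:

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace stand-ins for the reclaim helpers named in the patch. */
    static int freeable = 3;                    /* pretend-pages we can reclaim */
    static void shrink_icache_memory(void) { if (freeable) freeable--; }
    static void shrink_dcache_memory(void) { if (freeable) freeable--; }
    static void kmem_cache_reap(void)      { if (freeable) freeable--; }
    static void page_launder(void)         { /* would write dirty pages out */ }

    static bool zone_has_free_page(void)   { return freeable == 0; }

    /* Shape of the "really tight on memory" path for a 0-order, __GFP_WAIT caller. */
    static bool alloc_page_waitable(void)
    {
        for (;;) {
            if (zone_has_free_page())
                return true;                    /* the retried allocation succeeded */

            /* Free pages ourselves instead of sleeping on kswapd. */
            shrink_icache_memory();
            shrink_dcache_memory();
            kmem_cache_reap();
            page_launder();
            /* 0-order request: loop back and try the freelists again. */
        }
    }

    int main(void)
    {
        printf("allocated: %d\n", alloc_page_waitable());
        return 0;
    }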
mm/slab.c

@@ -1702,7 +1702,7 @@ static void enable_all_cpucaches (void)
  * kmem_cache_reap - Reclaim memory from caches.
  * @gfp_mask: the type of memory required.
  *
- * Called from try_to_free_page().
+ * Called from do_try_to_free_pages() and __alloc_pages()
  */
 void kmem_cache_reap (int gfp_mask)
 {
mm/vmscan.c

@@ -35,45 +35,21 @@
  * using a process that no longer actually exists (it might
  * have died while we slept).
  */
-static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table)
+static void try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page)
 {
 	pte_t pte;
 	swp_entry_t entry;
-	struct page * page;
-	int onlist;
-
-	pte = *page_table;
-	if (!pte_present(pte))
-		goto out_failed;
-	page = pte_page(pte);
-	if ((!VALID_PAGE(page)) || PageReserved(page))
-		goto out_failed;
-
-	if (!mm->swap_cnt)
-		return 1;
-
-	mm->swap_cnt--;
-
-	onlist = PageActive(page);
 	/* Don't look at this pte if it's been accessed recently. */
 	if (ptep_test_and_clear_young(page_table)) {
-		age_page_up(page);
-		goto out_failed;
+		page->age += PAGE_AGE_ADV;
+		if (page->age > PAGE_AGE_MAX)
+			page->age = PAGE_AGE_MAX;
+		return;
 	}
-	if (!onlist)
-		/* The page is still mapped, so it can't be freeable... */
-		age_page_down_ageonly(page);
-
-	/*
-	 * If the page is in active use by us, or if the page
-	 * is in active use by others, don't unmap it or
-	 * (worse) start unneeded IO.
-	 */
-	if (page->age > 0)
-		goto out_failed;

 	if (TryLockPage(page))
-		goto out_failed;
+		return;

 	/* From this point on, the odds are that we're going to
 	 * nuke this pte, so read and clear the pte.  This hook

@@ -87,9 +63,6 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table)
 	 * Is the page already in the swap cache? If so, then
 	 * we can just drop our reference to it without doing
 	 * any IO - it's already up-to-date on disk.
-	 *
-	 * Return 0, as we didn't actually free any real
-	 * memory, and we should just continue our scan.
 	 */
 	if (PageSwapCache(page)) {
 		entry.val = page->index;

@@ -103,8 +76,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table)
 		mm->rss--;
 		deactivate_page(page);
 		page_cache_release(page);
-out_failed:
-	return 0;
+	return;
 }

 /*
@@ -153,34 +125,20 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table)
 out_unlock_restore:
 	set_pte(page_table, pte);
 	UnlockPage(page);
-	return 0;
+	return;
 }

-/*
- * A new implementation of swap_out().  We do not swap complete processes,
- * but only a small number of blocks, before we continue with the next
- * process. The number of blocks actually swapped is determined on the
- * number of page faults, that this process actually had in the last time,
- * so we won't swap heavily used processes all the time ...
- *
- * Note: the priority argument is a hint on much CPU to waste with the
- *       swap block search, not a hint, of how much blocks to swap with
- *       each process.
- *
- * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
- */
-static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end)
+static int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count)
 {
 	pte_t * pte;
 	unsigned long pmd_end;

 	if (pmd_none(*dir))
-		return 0;
+		return count;
 	if (pmd_bad(*dir)) {
 		pmd_ERROR(*dir);
 		pmd_clear(dir);
-		return 0;
+		return count;
 	}

 	pte = pte_offset(dir, address);

@@ -190,28 +148,33 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end)
 		end = pmd_end;

 	do {
-		int result;
-
-		mm->swap_address = address + PAGE_SIZE;
-		result = try_to_swap_out(mm, vma, address, pte);
-		if (result)
-			return result;
+		if (pte_present(*pte)) {
+			struct page *page = pte_page(*pte);
+
+			if (VALID_PAGE(page) && !PageReserved(page)) {
+				try_to_swap_out(mm, vma, address, pte, page);
+				if (!--count)
+					break;
+			}
+		}
 		address += PAGE_SIZE;
 		pte++;
 	} while (address && (address < end));
-	return 0;
+	mm->swap_address = address + PAGE_SIZE;
+	return count;
 }

-static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end)
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count)
 {
 	pmd_t * pmd;
 	unsigned long pgd_end;

 	if (pgd_none(*dir))
-		return 0;
+		return count;
 	if (pgd_bad(*dir)) {
 		pgd_ERROR(*dir);
 		pgd_clear(dir);
-		return 0;
+		return count;
 	}

 	pmd = pmd_offset(dir, address);

@@ -221,23 +184,23 @@ static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end)
 		end = pgd_end;

 	do {
-		int result = swap_out_pmd(mm, vma, pmd, address, end);
-		if (result)
-			return result;
+		count = swap_out_pmd(mm, vma, pmd, address, end, count);
+		if (!count)
+			break;
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address && (address < end));
-	return 0;
+	return count;
 }

-static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address)
+static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count)
 {
 	pgd_t *pgdir;
 	unsigned long end;

 	/* Don't swap out areas which are locked down */
 	if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
-		return 0;
+		return count;

 	pgdir = pgd_offset(mm, address);

@@ -245,18 +208,17 @@ static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address)
 	if (address >= end)
 		BUG();
 	do {
-		int result = swap_out_pgd(mm, vma, pgdir, address, end);
-		if (result)
-			return result;
+		count = swap_out_pgd(mm, vma, pgdir, address, end, count);
+		if (!count)
+			break;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		pgdir++;
 	} while (address && (address < end));
-	return 0;
+	return count;
 }

-static int swap_out_mm(struct mm_struct * mm)
+static int swap_out_mm(struct mm_struct * mm, int count)
 {
-	int result = 0;
 	unsigned long address;
 	struct vm_area_struct* vma;

@@ -270,15 +232,14 @@ static int swap_out_mm(struct mm_struct * mm)
 	 */
 	spin_lock(&mm->page_table_lock);
 	address = mm->swap_address;
-	mm->swap_cnt = mm->rss >> 4;
 	vma = find_vma(mm, address);
 	if (vma) {
 		if (address < vma->vm_start)
 			address = vma->vm_start;

 		for (;;) {
-			result = swap_out_vma(mm, vma, address);
-			if (result)
+			count = swap_out_vma(mm, vma, address, count);
+			if (!count)
 				goto out_unlock;
 			vma = vma->vm_next;
 			if (!vma)

@@ -288,30 +249,39 @@ static int swap_out_mm(struct mm_struct * mm)
 	}
 	/* Reset to 0 when we reach the end of address space */
 	mm->swap_address = 0;
-	mm->swap_cnt = 0;

 out_unlock:
 	spin_unlock(&mm->page_table_lock);
-	return result;
+	return !count;
 }

-/*
- * Select the task with maximal swap_cnt and try to swap out a page.
- * N.B. This function returns only 0 or 1.  Return values != 1 from
- * the lower level routines result in continued processing.
- */
+#define SWAP_SHIFT 5
+#define SWAP_MIN 8
+
+static inline int swap_amount(struct mm_struct *mm)
+{
+	int nr = mm->rss >> SWAP_SHIFT;
+	return nr < SWAP_MIN ? SWAP_MIN : nr;
+}
+
 static int swap_out(unsigned int priority, int gfp_mask)
 {
 	int counter;
 	int retval = 0;
+	struct mm_struct *mm = current->mm;
+
+	/* Always start by trying to penalize the process that is allocating memory */
+	if (mm)
+		retval = swap_out_mm(mm, swap_amount(mm));

+	/* Then, look at the other mm's */
 	counter = mmlist_nr >> priority;
 	do {
 		struct list_head *p;
 		struct mm_struct *mm;

 		spin_lock(&mmlist_lock);
 		p = init_mm.mmlist.next;

@@ -327,13 +297,14 @@ static int swap_out(unsigned int priority, int gfp_mask)
 		atomic_inc(&mm->mm_users);
 		spin_unlock(&mmlist_lock);

-		retval |= swap_out_mm(mm);
+		/* Walk about 6% of the address space each time */
+		retval |= swap_out_mm(mm, swap_amount(mm));
 		mmput(mm);
 	} while (--counter >= 0);
 	return retval;

 empty:
 	spin_unlock(&mmlist_lock);
 	return 0;
 }
@@ -816,33 +787,35 @@ int inactive_shortage(void)
  * really care about latency. In that case we don't try
  * to free too many pages.
  */
+#define DEF_PRIORITY (6)
 static int refill_inactive(unsigned int gfp_mask, int user)
 {
-	int priority, count, start_count;
+	int count, start_count, maxtry;

 	count = inactive_shortage() + free_shortage();
 	if (user)
 		count = (1 << page_cluster);
 	start_count = count;

-	/* Always trim SLAB caches when memory gets low. */
-	kmem_cache_reap(gfp_mask);
-
-	priority = 6;
+	maxtry = 6;
 	do {
 		if (current->need_resched) {
 			__set_current_state(TASK_RUNNING);
 			schedule();
 		}

-		while (refill_inactive_scan(priority, 1)) {
+		while (refill_inactive_scan(DEF_PRIORITY, 1)) {
 			if (--count <= 0)
 				goto done;
 		}

 		/* If refill_inactive_scan failed, try to page stuff out.. */
-		swap_out(priority, gfp_mask);
-	} while (!inactive_shortage());
+		swap_out(DEF_PRIORITY, gfp_mask);
+
+		if (--maxtry <= 0)
+			return 0;
+	} while (inactive_shortage());
 done:
 	return (count < start_count);

@@ -872,20 +845,14 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
 		ret += refill_inactive(gfp_mask, user);

 	/*
-	 * Delete pages from the inode and dentry cache
-	 * if memory is low.
+	 * Delete pages from the inode and dentry caches and
+	 * reclaim unused slab cache if memory is low.
 	 */
 	if (free_shortage()) {
-		shrink_dcache_memory(6, gfp_mask);
-		shrink_icache_memory(6, gfp_mask);
-	} else {
-		/*
-		 * Reclaim unused slab cache memory.
-		 */
+		shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
+		shrink_icache_memory(DEF_PRIORITY, gfp_mask);
+		kmem_cache_reap(gfp_mask);
 		ret = 1;
 	}
 	}

 	return ret;
 }

@@ -938,13 +905,8 @@ int kswapd(void *unused)
 		static int recalc = 0;

 		/* If needed, try to free some memory. */
-		if (inactive_shortage() || free_shortage()) {
-			int wait = 0;
-			/* Do we need to do some synchronous flushing? */
-			if (waitqueue_active(&kswapd_done))
-				wait = 1;
-			do_try_to_free_pages(GFP_KSWAPD, wait);
-		}
+		if (inactive_shortage() || free_shortage())
+			do_try_to_free_pages(GFP_KSWAPD, 0);

 		/*
 		 * Do some (very minimal) background scanning. This

@@ -952,7 +914,7 @@ int kswapd(void *unused)
 		 * every minute. This clears old referenced bits
 		 * and moves unused pages to the inactive list.
 		 */
-		refill_inactive_scan(6, 0);
+		refill_inactive_scan(DEF_PRIORITY, 0);

 		/* Once a second, recalculate some VM stats. */
 		if (time_after(jiffies, recalc + HZ)) {

@@ -960,11 +922,6 @@ int kswapd(void *unused)
 			recalculate_vm_stats();
 		}

-		/*
-		 * Wake up everybody waiting for free memory
-		 * and unplug the disk queue.
-		 */
-		wake_up_all(&kswapd_done);
 		run_task_queue(&tq_disk);

 		/*

@@ -995,33 +952,10 @@ int kswapd(void *unused)
 	}
 }

-void wakeup_kswapd(int block)
+void wakeup_kswapd(void)
 {
-	DECLARE_WAITQUEUE(wait, current);
-
-	if (current == kswapd_task)
-		return;
-
-	if (!block) {
-		if (waitqueue_active(&kswapd_wait))
-			wake_up(&kswapd_wait);
-		return;
-	}
-
-	/*
-	 * Kswapd could wake us up before we get a chance
-	 * to sleep, so we have to be very careful here to
-	 * prevent SMP races...
-	 */
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	add_wait_queue(&kswapd_done, &wait);
-
-	if (waitqueue_active(&kswapd_wait))
-		wake_up(&kswapd_wait);
-	schedule();
-
-	remove_wait_queue(&kswapd_done, &wait);
-	__set_current_state(TASK_RUNNING);
+	if (current != kswapd_task)
+		wake_up_process(kswapd_task);
 }

 /*

@@ -1046,7 +980,7 @@ DECLARE_WAIT_QUEUE_HEAD(kreclaimd_wait);
 /*
  * Kreclaimd will move pages from the inactive_clean list to the
  * free list, in order to keep atomic allocations possible under
- * all circumstances. Even when kswapd is blocked on IO.
+ * all circumstances.
  */
 int kreclaimd(void *unused)
 {
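The rewritten swap-out path drops mm->swap_cnt entirely: swap_out_mm() hands each pass a budget of max(rss >> SWAP_SHIFT, SWAP_MIN) pages, every level of the page-table walk passes the remaining count down and returns what is left, and the walk stops early once the budget is spent. A compact userspace sketch of that budgeted walk (two levels instead of pgd/pmd/pte, with made-up sizes):

    #include <stdio.h>

    #define SWAP_SHIFT 5
    #define SWAP_MIN   8

    /* Budget for one pass over an mm: a slice of its resident set. */
    static int swap_amount(int rss)
    {
        int nr = rss >> SWAP_SHIFT;
        return nr < SWAP_MIN ? SWAP_MIN : nr;
    }

    /* Leaf level: "examine" one page, spend one unit of budget. */
    static int scan_pte_level(int pages, int count)
    {
        for (int i = 0; i < pages; i++) {
            /* try_to_swap_out() would run here */
            if (!--count)
                break;              /* budget exhausted: stop early */
        }
        return count;               /* hand the remainder back up */
    }

    /* Upper level: keep walking only while some budget is left. */
    static int scan_pmd_level(int entries, int pages_per_entry, int count)
    {
        for (int i = 0; i < entries; i++) {
            count = scan_pte_level(pages_per_entry, count);
            if (!count)
                break;
        }
        return count;
    }

    int main(void)
    {
        int budget = swap_amount(1024);        /* rss of 1024 pages -> budget 32 */
        int left   = scan_pmd_level(4, 16, budget);
        printf("budget %d, unused %d\n", budget, left);
        return 0;
    }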
net/ipv4/igmp.c

@@ -504,8 +504,8 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
 	im->timer.function=&igmp_timer_expire;
 	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
 	im->reporter = 0;
-	im->loaded = 0;
 #endif
+	im->loaded = 0;
 	write_lock_bh(&in_dev->lock);
 	im->next = in_dev->mc_list;
 	in_dev->mc_list = im;
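Moving im->loaded = 0; below the #endif makes the initialization unconditional, so the field no longer holds whatever the allocator left behind when the conditional block is compiled out. A minimal illustration of the hazard (CONFIG_FOO is a stand-in for the real config option):

    #include <stdio.h>
    #include <stdlib.h>

    struct mc_entry {
        int reporter;
        int loaded;
    };

    /* #define CONFIG_FOO 1 */       /* flip this to see both build flavours */

    static struct mc_entry *make_entry(void)
    {
        struct mc_entry *im = malloc(sizeof(*im));  /* contents are indeterminate */
        if (!im)
            return NULL;
    #ifdef CONFIG_FOO
        im->reporter = 0;
        /* initializing 'loaded' only inside this block would leave it garbage
         * in builds where CONFIG_FOO is off */
    #endif
        im->loaded = 0;              /* unconditional, as in the fixed code */
        return im;
    }

    int main(void)
    {
        struct mc_entry *im = make_entry();
        if (!im)
            return 1;
        printf("loaded = %d\n", im->loaded);
        free(im);
        return 0;
    }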
net/ipv4/tcp.c

@@ -954,7 +954,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
 		 */
 		skb = sk->write_queue.prev;
 		if (tp->send_head &&
-		    (mss_now - skb->len) > 0) {
+		    (mss_now > skb->len)) {
 			copy = skb->len;
 			if (skb_tailroom(skb) > 0) {
 				int last_byte_was_odd = (copy % 4);
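The rewritten test avoids subtracting two lengths that are evaluated as unsigned: when skb->len exceeds mss_now, the difference wraps to a huge positive value and "(mss_now - skb->len) > 0" is true in exactly the case it was meant to reject, while "mss_now > skb->len" cannot wrap. A small standalone demonstration with toy values (both operands unsigned, as sk_buff lengths are):

    #include <stdio.h>

    int main(void)
    {
        unsigned int mss_now = 1460;    /* typical MSS              */
        unsigned int skb_len = 2000;    /* packet already larger    */

        /* Old-style test: unsigned subtraction wraps around, so this is "true". */
        if ((mss_now - skb_len) > 0)
            printf("old test: would wrongly try to append (diff = %u)\n",
                   mss_now - skb_len);

        /* New-style test: direct comparison, no wraparound. */
        if (mss_now > skb_len)
            printf("new test: append to the last packet\n");
        else
            printf("new test: leave the last packet alone\n");

        return 0;
    }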
net/ipv4/tcp_input.c

@@ -1705,7 +1705,7 @@ static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
 		if ((__s32)when < (__s32)tp->rttvar)
 			when = tp->rttvar;
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, when);
+		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, min(when, TCP_RTO_MAX));
 	}
 }
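The retransmit timer is now armed with the computed timeout clamped to TCP_RTO_MAX, so a pathological rttvar cannot push the timer out indefinitely. A trivial sketch of the clamp with made-up units (the real TCP_RTO_MAX is defined in jiffies):

    #include <stdio.h>

    #define TCP_RTO_MAX_TICKS (120 * 100)   /* illustrative cap, not the kernel value */

    static long min_long(long a, long b) { return a < b ? a : b; }

    int main(void)
    {
        long when = 250000;                 /* oversized timeout from the RTT math */
        long armed = min_long(when, TCP_RTO_MAX_TICKS);
        printf("timer armed for %ld ticks\n", armed);
        return 0;
    }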