Commit 3192b2dc authored by Linus Torvalds

v2.4.0.1 -> v2.4.0.2

  - ISDN fixes
  - VM balancing tuning
parent 43e9282d
 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 1
-EXTRAVERSION =-pre1
+EXTRAVERSION =-pre2
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
...
@@ -50,7 +50,7 @@ ifdef CONFIG_M686
 CFLAGS += -march=i686
 endif
-ifdef CONFIG_M686FXSR
+ifdef CONFIG_MPENTIUMIII
 CFLAGS += -march=i686
 endif
...
@@ -10,7 +10,7 @@
 ca7bd9bac39203f3074f3f093948cc3c isac.c
 a2ad619fd404b3149099a2984de9d23c isdnl1.c
 d2a78e407f3d94876deac160c6f9aae6 isdnl2.c
-a109841c2e75b11fc8ef2c8718e24c3e isdnl3.c
+e7932ca7ae39c497c17f13a2e1434fcd isdnl3.c
 afb5f2f4ac296d6de45c856993b161e1 tei.c
 00023e2a482cb86a26ea870577ade5d6 callc.c
 a1834e9b2ec068440cff2e899eff4710 cert.c
@@ -25,9 +25,9 @@ a1834e9b2ec068440cff2e899eff4710 cert.c
 Version: 2.6.3i
 Charset: noconv
-iQCVAwUBOlMTgDpxHvX/mS9tAQFSbgP/W9y6tnnWHTRLGqyr3EY1OHZiQXERkAAu
-hp+Y8PIoX1GgAh4yZ7xhYwUsk6y0z5USdGuhC9ZHh+oZd57lPsJMnhkEZR5BVsYT
-r7jHwelP527+QCLkVUCHIVIWUW0ANzeZBhDV2vefkFb+gWLiZsBhaHssbcKGsMNG
-Ak4xS1ByqsM=
-=lsIJ
+iQCVAwUBOlxeLTpxHvX/mS9tAQH6RwP8DhyvqAnXFV6WIGi16iQ3vKikkPoqnDQs
+GEn5uCW0dPYKlwthD2Grj/JbMYZhOmCFuDxF7ufJnjTSDe/D8XNe2wngxzAiwcIe
+WjCrT8X95cuP3HZHscbFTEinVV0GAnoI0ZEgs5eBDhVHDqILLYMaTFBQaRH3jgXc
+i5VH88jPfUM=
+=qc+J
 -----END PGP SIGNATURE-----
@@ -149,7 +149,6 @@
 #include <linux/version.h>
 #ifdef MODULE
-#include <linux/modversions.h>
 #include <linux/module.h>
 char kernel_version[] = UTS_RELEASE;
...
@@ -222,8 +222,8 @@ int ppa_detect(Scsi_Host_Template * host)
 printk(" supported by the imm (ZIP Plus) driver. If the\n");
 printk(" cable is marked with \"AutoDetect\", this is what has\n");
 printk(" happened.\n");
-return 0;
 spin_lock_irq(&io_request_lock);
+return 0;
 }
 try_again = 1;
 goto retry_entry;
...
@@ -71,18 +71,17 @@ int nfs_reqlist_init(struct nfs_server *server)
 int status = 0;
 dprintk("NFS: writecache_init\n");
-/* Create the RPC task */
-if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
-return -ENOMEM;
 spin_lock(&nfs_flushd_lock);
 cache = server->rw_requests;
 if (cache->task)
 goto out_unlock;
+/* Create the RPC task */
+status = -ENOMEM;
+task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
+if (!task)
+goto out_unlock;
 task->tk_calldata = server;
 cache->task = task;
@@ -99,6 +98,7 @@ int nfs_reqlist_init(struct nfs_server *server)
 return 0;
 out_unlock:
 spin_unlock(&nfs_flushd_lock);
+rpc_release_task(task);
 return status;
 }
@@ -195,7 +195,9 @@ void inode_remove_flushd(struct inode *inode)
 if (*q) {
 *q = inode->u.nfs_i.hash_next;
 NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
+spin_unlock(&nfs_flushd_lock);
 iput(inode);
+return;
 }
 out:
 spin_unlock(&nfs_flushd_lock);
...
@@ -219,7 +219,6 @@ struct mm_struct {
 unsigned long rss, total_vm, locked_vm;
 unsigned long def_flags;
 unsigned long cpu_vm_mask;
-unsigned long swap_cnt; /* number of pages to swap on next pass */
 unsigned long swap_address;
 /* Architecture-specific MM context */
...
@@ -107,7 +107,7 @@ extern wait_queue_head_t kreclaimd_wait;
 extern int page_launder(int, int);
 extern int free_shortage(void);
 extern int inactive_shortage(void);
-extern void wakeup_kswapd(int);
+extern void wakeup_kswapd(void);
 extern int try_to_free_pages(unsigned int gfp_mask);
 /* linux/mm/page_io.c */
...
@@ -134,7 +134,6 @@ static inline int dup_mmap(struct mm_struct * mm)
 mm->mmap_cache = NULL;
 mm->map_count = 0;
 mm->cpu_vm_mask = 0;
-mm->swap_cnt = 0;
 mm->swap_address = 0;
 pprev = &mm->mmap;
 for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
...
@@ -306,7 +306,7 @@ static inline struct page * __find_page_nolock(struct address_space *mapping, un
 */
 age_page_up(page);
 if (inactive_shortage() > inactive_target / 2 && free_shortage())
-wakeup_kswapd(0);
+wakeup_kswapd();
 not_found:
 return page;
 }
@@ -1835,7 +1835,8 @@ static long madvise_fixup_start(struct vm_area_struct * vma,
 n->vm_end = end;
 setup_read_behavior(n, behavior);
 n->vm_raend = 0;
-get_file(n->vm_file);
+if (n->vm_file)
+get_file(n->vm_file);
 if (n->vm_ops && n->vm_ops->open)
 n->vm_ops->open(n);
 lock_vma_mappings(vma);
@@ -1861,7 +1862,8 @@ static long madvise_fixup_end(struct vm_area_struct * vma,
 n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
 setup_read_behavior(n, behavior);
 n->vm_raend = 0;
-get_file(n->vm_file);
+if (n->vm_file)
+get_file(n->vm_file);
 if (n->vm_ops && n->vm_ops->open)
 n->vm_ops->open(n);
 lock_vma_mappings(vma);
@@ -1893,7 +1895,8 @@ static long madvise_fixup_middle(struct vm_area_struct * vma,
 right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
 left->vm_raend = 0;
 right->vm_raend = 0;
-atomic_add(2, &vma->vm_file->f_count);
+if (vma->vm_file)
+atomic_add(2, &vma->vm_file->f_count);
 if (vma->vm_ops && vma->vm_ops->open) {
 vma->vm_ops->open(left);
...
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>
 int nr_swap_pages;
 int nr_active_pages;
@@ -303,7 +304,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 * an inactive page shortage, wake up kswapd.
 */
 if (inactive_shortage() > inactive_target / 2 && free_shortage())
-wakeup_kswapd(0);
+wakeup_kswapd();
 /*
 * If we are about to get low on free pages and cleaning
 * the inactive_dirty pages would fix the situation,
@@ -379,7 +380,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 * - if we don't have __GFP_IO set, kswapd may be
 * able to free some memory we can't free ourselves
 */
-wakeup_kswapd(0);
+wakeup_kswapd();
 if (gfp_mask & __GFP_WAIT) {
 __set_current_state(TASK_RUNNING);
 current->policy |= SCHED_YIELD;
@@ -404,7 +405,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 * - we're doing a higher-order allocation
 * --> move pages to the free list until we succeed
 * - we're /really/ tight on memory
-* --> wait on the kswapd waitqueue until memory is freed
+* --> try to free pages ourselves with page_launder
 */
 if (!(current->flags & PF_MEMALLOC)) {
 /*
@@ -443,36 +444,23 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
 /*
 * When we arrive here, we are really tight on memory.
 *
-* We wake up kswapd and sleep until kswapd wakes us
-* up again. After that we loop back to the start.
-*
-* We have to do this because something else might eat
-* the memory kswapd frees for us and we need to be
-* reliable. Note that we don't loop back for higher
-* order allocations since it is possible that kswapd
-* simply cannot free a large enough contiguous area
-* of memory *ever*.
-*/
-if ((gfp_mask & (__GFP_WAIT|__GFP_IO)) == (__GFP_WAIT|__GFP_IO)) {
-wakeup_kswapd(1);
-memory_pressure++;
-if (!order)
-goto try_again;
-/*
-* If __GFP_IO isn't set, we can't wait on kswapd because
-* kswapd just might need some IO locks /we/ are holding ...
-*
-* SUBTLE: The scheduling point above makes sure that
-* kswapd does get the chance to free memory we can't
-* free ourselves...
+* We try to free pages ourselves by:
+* - shrinking the i/d caches.
+* - reclaiming unused memory from the slab caches.
+* - swapping/syncing pages to disk (done by page_launder)
+* - moving clean pages from the inactive dirty list to
+*   the inactive clean list. (done by page_launder)
 */
-} else if (gfp_mask & __GFP_WAIT) {
-try_to_free_pages(gfp_mask);
-memory_pressure++;
+if (gfp_mask & __GFP_WAIT) {
+shrink_icache_memory(6, gfp_mask);
+shrink_dcache_memory(6, gfp_mask);
+kmem_cache_reap(gfp_mask);
+page_launder(gfp_mask, 1);
 if (!order)
 goto try_again;
 }
 }
 /*
...
@@ -1702,7 +1702,7 @@ static void enable_all_cpucaches (void)
 * kmem_cache_reap - Reclaim memory from caches.
 * @gfp_mask: the type of memory required.
 *
-* Called from try_to_free_page().
+* Called from do_try_to_free_pages() and __alloc_pages()
 */
 void kmem_cache_reap (int gfp_mask)
 {
...
@@ -35,45 +35,21 @@
 * using a process that no longer actually exists (it might
 * have died while we slept).
 */
-static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table)
+static void try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page)
 {
 pte_t pte;
 swp_entry_t entry;
-struct page * page;
-int onlist;
-pte = *page_table;
-if (!pte_present(pte))
-goto out_failed;
-page = pte_page(pte);
-if ((!VALID_PAGE(page)) || PageReserved(page))
-goto out_failed;
-if (!mm->swap_cnt)
-return 1;
-mm->swap_cnt--;
-onlist = PageActive(page);
 /* Don't look at this pte if it's been accessed recently. */
 if (ptep_test_and_clear_young(page_table)) {
-age_page_up(page);
-goto out_failed;
+page->age += PAGE_AGE_ADV;
+if (page->age > PAGE_AGE_MAX)
+page->age = PAGE_AGE_MAX;
+return;
 }
-if (!onlist)
-/* The page is still mapped, so it can't be freeable... */
-age_page_down_ageonly(page);
-/*
-* If the page is in active use by us, or if the page
-* is in active use by others, don't unmap it or
-* (worse) start unneeded IO.
-*/
-if (page->age > 0)
-goto out_failed;
 if (TryLockPage(page))
-goto out_failed;
+return;
 /* From this point on, the odds are that we're going to
 * nuke this pte, so read and clear the pte. This hook
@@ -87,9 +63,6 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 * Is the page already in the swap cache? If so, then
 * we can just drop our reference to it without doing
 * any IO - it's already up-to-date on disk.
-*
-* Return 0, as we didn't actually free any real
-* memory, and we should just continue our scan.
 */
 if (PageSwapCache(page)) {
 entry.val = page->index;
@@ -103,8 +76,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 mm->rss--;
 deactivate_page(page);
 page_cache_release(page);
-out_failed:
-return 0;
+return;
 }
 /*
@@ -153,34 +125,20 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 out_unlock_restore:
 set_pte(page_table, pte);
 UnlockPage(page);
-return 0;
+return;
 }
-/*
-* A new implementation of swap_out(). We do not swap complete processes,
-* but only a small number of blocks, before we continue with the next
-* process. The number of blocks actually swapped is determined on the
-* number of page faults, that this process actually had in the last time,
-* so we won't swap heavily used processes all the time ...
-*
-* Note: the priority argument is a hint on much CPU to waste with the
-* swap block search, not a hint, of how much blocks to swap with
-* each process.
-*
-* (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
-*/
-static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end)
+static int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count)
 {
 pte_t * pte;
 unsigned long pmd_end;
 if (pmd_none(*dir))
-return 0;
+return count;
 if (pmd_bad(*dir)) {
 pmd_ERROR(*dir);
 pmd_clear(dir);
-return 0;
+return count;
 }
 pte = pte_offset(dir, address);
@@ -190,28 +148,33 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
 end = pmd_end;
 do {
-int result;
-mm->swap_address = address + PAGE_SIZE;
-result = try_to_swap_out(mm, vma, address, pte);
-if (result)
-return result;
+if (pte_present(*pte)) {
+struct page *page = pte_page(*pte);
+if (VALID_PAGE(page) && !PageReserved(page)) {
+try_to_swap_out(mm, vma, address, pte, page);
+if (--count)
+break;
+}
+}
 address += PAGE_SIZE;
 pte++;
 } while (address && (address < end));
-return 0;
+mm->swap_address = address + PAGE_SIZE;
+return count;
 }
-static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end)
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count)
 {
 pmd_t * pmd;
 unsigned long pgd_end;
 if (pgd_none(*dir))
-return 0;
+return count;
 if (pgd_bad(*dir)) {
 pgd_ERROR(*dir);
 pgd_clear(dir);
-return 0;
+return count;
 }
 pmd = pmd_offset(dir, address);
@@ -221,23 +184,23 @@ static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vm
 end = pgd_end;
 do {
-int result = swap_out_pmd(mm, vma, pmd, address, end);
-if (result)
-return result;
+count = swap_out_pmd(mm, vma, pmd, address, end, count);
+if (!count)
+break;
 address = (address + PMD_SIZE) & PMD_MASK;
 pmd++;
 } while (address && (address < end));
-return 0;
+return count;
 }
-static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address)
+static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count)
 {
 pgd_t *pgdir;
 unsigned long end;
 /* Don't swap out areas which are locked down */
 if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
-return 0;
+return count;
 pgdir = pgd_offset(mm, address);
@@ -245,18 +208,17 @@ static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsi
 if (address >= end)
 BUG();
 do {
-int result = swap_out_pgd(mm, vma, pgdir, address, end);
-if (result)
-return result;
+count = swap_out_pgd(mm, vma, pgdir, address, end, count);
+if (!count)
+break;
 address = (address + PGDIR_SIZE) & PGDIR_MASK;
 pgdir++;
 } while (address && (address < end));
-return 0;
+return count;
 }
-static int swap_out_mm(struct mm_struct * mm)
+static int swap_out_mm(struct mm_struct * mm, int count)
 {
-int result = 0;
 unsigned long address;
 struct vm_area_struct* vma;
@@ -270,15 +232,14 @@ static int swap_out_mm(struct mm_struct * mm)
 */
 spin_lock(&mm->page_table_lock);
 address = mm->swap_address;
-mm->swap_cnt = mm->rss >> 4;
 vma = find_vma(mm, address);
 if (vma) {
 if (address < vma->vm_start)
 address = vma->vm_start;
 for (;;) {
-result = swap_out_vma(mm, vma, address);
-if (result)
+count = swap_out_vma(mm, vma, address, count);
+if (!count)
 goto out_unlock;
 vma = vma->vm_next;
 if (!vma)
@@ -288,30 +249,39 @@ static int swap_out_mm(struct mm_struct * mm)
 }
 /* Reset to 0 when we reach the end of address space */
 mm->swap_address = 0;
-mm->swap_cnt = 0;
 out_unlock:
 spin_unlock(&mm->page_table_lock);
-return result;
+return !count;
 }
 /*
-* Select the task with maximal swap_cnt and try to swap out a page.
 * N.B. This function returns only 0 or 1. Return values != 1 from
 * the lower level routines result in continued processing.
 */
 #define SWAP_SHIFT 5
 #define SWAP_MIN 8
+static inline int swap_amount(struct mm_struct *mm)
+{
+int nr = mm->rss >> SWAP_SHIFT;
+return nr < SWAP_MIN ? SWAP_MIN : nr;
+}
 static int swap_out(unsigned int priority, int gfp_mask)
 {
 int counter;
 int retval = 0;
+struct mm_struct *mm = current->mm;
+/* Always start by trying to penalize the process that is allocating memory */
+if (mm)
+retval = swap_out_mm(mm, swap_amount(mm));
+/* Then, look at the other mm's */
 counter = mmlist_nr >> priority;
 do {
 struct list_head *p;
-struct mm_struct *mm;
 spin_lock(&mmlist_lock);
 p = init_mm.mmlist.next;
@@ -327,13 +297,14 @@ static int swap_out(unsigned int priority, int gfp_mask)
 atomic_inc(&mm->mm_users);
 spin_unlock(&mmlist_lock);
-retval |= swap_out_mm(mm);
+/* Walk about 6% of the address space each time */
+retval |= swap_out_mm(mm, swap_amount(mm));
 mmput(mm);
 } while (--counter >= 0);
 return retval;
 empty:
-spin_lock(&mmlist_lock);
+spin_unlock(&mmlist_lock);
 return 0;
 }
@@ -816,33 +787,35 @@ int inactive_shortage(void)
 * really care about latency. In that case we don't try
 * to free too many pages.
 */
+#define DEF_PRIORITY (6)
 static int refill_inactive(unsigned int gfp_mask, int user)
 {
-int priority, count, start_count;
+int count, start_count, maxtry;
 count = inactive_shortage() + free_shortage();
 if (user)
 count = (1 << page_cluster);
 start_count = count;
-/* Always trim SLAB caches when memory gets low. */
-kmem_cache_reap(gfp_mask);
-priority = 6;
+maxtry = 6;
 do {
 if (current->need_resched) {
 __set_current_state(TASK_RUNNING);
 schedule();
 }
-while (refill_inactive_scan(priority, 1)) {
+while (refill_inactive_scan(DEF_PRIORITY, 1)) {
 if (--count <= 0)
 goto done;
 }
 /* If refill_inactive_scan failed, try to page stuff out.. */
-swap_out(priority, gfp_mask);
-} while (!inactive_shortage());
+swap_out(DEF_PRIORITY, gfp_mask);
+if (--maxtry <= 0)
+return 0;
+} while (inactive_shortage());
 done:
 return (count < start_count);
@@ -872,20 +845,14 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
 ret += refill_inactive(gfp_mask, user);
 /*
-* Delete pages from the inode and dentry cache
-* if memory is low.
+* Delete pages from the inode and dentry caches and
+* reclaim unused slab cache if memory is low.
 */
 if (free_shortage()) {
-shrink_dcache_memory(6, gfp_mask);
-shrink_icache_memory(6, gfp_mask);
-} else {
-/*
-* Reclaim unused slab cache memory.
-*/
+shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
+shrink_icache_memory(DEF_PRIORITY, gfp_mask);
 kmem_cache_reap(gfp_mask);
-ret = 1;
-}
+}
 return ret;
 }
@@ -938,13 +905,8 @@ int kswapd(void *unused)
 static int recalc = 0;
 /* If needed, try to free some memory. */
-if (inactive_shortage() || free_shortage()) {
-int wait = 0;
-/* Do we need to do some synchronous flushing? */
-if (waitqueue_active(&kswapd_done))
-wait = 1;
-do_try_to_free_pages(GFP_KSWAPD, wait);
-}
+if (inactive_shortage() || free_shortage())
+do_try_to_free_pages(GFP_KSWAPD, 0);
 /*
 * Do some (very minimal) background scanning. This
@@ -952,7 +914,7 @@ int kswapd(void *unused)
 * every minute. This clears old referenced bits
 * and moves unused pages to the inactive list.
 */
-refill_inactive_scan(6, 0);
+refill_inactive_scan(DEF_PRIORITY, 0);
 /* Once a second, recalculate some VM stats. */
 if (time_after(jiffies, recalc + HZ)) {
@@ -960,11 +922,6 @@ int kswapd(void *unused)
 recalculate_vm_stats();
 }
-/*
-* Wake up everybody waiting for free memory
-* and unplug the disk queue.
-*/
-wake_up_all(&kswapd_done);
 run_task_queue(&tq_disk);
 /*
@@ -995,33 +952,10 @@ int kswapd(void *unused)
 }
 }
-void wakeup_kswapd(int block)
+void wakeup_kswapd(void)
 {
-DECLARE_WAITQUEUE(wait, current);
-if (current == kswapd_task)
-return;
-if (!block) {
-if (waitqueue_active(&kswapd_wait))
-wake_up(&kswapd_wait);
-return;
-}
-/*
-* Kswapd could wake us up before we get a chance
-* to sleep, so we have to be very careful here to
-* prevent SMP races...
-*/
-__set_current_state(TASK_UNINTERRUPTIBLE);
-add_wait_queue(&kswapd_done, &wait);
-if (waitqueue_active(&kswapd_wait))
-wake_up(&kswapd_wait);
-schedule();
-remove_wait_queue(&kswapd_done, &wait);
-__set_current_state(TASK_RUNNING);
+if (current != kswapd_task)
+wake_up_process(kswapd_task);
 }
 /*
@@ -1046,7 +980,7 @@ DECLARE_WAIT_QUEUE_HEAD(kreclaimd_wait);
 /*
 * Kreclaimd will move pages from the inactive_clean list to the
 * free list, in order to keep atomic allocations possible under
-* all circumstances. Even when kswapd is blocked on IO.
+* all circumstances.
 */
 int kreclaimd(void *unused)
 {
...
@@ -504,8 +504,8 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
 im->timer.function=&igmp_timer_expire;
 im->unsolicit_count = IGMP_Unsolicited_Report_Count;
 im->reporter = 0;
-im->loaded = 0;
 #endif
+im->loaded = 0;
 write_lock_bh(&in_dev->lock);
 im->next=in_dev->mc_list;
 in_dev->mc_list=im;
...
@@ -954,7 +954,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
 */
 skb = sk->write_queue.prev;
 if (tp->send_head &&
-(mss_now - skb->len) > 0) {
+(mss_now > skb->len)) {
 copy = skb->len;
 if (skb_tailroom(skb) > 0) {
 int last_byte_was_odd = (copy % 4);
...
@@ -1705,7 +1705,7 @@ static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
 if ((__s32)when < (__s32)tp->rttvar)
 when = tp->rttvar;
-tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, when);
+tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, min(when, TCP_RTO_MAX));
 }
 }
...