Commit 3192b2dc authored by Linus Torvalds

v2.4.0.1 -> v2.4.0.2

  - ISDN fixes
  - VM balancing tuning
parent 43e9282d
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 1
EXTRAVERSION =-pre1
EXTRAVERSION =-pre2
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
......@@ -50,7 +50,7 @@ ifdef CONFIG_M686
CFLAGS += -march=i686
endif
ifdef CONFIG_M686FXSR
ifdef CONFIG_MPENTIUMIII
CFLAGS += -march=i686
endif
......
......@@ -10,7 +10,7 @@
ca7bd9bac39203f3074f3f093948cc3c isac.c
a2ad619fd404b3149099a2984de9d23c isdnl1.c
d2a78e407f3d94876deac160c6f9aae6 isdnl2.c
a109841c2e75b11fc8ef2c8718e24c3e isdnl3.c
e7932ca7ae39c497c17f13a2e1434fcd isdnl3.c
afb5f2f4ac296d6de45c856993b161e1 tei.c
00023e2a482cb86a26ea870577ade5d6 callc.c
a1834e9b2ec068440cff2e899eff4710 cert.c
......@@ -25,9 +25,9 @@ a1834e9b2ec068440cff2e899eff4710 cert.c
Version: 2.6.3i
Charset: noconv
iQCVAwUBOlMTgDpxHvX/mS9tAQFSbgP/W9y6tnnWHTRLGqyr3EY1OHZiQXERkAAu
hp+Y8PIoX1GgAh4yZ7xhYwUsk6y0z5USdGuhC9ZHh+oZd57lPsJMnhkEZR5BVsYT
r7jHwelP527+QCLkVUCHIVIWUW0ANzeZBhDV2vefkFb+gWLiZsBhaHssbcKGsMNG
Ak4xS1ByqsM=
=lsIJ
iQCVAwUBOlxeLTpxHvX/mS9tAQH6RwP8DhyvqAnXFV6WIGi16iQ3vKikkPoqnDQs
GEn5uCW0dPYKlwthD2Grj/JbMYZhOmCFuDxF7ufJnjTSDe/D8XNe2wngxzAiwcIe
WjCrT8X95cuP3HZHscbFTEinVV0GAnoI0ZEgs5eBDhVHDqILLYMaTFBQaRH3jgXc
i5VH88jPfUM=
=qc+J
-----END PGP SIGNATURE-----
......@@ -149,7 +149,6 @@
#include <linux/version.h>
#ifdef MODULE
#include <linux/modversions.h>
#include <linux/module.h>
char kernel_version[] = UTS_RELEASE;
......
......@@ -222,8 +222,8 @@ int ppa_detect(Scsi_Host_Template * host)
printk(" supported by the imm (ZIP Plus) driver. If the\n");
printk(" cable is marked with \"AutoDetect\", this is what has\n");
printk(" happened.\n");
return 0;
spin_lock_irq(&io_request_lock);
return 0;
}
try_again = 1;
goto retry_entry;
......
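In 2.4 the SCSI mid-layer enters a driver's detect() routine with io_request_lock held; a driver that drops the lock while it probes slowly must take it again on every return path. The hunk above adds the re-acquire that was missing before the early "unsupported ZIP Plus device" return. A minimal sketch of that pattern, with probe_hardware() as a hypothetical stand-in for the driver's actual probing:

    int example_detect(Scsi_Host_Template *host)
    {
            spin_unlock_irq(&io_request_lock);       /* probe hardware without the lock held */
            if (!probe_hardware()) {                 /* hypothetical probe helper */
                    spin_lock_irq(&io_request_lock); /* re-acquire before bailing out */
                    return 0;                        /* no hosts detected */
            }
            spin_lock_irq(&io_request_lock);         /* re-acquire on the success path too */
            return 1;
    }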
......@@ -71,18 +71,17 @@ int nfs_reqlist_init(struct nfs_server *server)
int status = 0;
dprintk("NFS: writecache_init\n");
/* Create the RPC task */
if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC)))
return -ENOMEM;
spin_lock(&nfs_flushd_lock);
cache = server->rw_requests;
if (cache->task)
goto out_unlock;
/* Create the RPC task */
status = -ENOMEM;
task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC);
if (!task)
goto out_unlock;
task->tk_calldata = server;
cache->task = task;
......@@ -99,6 +98,7 @@ int nfs_reqlist_init(struct nfs_server *server)
return 0;
out_unlock:
spin_unlock(&nfs_flushd_lock);
rpc_release_task(task);
return status;
}
......@@ -195,7 +195,9 @@ void inode_remove_flushd(struct inode *inode)
if (*q) {
*q = inode->u.nfs_i.hash_next;
NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
spin_unlock(&nfs_flushd_lock);
iput(inode);
return;
}
out:
spin_unlock(&nfs_flushd_lock);
......
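The nfs_reqlist_init() hunk reorders the setup so the flushd task is created only after checking, under nfs_flushd_lock, that no task is registered yet, and the shared error path now releases a task that turns out not to be needed. A generic sketch of that ordering (the names below are illustrative, not the NFS code's own):

    static spinlock_t setup_lock = SPIN_LOCK_UNLOCKED;
    static struct worker *registered;               /* hypothetical singleton */

    int register_worker_once(void)
    {
            struct worker *w;
            int status = 0;

            spin_lock(&setup_lock);
            if (registered)                         /* already set up: nothing to do */
                    goto out_unlock;

            status = -ENOMEM;
            w = alloc_worker();                     /* hypothetical allocator */
            if (!w)
                    goto out_unlock;

            registered = w;                         /* publish under the lock */
            status = 0;
    out_unlock:
            spin_unlock(&setup_lock);
            return status;
    }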
......@@ -219,7 +219,6 @@ struct mm_struct {
unsigned long rss, total_vm, locked_vm;
unsigned long def_flags;
unsigned long cpu_vm_mask;
unsigned long swap_cnt; /* number of pages to swap on next pass */
unsigned long swap_address;
/* Architecture-specific MM context */
......
......@@ -107,7 +107,7 @@ extern wait_queue_head_t kreclaimd_wait;
extern int page_launder(int, int);
extern int free_shortage(void);
extern int inactive_shortage(void);
extern void wakeup_kswapd(int);
extern void wakeup_kswapd(void);
extern int try_to_free_pages(unsigned int gfp_mask);
/* linux/mm/page_io.c */
......
......@@ -134,7 +134,6 @@ static inline int dup_mmap(struct mm_struct * mm)
mm->mmap_cache = NULL;
mm->map_count = 0;
mm->cpu_vm_mask = 0;
mm->swap_cnt = 0;
mm->swap_address = 0;
pprev = &mm->mmap;
for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
......
......@@ -306,7 +306,7 @@ static inline struct page * __find_page_nolock(struct address_space *mapping, un
*/
age_page_up(page);
if (inactive_shortage() > inactive_target / 2 && free_shortage())
wakeup_kswapd(0);
wakeup_kswapd();
not_found:
return page;
}
......@@ -1835,7 +1835,8 @@ static long madvise_fixup_start(struct vm_area_struct * vma,
n->vm_end = end;
setup_read_behavior(n, behavior);
n->vm_raend = 0;
get_file(n->vm_file);
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
lock_vma_mappings(vma);
......@@ -1861,7 +1862,8 @@ static long madvise_fixup_end(struct vm_area_struct * vma,
n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
setup_read_behavior(n, behavior);
n->vm_raend = 0;
get_file(n->vm_file);
if (n->vm_file)
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
lock_vma_mappings(vma);
......@@ -1893,7 +1895,8 @@ static long madvise_fixup_middle(struct vm_area_struct * vma,
right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
left->vm_raend = 0;
right->vm_raend = 0;
atomic_add(2, &vma->vm_file->f_count);
if (vma->vm_file)
atomic_add(2, &vma->vm_file->f_count);
if (vma->vm_ops && vma->vm_ops->open) {
vma->vm_ops->open(left);
......
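madvise()'s VMA-splitting helpers previously called get_file() unconditionally, but anonymous mappings carry no backing file, so vma->vm_file is NULL for them and the unguarded call would dereference a NULL pointer. The three hunks above add the same guard to each helper; in isolation the pattern is just:

    if (n->vm_file)                 /* NULL for anonymous mappings */
            get_file(n->vm_file);   /* pin the backing file of file-backed VMAs */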
......@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
int nr_swap_pages;
int nr_active_pages;
......@@ -303,7 +304,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
* an inactive page shortage, wake up kswapd.
*/
if (inactive_shortage() > inactive_target / 2 && free_shortage())
wakeup_kswapd(0);
wakeup_kswapd();
/*
* If we are about to get low on free pages and cleaning
* the inactive_dirty pages would fix the situation,
......@@ -379,7 +380,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
* - if we don't have __GFP_IO set, kswapd may be
* able to free some memory we can't free ourselves
*/
wakeup_kswapd(0);
wakeup_kswapd();
if (gfp_mask & __GFP_WAIT) {
__set_current_state(TASK_RUNNING);
current->policy |= SCHED_YIELD;
......@@ -404,7 +405,7 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
* - we're doing a higher-order allocation
* --> move pages to the free list until we succeed
* - we're /really/ tight on memory
* --> wait on the kswapd waitqueue until memory is freed
* --> try to free pages ourselves with page_launder
*/
if (!(current->flags & PF_MEMALLOC)) {
/*
......@@ -443,36 +444,23 @@ struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
/*
* When we arrive here, we are really tight on memory.
*
* We wake up kswapd and sleep until kswapd wakes us
* up again. After that we loop back to the start.
*
* We have to do this because something else might eat
* the memory kswapd frees for us and we need to be
* reliable. Note that we don't loop back for higher
* order allocations since it is possible that kswapd
* simply cannot free a large enough contiguous area
* of memory *ever*.
*/
if ((gfp_mask & (__GFP_WAIT|__GFP_IO)) == (__GFP_WAIT|__GFP_IO)) {
wakeup_kswapd(1);
memory_pressure++;
if (!order)
goto try_again;
/*
* If __GFP_IO isn't set, we can't wait on kswapd because
* kswapd just might need some IO locks /we/ are holding ...
*
* SUBTLE: The scheduling point above makes sure that
* kswapd does get the chance to free memory we can't
* free ourselves...
* We try to free pages ourselves by:
* - shrinking the i/d caches.
* - reclaiming unused memory from the slab caches.
* - swapping/syncing pages to disk (done by page_launder)
* - moving clean pages from the inactive dirty list to
* the inactive clean list. (done by page_launder)
*/
} else if (gfp_mask & __GFP_WAIT) {
try_to_free_pages(gfp_mask);
memory_pressure++;
if (gfp_mask & __GFP_WAIT) {
shrink_icache_memory(6, gfp_mask);
shrink_dcache_memory(6, gfp_mask);
kmem_cache_reap(gfp_mask);
page_launder(gfp_mask, 1);
if (!order)
goto try_again;
}
}
/*
......
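The __alloc_pages() change replaces "wake kswapd and sleep on its waitqueue" with direct reclaim: a __GFP_WAIT allocator now shrinks the inode and dentry caches, reaps the slab caches, and runs page_launder() itself, then loops back only for order-0 requests. Condensed into a hypothetical helper (try_direct_allocation() stands in for the zone scanning the real function does inline):

    struct page *alloc_fallback_sketch(unsigned int gfp_mask, unsigned long order)
    {
            struct page *page;

    try_again:
            page = try_direct_allocation(order);        /* hypothetical: scan the zone lists */
            if (page)
                    return page;

            if (gfp_mask & __GFP_WAIT) {
                    shrink_icache_memory(6, gfp_mask);  /* drop unused inodes */
                    shrink_dcache_memory(6, gfp_mask);  /* drop unused dentries */
                    kmem_cache_reap(gfp_mask);          /* give back free slab pages */
                    page_launder(gfp_mask, 1);          /* clean and move inactive dirty pages */
                    if (!order)
                            goto try_again;             /* only single pages loop back */
            }
            return NULL;                                /* higher-order requests may fail here */
    }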
......@@ -1702,7 +1702,7 @@ static void enable_all_cpucaches (void)
* kmem_cache_reap - Reclaim memory from caches.
* @gfp_mask: the type of memory required.
*
* Called from try_to_free_page().
* Called from do_try_to_free_pages() and __alloc_pages()
*/
void kmem_cache_reap (int gfp_mask)
{
......
This diff is collapsed.
......@@ -504,8 +504,8 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
im->timer.function=&igmp_timer_expire;
im->unsolicit_count = IGMP_Unsolicited_Report_Count;
im->reporter = 0;
im->loaded = 0;
#endif
im->loaded = 0;
write_lock_bh(&in_dev->lock);
im->next=in_dev->mc_list;
in_dev->mc_list=im;
......
......@@ -954,7 +954,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
*/
skb = sk->write_queue.prev;
if (tp->send_head &&
(mss_now - skb->len) > 0) {
(mss_now > skb->len)) {
copy = skb->len;
if (skb_tailroom(skb) > 0) {
int last_byte_was_odd = (copy % 4);
......
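The tcp_sendmsg() change looks cosmetic but is not: skb->len is unsigned, so the subtraction in (mss_now - skb->len) > 0 is done in unsigned arithmetic and wraps to a huge positive value whenever the skb is already longer than the MSS, making the old test true in exactly the case it was meant to reject. Comparing the two values directly avoids the wrap. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mss_now = 1460;    /* current segment size */
            unsigned int skb_len = 2000;    /* skb already longer than the MSS */

            /* Old test: unsigned subtraction wraps (here to 4294966756),
             * so the condition holds even though no room is left. */
            if ((mss_now - skb_len) > 0)
                    printf("old test: would keep appending (wrong)\n");

            /* New test: plain comparison, false here as intended. */
            if (mss_now > skb_len)
                    printf("new test: room left in this skb\n");
            else
                    printf("new test: skb full, start a new one\n");

            return 0;
    }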
......@@ -1705,7 +1705,7 @@ static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
if ((__s32)when < (__s32)tp->rttvar)
when = tp->rttvar;
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, when);
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, min(when, TCP_RTO_MAX));
}
}
......
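tcp_ack_packets_out() now clamps the recomputed retransmit timeout to TCP_RTO_MAX before rearming the timer, so a large rttvar can no longer push the timer past the protocol ceiling; the min() in the hunk is the whole fix. Expressed as a tiny standalone helper (the ceiling value is an assumption for illustration only):

    #define RTO_MAX_TICKS (120 * 100)       /* assumed ceiling, in timer ticks */

    static unsigned long clamp_rto(unsigned long when)
    {
            /* equivalent of min(when, RTO_MAX_TICKS) */
            return when < RTO_MAX_TICKS ? when : RTO_MAX_TICKS;
    }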