Commit ff35c838 authored by Linus Torvalds

v2.4.13.2 -> v2.4.13.3

  - René Scharfe: random bugfix
  - me: block device queuing low-water-marks, VM mapped tweaking.
parent 4fd9cc9e
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 14
EXTRAVERSION =-pre2
EXTRAVERSION =-pre3
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
@@ -140,21 +140,23 @@ inline request_queue_t *blk_get_queue(kdev_t dev)
return &blk_dev[MAJOR(dev)].request_queue;
}
static int __blk_cleanup_queue(struct list_head *head)
static int __blk_cleanup_queue(struct request_list *list)
{
struct list_head *head = &list->free;
struct request *rq;
int i = 0;
if (list_empty(head))
return 0;
do {
while (!list_empty(head)) {
rq = list_entry(head->next, struct request, queue);
list_del(&rq->queue);
kmem_cache_free(request_cachep, rq);
i++;
} while (!list_empty(head));
};
if (i != list->count)
printk("request list leak!\n");
list->count = 0;
return i;
}
@@ -176,10 +178,8 @@ void blk_cleanup_queue(request_queue_t * q)
{
int count = queue_nr_requests;
count -= __blk_cleanup_queue(&q->request_freelist[READ]);
count -= __blk_cleanup_queue(&q->request_freelist[WRITE]);
count -= __blk_cleanup_queue(&q->pending_freelist[READ]);
count -= __blk_cleanup_queue(&q->pending_freelist[WRITE]);
count -= __blk_cleanup_queue(&q->rq[READ]);
count -= __blk_cleanup_queue(&q->rq[WRITE]);
if (count)
printk("blk_cleanup_queue: leaked requests (%d)\n", count);
@@ -331,11 +331,10 @@ static void blk_init_free_list(request_queue_t *q)
struct request *rq;
int i;
INIT_LIST_HEAD(&q->request_freelist[READ]);
INIT_LIST_HEAD(&q->request_freelist[WRITE]);
INIT_LIST_HEAD(&q->pending_freelist[READ]);
INIT_LIST_HEAD(&q->pending_freelist[WRITE]);
q->pending_free[READ] = q->pending_free[WRITE] = 0;
INIT_LIST_HEAD(&q->rq[READ].free);
INIT_LIST_HEAD(&q->rq[WRITE].free);
q->rq[READ].count = 0;
q->rq[WRITE].count = 0;
/*
* Divide requests in half between read and write
@@ -349,7 +348,8 @@ static void blk_init_free_list(request_queue_t *q)
}
memset(rq, 0, sizeof(struct request));
rq->rq_status = RQ_INACTIVE;
list_add(&rq->queue, &q->request_freelist[i & 1]);
list_add(&rq->queue, &q->rq[i&1].free);
q->rq[i&1].count++;
}
init_waitqueue_head(&q->wait_for_request);
@@ -423,10 +423,12 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
static inline struct request *get_request(request_queue_t *q, int rw)
{
struct request *rq = NULL;
struct request_list *rl = q->rq + rw;
if (!list_empty(&q->request_freelist[rw])) {
rq = blkdev_free_rq(&q->request_freelist[rw]);
if (!list_empty(&rl->free)) {
rq = blkdev_free_rq(&rl->free);
list_del(&rq->queue);
rl->count--;
rq->rq_status = RQ_ACTIVE;
rq->special = NULL;
rq->q = q;
@@ -443,17 +445,13 @@ static struct request *__get_request_wait(request_queue_t *q, int rw)
register struct request *rq;
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&q->wait_for_request, &wait);
for (;;) {
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_lock_irq(&io_request_lock);
rq = get_request(q, rw);
spin_unlock_irq(&io_request_lock);
if (rq)
break;
generic_unplug_device(q);
add_wait_queue_exclusive(&q->wait_for_request, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
if (q->rq[rw].count < batch_requests)
schedule();
}
} while ((rq = get_request(q,rw)) == NULL);
remove_wait_queue(&q->wait_for_request, &wait);
current->state = TASK_RUNNING;
return rq;
@@ -542,15 +540,6 @@ static inline void add_request(request_queue_t * q, struct request * req,
list_add(&req->queue, insert_here);
}
inline void blk_refill_freelist(request_queue_t *q, int rw)
{
if (q->pending_free[rw]) {
list_splice(&q->pending_freelist[rw], &q->request_freelist[rw]);
INIT_LIST_HEAD(&q->pending_freelist[rw]);
q->pending_free[rw] = 0;
}
}
/*
* Must be called with io_request_lock held and interrupts disabled
*/
@@ -564,28 +553,12 @@ inline void blkdev_release_request(struct request *req)
/*
* Request may not have originated from ll_rw_blk. if not,
* asumme it has free buffers and check waiters
* assume it has free buffers and check waiters
*/
if (q) {
/*
* If nobody is waiting for requests, don't bother
* batching up.
*/
if (!list_empty(&q->request_freelist[rw])) {
list_add(&req->queue, &q->request_freelist[rw]);
return;
}
/*
* Add to pending free list and batch wakeups
*/
list_add(&req->queue, &q->pending_freelist[rw]);
if (++q->pending_free[rw] >= batch_requests) {
int wake_up = q->pending_free[rw];
blk_refill_freelist(q, rw);
wake_up_nr(&q->wait_for_request, wake_up);
}
list_add(&req->queue, &q->rq[rw].free);
if (++q->rq[rw].count >= batch_requests && waitqueue_active(&q->wait_for_request))
wake_up(&q->wait_for_request);
}
}
@@ -1144,7 +1117,7 @@ int __init blk_dev_init(void)
/*
* Batch frees according to queue length
*/
batch_requests = queue_nr_requests/3;
batch_requests = queue_nr_requests/4;
printk("block: %d slots per queue, batch=%d\n", queue_nr_requests, batch_requests);
#ifdef CONFIG_AMIGA_Z2RAM
......
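The ll_rw_blk.c changes above are the "block device queuing low-water-marks" from the commit message: freed requests go back onto the per-direction q->rq[rw] list, but sleepers in __get_request_wait() re-check q->rq[rw].count against batch_requests before sleeping, and blkdev_release_request() only calls wake_up() once a whole batch (queue_nr_requests/4) has accumulated and waitqueue_active() says someone is waiting. A minimal userspace sketch of that batching idea, with illustrative values rather than the kernel's memory-sized queue_nr_requests:

#include <stdio.h>

/* Standalone model, not kernel code: the per-direction free list is reduced
 * to a counter, and "waking waiters" is just a printf. */
struct request_list {
	unsigned int count;		/* free requests for this direction */
};

static int queue_nr_requests = 128;	/* illustrative; the kernel sizes this from RAM */
static int batch_requests;

/* Model of blkdev_release_request(): free one request and report whether a
 * waiter would be woken, i.e. whether a whole batch has accumulated. */
static int release_request(struct request_list *rl)
{
	rl->count++;
	return rl->count >= (unsigned int)batch_requests;
}

int main(void)
{
	struct request_list rq_read = { 0 };
	int i;

	batch_requests = queue_nr_requests / 4;	/* as in blk_dev_init() above */

	for (i = 1; i <= batch_requests; i++) {
		if (release_request(&rq_read))
			printf("free #%d: count reached batch_requests=%d -> wake_up()\n",
			       i, batch_requests);
	}
	return 0;
}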
@@ -471,7 +471,7 @@ static int nbd_release(struct inode *inode, struct file *file)
static struct block_device_operations nbd_fops =
{
owner: THIS_MODULE.
owner: THIS_MODULE,
open: nbd_open,
release: nbd_release,
ioctl: nbd_ioctl,
......
@@ -1253,7 +1253,7 @@ static inline void xfer_secondary_pool(struct entropy_store *r,
r == sec_random_state ? "secondary" : "unknown",
r->entropy_count, nbytes * 8);
extract_entropy(random_state, tmp, nwords, 0);
extract_entropy(random_state, tmp, nwords * 4, 0);
add_entropy_words(r, tmp, nwords);
credit_entropy_store(r, nwords * 32);
}
......
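The random.c hunk is René Scharfe's "random bugfix": assuming, as in 2.4's drivers/char/random.c, that extract_entropy() takes a byte count while nwords counts 32-bit words, the old call only asked for nwords bytes even though nwords full words were then mixed in and credited. A tiny arithmetic sketch of the unit conversions involved:

#include <stdio.h>

int main(void)
{
	unsigned int nwords = 32;		/* illustrative transfer size in 32-bit words */
	unsigned int nbytes = nwords * 4;	/* what extract_entropy() is asked for after the fix */
	unsigned int nbits  = nwords * 32;	/* what credit_entropy_store() credits */

	printf("%u words -> extract %u bytes, credit %u bits\n",
	       nwords, nbytes, nbits);
	return 0;
}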
@@ -1183,6 +1183,24 @@ void complete_change_console(unsigned int new_console)
old_vc_mode = vt_cons[fg_console]->vc_mode;
switch_screen(new_console);
/*
* This can't appear below a successful kill_proc(). If it did,
* then the *blank_screen operation could occur while X, having
* received acqsig, is waking up on another processor. This
* condition can lead to overlapping accesses to the VGA range
* and the framebuffer (causing system lockups).
*
* To account for this we duplicate this code below only if the
* controlling process is gone and we've called reset_vc.
*/
if (old_vc_mode != vt_cons[new_console]->vc_mode)
{
if (vt_cons[new_console]->vc_mode == KD_TEXT)
unblank_screen();
else
do_blank_screen(1);
}
/*
* If this new console is under process control, send it a signal
* telling it that it has acquired. Also check if it has died and
@@ -1209,13 +1227,7 @@ void complete_change_console(unsigned int new_console)
* to account for and tracking tty count may be undesirable.
*/
reset_vc(new_console);
}
}
/*
* We do this here because the controlling process above may have
* gone, and so there is now a new vc_mode
*/
if (old_vc_mode != vt_cons[new_console]->vc_mode)
{
if (vt_cons[new_console]->vc_mode == KD_TEXT)
@@ -1223,6 +1235,8 @@ void complete_change_console(unsigned int new_console)
else
do_blank_screen(1);
}
}
}
/*
* Wake anyone waiting for their VT to activate
......
@@ -705,8 +705,12 @@ void __invalidate_buffers(kdev_t dev, int destroy_dirty_buffers)
static void free_more_memory(void)
{
zone_t * zone = contig_page_data.node_zonelists[GFP_NOFS & GFP_ZONEMASK].zones[0];
balance_dirty();
wakeup_bdflush();
try_to_free_pages(zone, GFP_NOFS, 0);
run_task_queue(&tq_disk);
current->policy |= SCHED_YIELD;
__set_current_state(TASK_RUNNING);
schedule();
......
@@ -66,14 +66,17 @@ typedef void (unplug_device_fn) (void *q);
*/
#define QUEUE_NR_REQUESTS 8192
struct request_list {
unsigned int count;
struct list_head free;
};
struct request_queue
{
/*
* the queue request freelist, one for reads and one for writes
*/
struct list_head request_freelist[2];
struct list_head pending_freelist[2];
int pending_free[2];
struct request_list rq[2];
/*
* Together with queue_head for cacheline sharing
......
@@ -82,6 +82,7 @@ extern struct page * find_or_create_page(struct address_space *mapping,
extern void lock_page(struct page *page);
#define find_lock_page(mapping, index) \
__find_lock_page(mapping, index, page_hash(mapping, index))
extern struct page *find_trylock_page(struct address_space *, unsigned long);
extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
......
@@ -145,6 +145,7 @@ extern int swap_duplicate(swp_entry_t);
extern int swap_count(struct page *);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
extern void free_swap_and_cache(swp_entry_t);
struct swap_list_t {
int head; /* head of priority-ordered swapfile list */
int next; /* swapfile to be used next */
......
@@ -818,6 +818,24 @@ struct page * __find_get_page(struct address_space *mapping,
return page;
}
/*
* Same as above, but trylock it instead of incrementing the count.
*/
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
struct page *page;
struct page **hash = page_hash(mapping, offset);
spin_lock(&pagecache_lock);
page = __find_page_nolock(mapping, offset, *hash);
if (page) {
if (TryLockPage(page))
page = NULL;
}
spin_unlock(&pagecache_lock);
return page;
}
/*
* Must be called with the pagecache lock held,
* will return with it held (but it may be dropped
......
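Per its own comment, find_trylock_page() differs from __find_get_page() in that it trylocks the page instead of taking a reference: if the page is already locked the caller simply gets NULL back and never sleeps, and a successful caller receives the page locked and must unlock it (taking its own reference first if it wants to keep the page, as free_swap_and_cache() does below). A rough userspace analogue of that lock-or-skip pattern, using a pthread mutex to stand in for the page lock; all names here are illustrative, not kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

/* Stand-in for a page-cache entry: just a lock and a "present" flag. */
struct page_like {
	pthread_mutex_t lock;
	int present;
};

/* Lock-or-skip lookup: return the entry locked if that can be done without
 * blocking, otherwise behave as if it was not found at all. */
static struct page_like *find_trylock(struct page_like *p)
{
	if (!p || !p->present)
		return NULL;
	if (pthread_mutex_trylock(&p->lock) != 0)
		return NULL;	/* already locked elsewhere: skip, never sleep */
	return p;		/* returned locked; caller must unlock */
}

int main(void)
{
	struct page_like pg = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct page_like *got = find_trylock(&pg);

	if (got) {
		printf("entry found and locked without sleeping\n");
		pthread_mutex_unlock(&got->lock);
	}
	return 0;
}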
@@ -325,7 +325,7 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
/* This will eventually call __free_pte on the pte. */
tlb_remove_page(tlb, ptep, address + offset);
} else {
swap_free(pte_to_swp_entry(pte));
free_swap_and_cache(pte_to_swp_entry(pte));
pte_clear(ptep);
}
}
......
@@ -72,14 +72,6 @@ int vm_enough_memory(long pages)
free += nr_free_pages();
free += nr_swap_pages;
/*
* This double-counts: the nrpages are both in the page-cache
* and in the swapper space. At the same time, this compensates
* for the swap-space over-allocation (ie "nr_swap_pages" being
* too small.
*/
free += swapper_space.nrpages;
/*
* The code below doesn't account for free space in the inode
* and dentry slab cache, slab cache fragmentation, inodes and
......
@@ -145,18 +145,13 @@ swp_entry_t get_swap_page(void)
return entry;
}
/*
* Caller has made sure that the swapdevice corresponding to entry
* is still around or has not been recycled.
*/
void swap_free(swp_entry_t entry)
static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
struct swap_info_struct * p;
unsigned long offset, type;
if (!entry.val)
goto out;
type = SWP_TYPE(entry);
if (type >= nr_swapfiles)
goto bad_nofile;
@@ -172,8 +167,37 @@ void swap_free(swp_entry_t entry)
if (p->prio > swap_info[swap_list.next].prio)
swap_list.next = type;
swap_device_lock(p);
if (p->swap_map[offset] < SWAP_MAP_MAX) {
if (!--(p->swap_map[offset])) {
return p;
bad_free:
printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
goto out;
bad_offset:
printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
goto out;
bad_device:
printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
goto out;
bad_nofile:
printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
return NULL;
}
static void swap_info_put(struct swap_info_struct * p)
{
swap_device_unlock(p);
swap_list_unlock();
}
static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
int count = p->swap_map[offset];
if (count < SWAP_MAP_MAX) {
count--;
p->swap_map[offset] = count;
if (!count) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
@@ -181,23 +205,45 @@ void swap_free(swp_entry_t entry)
nr_swap_pages++;
}
}
swap_device_unlock(p);
swap_list_unlock();
out:
return;
return count;
}
bad_nofile:
printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
goto out;
bad_device:
printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
goto out;
bad_offset:
printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
goto out;
bad_free:
printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
goto out;
/*
* Caller has made sure that the swapdevice corresponding to entry
* is still around or has not been recycled.
*/
void swap_free(swp_entry_t entry)
{
struct swap_info_struct * p;
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, SWP_OFFSET(entry));
swap_info_put(p);
}
}
/*
* Free the swap entry like above, but also try to
* free the page cache entry if it is the last user.
*/
void free_swap_and_cache(swp_entry_t entry)
{
struct swap_info_struct * p;
struct page *page = NULL;
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
page = find_trylock_page(&swapper_space, entry.val);
swap_info_put(p);
}
if (page) {
page_cache_get(page);
delete_from_swap_cache(page);
UnlockPage(page);
page_cache_release(page);
}
}
/*
......
@@ -292,8 +292,7 @@ static int swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * class
int counter, nr_pages = SWAP_CLUSTER_MAX;
struct mm_struct *mm;
/* Then, look at the other mm's */
counter = mmlist_nr / priority;
counter = mmlist_nr;
do {
if (unlikely(current->need_resched)) {
__set_current_state(TASK_RUNNING);
@@ -334,7 +333,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
{
struct list_head * entry;
int max_scan = nr_inactive_pages / priority;
int max_mapped = max_scan / 4;
int max_mapped = nr_pages*10;
spin_lock(&pagemap_lru_lock);
while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
......
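The vmscan.c hunks are the commit message's "VM mapped tweaking": swap_out() no longer divides its mm scan counter by priority, and shrink_cache() now derives max_mapped from the reclaim target (nr_pages*10) instead of from the scan length (max_scan/4). A small sketch comparing the old and new max_mapped formulas under assumed, illustrative values (SWAP_CLUSTER_MAX is taken as 32 here, which is an assumption):

#include <stdio.h>

int main(void)
{
	int nr_inactive_pages = 20000;	/* illustrative inactive-list size */
	int nr_pages = 32;		/* SWAP_CLUSTER_MAX, assumed value */
	int priority;

	for (priority = 6; priority >= 1; priority--) {
		int max_scan = nr_inactive_pages / priority;
		int old_max_mapped = max_scan / 4;	/* before this commit: tracks scan length */
		int new_max_mapped = nr_pages * 10;	/* after this commit: fixed by reclaim target */

		printf("priority %d: max_scan=%d old max_mapped=%d new max_mapped=%d\n",
		       priority, max_scan, old_max_mapped, new_max_mapped);
	}
	return 0;
}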