Commit a356c406 authored by Linus Torvalds

v2.4.10.0.3 -> v2.4.10.0.4

  - more buffers-in-pagecache coherency
parent fbc139f5
......@@ -300,7 +300,7 @@ affs_new_inode(struct inode *dir)
u32 block;
struct buffer_head *bh;
if (!(inode = get_empty_inode()))
if (!(inode = new_inode(sb)))
goto err_inode;
if (!(block = affs_alloc_block(dir, dir->i_ino)))
......@@ -312,8 +312,6 @@ affs_new_inode(struct inode *dir)
mark_buffer_dirty_inode(bh, inode);
affs_brelse(bh);
inode->i_sb = sb;
inode->i_dev = sb->s_dev;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
inode->i_ino = block;
......
......@@ -47,7 +47,7 @@ static inline unsigned int block_size(kdev_t dev)
return retval;
}
static unsigned int max_block(kdev_t dev)
static unsigned long max_block(kdev_t dev)
{
unsigned int retval = ~0U;
int major = MAJOR(dev);
......@@ -86,302 +86,74 @@ static void kill_bdev(struct block_device *bdev)
truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}
static inline void kill_buffers(kdev_t dev)
int set_blocksize(kdev_t dev, int size)
{
struct block_device *bdev = bdget(dev);
if (bdev) {
kill_bdev(bdev);
bdput(bdev);
}
}
void set_blocksize(kdev_t dev, int size)
{
extern int *blksize_size[];
if (!blksize_size[MAJOR(dev)])
return;
int oldsize;
struct block_device *bdev;
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
panic("Invalid blocksize passed to set_blocksize");
return -EINVAL;
/* Size cannot be smaller than the size supported by the device */
if (size < get_hardsect_size(dev))
return -EINVAL;
if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
/* No blocksize array? Implies hardcoded BLOCK_SIZE */
if (!blksize_size[MAJOR(dev)]) {
if (size == BLOCK_SIZE)
return 0;
return -EINVAL;
}
oldsize = blksize_size[MAJOR(dev)][MINOR(dev)];
if (oldsize == size)
return 0;
if (!oldsize && size == BLOCK_SIZE) {
blksize_size[MAJOR(dev)][MINOR(dev)] = size;
return;
return 0;
}
if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
return;
/* Ok, we're actually changing the blocksize.. */
bdev = bdget(dev);
sync_buffers(dev, 2);
blksize_size[MAJOR(dev)][MINOR(dev)] = size;
kill_buffers(dev);
bdev->bd_inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
bdput(bdev);
return 0;
}
static inline int blkdev_get_block(struct inode * inode, long iblock, struct buffer_head * bh_result)
static int blkdev_get_block(struct inode * inode, long iblock, struct buffer_head * bh, int create)
{
int err;
err = -EIO;
if (iblock >= max_block(inode->i_rdev))
goto out;
bh_result->b_blocknr = iblock;
bh_result->b_state |= 1UL << BH_Mapped;
err = 0;
return -EIO;
out:
return err;
bh->b_dev = inode->i_rdev;
bh->b_blocknr = iblock;
bh->b_state |= 1UL << BH_Mapped;
return 0;
}
static int blkdev_writepage(struct page * page)
{
int err, i;
unsigned int blocksize;
unsigned long block;
struct buffer_head *bh, *head;
struct inode *inode = page->mapping->host;
if (!PageLocked(page))
BUG();
blocksize = block_size(inode->i_rdev);
if (!page->buffers)
create_empty_buffers(page, inode->i_rdev, blocksize);
head = page->buffers;
block = page->index << (PAGE_CACHE_SHIFT - blksize_bits(blocksize));
bh = head;
i = 0;
/* Stage 1: make sure we have all the buffers mapped! */
do {
/*
* If the buffer isn't up-to-date, we can't be sure
* that the buffer has been initialized with the proper
* block number information etc..
*
* Leave it to the low-level FS to make all those
* decisions (block #0 may actually be a valid block)
*/
if (!buffer_mapped(bh)) {
err = blkdev_get_block(inode, block, bh);
if (err)
goto out;
}
bh = bh->b_this_page;
block++;
} while (bh != head);
/* Stage 2: lock the buffers, mark them clean */
do {
lock_buffer(bh);
set_buffer_async_io(bh);
set_bit(BH_Uptodate, &bh->b_state);
clear_bit(BH_Dirty, &bh->b_state);
bh = bh->b_this_page;
} while (bh != head);
/* Stage 3: submit the IO */
do {
submit_bh(WRITE, bh);
bh = bh->b_this_page;
} while (bh != head);
/* Done - end_buffer_io_async will unlock */
SetPageUptodate(page);
return 0;
out:
ClearPageUptodate(page);
UnlockPage(page);
return err;
return block_write_full_page(page, blkdev_get_block);
}
static int blkdev_readpage(struct file * file, struct page * page)
{
struct inode *inode = page->mapping->host;
kdev_t dev = inode->i_rdev;
unsigned long iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
unsigned int blocks, blocksize, blocksize_bits;
int nr, i;
if (!PageLocked(page))
PAGE_BUG(page);
blocksize = block_size(dev);
blocksize_bits = blksize_bits(blocksize);
if (!page->buffers)
create_empty_buffers(page, dev, blocksize);
head = page->buffers;
blocks = PAGE_CACHE_SIZE >> blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
lblock = max_block(dev);
bh = head;
nr = 0;
i = 0;
do {
if (buffer_uptodate(bh))
continue;
if (!buffer_mapped(bh)) {
if (iblock <= lblock) {
if (blkdev_get_block(inode, iblock, bh))
continue;
}
if (!buffer_mapped(bh)) {
memset(kmap(page) + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap(page);
set_bit(BH_Uptodate, &bh->b_state);
continue;
}
/* get_block() might have updated the buffer synchronously */
if (buffer_uptodate(bh))
continue;
}
arr[nr] = bh;
nr++;
} while (i++, iblock++, (bh = bh->b_this_page) != head);
if (!nr) {
/*
* all buffers are uptodate - we can set the page
* uptodate as well.
*/
SetPageUptodate(page);
UnlockPage(page);
return 0;
}
/* Stage two: lock the buffers */
for (i = 0; i < nr; i++) {
struct buffer_head * bh = arr[i];
lock_buffer(bh);
set_buffer_async_io(bh);
}
/* Stage 3: start the IO */
for (i = 0; i < nr; i++)
submit_bh(READ, arr[i]);
return 0;
}
static int __blkdev_prepare_write(struct inode *inode, struct page *page,
unsigned from, unsigned to)
{
kdev_t dev = inode->i_rdev;
unsigned block_start, block_end;
unsigned long block;
int err = 0;
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
unsigned int blocksize, blocksize_bits;
blocksize = block_size(dev);
blocksize_bits = blksize_bits(blocksize);
if (!page->buffers)
create_empty_buffers(page, dev, blocksize);
head = page->buffers;
block = page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
for(bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
if (!bh)
BUG();
block_end = block_start + blocksize;
if (block_end <= from)
continue;
if (block_start >= to)
break;
if (!buffer_mapped(bh)) {
err = blkdev_get_block(inode, block, bh);
if (err)
goto out;
}
if (Page_Uptodate(page)) {
set_bit(BH_Uptodate, &bh->b_state);
continue;
}
if (!buffer_uptodate(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
*wait_bh++=bh;
}
}
/*
* If we issued read requests - let them complete.
*/
while(wait_bh > wait) {
wait_on_buffer(*--wait_bh);
err = -EIO;
if (!buffer_uptodate(*wait_bh))
goto out;
}
return 0;
out:
return err;
return block_read_full_page(page, blkdev_get_block);
}
static int blkdev_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
int err = __blkdev_prepare_write(inode, page, from, to);
if (err) {
ClearPageUptodate(page);
}
return err;
}
static int __blkdev_commit_write(struct inode *inode, struct page *page,
unsigned from, unsigned to)
{
unsigned block_start, block_end;
int partial = 0, need_balance_dirty = 0;
struct buffer_head *bh, *head;
unsigned int blocksize;
blocksize = block_size(inode->i_rdev);
for(bh = head = page->buffers, block_start = 0;
bh != head || !block_start;
block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
partial = 1;
} else {
set_bit(BH_Uptodate, &bh->b_state);
if (!atomic_set_buffer_dirty(bh)) {
__mark_dirty(bh);
need_balance_dirty = 1;
}
}
}
if (need_balance_dirty)
balance_dirty();
/*
* is this a partial write that happened to make all buffers
* uptodate then we can optimize away a bogus readpage() for
the next read(). Here we 'discover' whether the page went
* uptodate as a result of this (potentially partial) write.
*/
if (!partial)
SetPageUptodate(page);
return 0;
return block_prepare_write(page, from, to, blkdev_get_block);
}
static int blkdev_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
static int blkdev_commit_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
__blkdev_commit_write(inode,page,from,to);
return 0;
return block_commit_write(page, from, to);
}
/*
......@@ -565,12 +337,15 @@ struct block_device *bdget(dev_t dev)
if (new_bdev) {
struct inode *inode = new_inode(bd_mnt->mnt_sb);
if (inode) {
kdev_t kdev = to_kdev_t(dev);
atomic_set(&new_bdev->bd_count,1);
new_bdev->bd_dev = dev;
new_bdev->bd_op = NULL;
new_bdev->bd_inode = inode;
inode->i_rdev = to_kdev_t(dev);
inode->i_rdev = kdev;
inode->i_dev = kdev;
inode->i_bdev = new_bdev;
inode->i_blkbits = blksize_bits(block_size(kdev));
inode->i_data.a_ops = &def_blk_aops;
inode->i_data.gfp_mask = GFP_USER;
spin_lock(&bdev_lock);
......
......@@ -1482,10 +1482,10 @@ static int __block_write_full_page(struct inode *inode, struct page *page, get_b
BUG();
if (!page->buffers)
create_empty_buffers(page, inode->i_dev, inode->i_sb->s_blocksize);
create_empty_buffers(page, inode->i_dev, 1 << inode->i_blkbits);
head = page->buffers;
block = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
bh = head;
i = 0;
......@@ -1547,12 +1547,12 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
char *kaddr = kmap(page);
blocksize = inode->i_sb->s_blocksize;
blocksize = 1 << inode->i_blkbits;
if (!page->buffers)
create_empty_buffers(page, inode->i_dev, blocksize);
head = page->buffers;
bbits = inode->i_sb->s_blocksize_bits;
bbits = inode->i_blkbits;
block = page->index << (PAGE_CACHE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
......@@ -1615,7 +1615,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
unsigned blocksize;
struct buffer_head *bh, *head;
blocksize = inode->i_sb->s_blocksize;
blocksize = 1 << inode->i_blkbits;
for(bh = head = page->buffers, block_start = 0;
bh != head || !block_start;
......@@ -1664,14 +1664,14 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
if (!PageLocked(page))
PAGE_BUG(page);
blocksize = inode->i_sb->s_blocksize;
blocksize = 1 << inode->i_blkbits;
if (!page->buffers)
create_empty_buffers(page, inode->i_dev, blocksize);
head = page->buffers;
blocks = PAGE_CACHE_SIZE >> inode->i_sb->s_blocksize_bits;
iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
lblock = (inode->i_size+blocksize-1) >> inode->i_sb->s_blocksize_bits;
blocks = PAGE_CACHE_SIZE >> inode->i_blkbits;
iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;
bh = head;
nr = 0;
i = 0;
......@@ -1738,7 +1738,7 @@ int cont_prepare_write(struct page *page, unsigned offset, unsigned to, get_bloc
unsigned long pgpos;
long status;
unsigned zerofrom;
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned blocksize = 1 << inode->i_blkbits;
char *kaddr;
while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
......@@ -1823,6 +1823,14 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
return err;
}
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
__block_commit_write(inode,page,from,to);
kunmap(page);
return 0;
}
int generic_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
......@@ -1847,7 +1855,7 @@ int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t
struct buffer_head *bh;
int err;
blocksize = inode->i_sb->s_blocksize;
blocksize = 1 << inode->i_blkbits;
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
......@@ -1855,7 +1863,7 @@ int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t
return 0;
length = blocksize - length;
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
......@@ -2391,6 +2399,10 @@ int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
spin_lock(&unused_list_lock);
tmp = bh;
/* if this buffer was hashed, this page counts as buffermem */
if (bh->b_pprev)
atomic_dec(&buffermem_pages);
do {
struct buffer_head * p = tmp;
tmp = tmp->b_this_page;
......
......@@ -408,7 +408,6 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
unsigned long offset = 0;
kdev_t dev = sb->s_dev;
int blocksize = BLOCK_SIZE;
int hblock;
int db_count;
int i, j;
......@@ -429,7 +428,10 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
return NULL;
}
set_blocksize (dev, blocksize);
if (set_blocksize(dev, blocksize) < 0) {
printk ("EXT2-fs: unable to set blocksize %d\n", blocksize);
return NULL;
}
/*
* If the superblock doesn't start on a sector boundary,
......@@ -488,24 +490,19 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
if (sb->s_blocksize != BLOCK_SIZE &&
(sb->s_blocksize == 1024 || sb->s_blocksize == 2048 ||
sb->s_blocksize == 4096)) {
/*
* Make sure the blocksize for the filesystem is larger
* than the hardware sectorsize for the machine.
*/
hblock = get_hardsect_size(dev);
if (sb->s_blocksize < hblock) {
/* If the blocksize doesn't match, re-read the thing.. */
if (sb->s_blocksize != blocksize) {
blocksize = sb->s_blocksize;
brelse(bh);
if (set_blocksize(dev, blocksize) < 0) {
printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
goto failed_mount;
return NULL;
}
brelse (bh);
set_blocksize (dev, sb->s_blocksize);
logic_sb_block = (sb_block*BLOCK_SIZE) / sb->s_blocksize;
offset = (sb_block*BLOCK_SIZE) % sb->s_blocksize;
bh = bread (dev, logic_sb_block, sb->s_blocksize);
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
bh = bread (dev, logic_sb_block, blocksize);
if(!bh) {
printk("EXT2-fs: Couldn't read superblock on "
"2nd try.\n");
......@@ -518,6 +515,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
goto failed_mount;
}
}
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
sb->u.ext2_sb.s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
sb->u.ext2_sb.s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
......
......@@ -816,6 +816,7 @@ struct inode * get_empty_inode(void)
list_add(&inode->i_list, &inode_in_use);
inode->i_sb = NULL;
inode->i_dev = 0;
inode->i_blkbits = 0;
inode->i_ino = ++last_ino;
inode->i_flags = 0;
atomic_set(&inode->i_count, 1);
......@@ -849,6 +850,7 @@ static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, s
list_add(&inode->i_hash, head);
inode->i_sb = sb;
inode->i_dev = sb->s_dev;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_ino = ino;
inode->i_flags = 0;
atomic_set(&inode->i_count, 1);
......
......@@ -476,7 +476,7 @@ static struct dentry_operations pipefs_dentry_operations = {
static struct inode * get_pipe_inode(void)
{
struct inode *inode = get_empty_inode();
struct inode *inode = new_inode(pipe_mnt->mnt_sb);
if (!inode)
goto fail_inode;
......@@ -485,7 +485,6 @@ static struct inode * get_pipe_inode(void)
goto fail_iput;
PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
inode->i_fop = &rdwr_pipe_fops;
inode->i_sb = pipe_mnt->mnt_sb;
/*
* Mark the inode dirty from the very beginning,
......
......@@ -1428,7 +1428,6 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
}
sb = dir->i_sb;
inode->i_sb = sb;
inode->i_flags = 0;//inode->i_sb->s_flags;
/* item head of new item */
......
......@@ -533,7 +533,7 @@ int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode)
struct reiserfs_transaction_handle th ;
inode = get_empty_inode() ;
inode = new_inode(dir->i_sb) ;
if (!inode) {
return -ENOMEM ;
}
......@@ -586,7 +586,7 @@ int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, int rde
struct reiserfs_transaction_handle th ;
int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
inode = get_empty_inode() ;
inode = new_inode(dir->i_sb) ;
if (!inode) {
return -ENOMEM ;
}
......@@ -638,7 +638,7 @@ int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode)
struct reiserfs_transaction_handle th ;
int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
inode = get_empty_inode() ;
inode = new_inode(dir->i_sb) ;
if (!inode) {
return -ENOMEM ;
}
......@@ -859,7 +859,7 @@ int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const char * s
int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
inode = get_empty_inode() ;
inode = new_inode(dir->i_sb) ;
if (!inode) {
return -ENOMEM ;
}
......
......@@ -278,7 +278,7 @@ void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
extern void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset);
#define touch_buffer(bh) SetPageReferenced(bh->b_page)
#define touch_buffer(bh) mark_page_accessed(bh->b_page)
#include <linux/pipe_fs_i.h>
......@@ -433,6 +433,7 @@ struct inode {
time_t i_atime;
time_t i_mtime;
time_t i_ctime;
unsigned int i_blkbits;
unsigned long i_blksize;
unsigned long i_blocks;
unsigned long i_version;
......@@ -1299,12 +1300,14 @@ static inline struct inode *iget(struct super_block *sb, unsigned long ino)
extern void clear_inode(struct inode *);
extern struct inode * get_empty_inode(void);
static inline struct inode * new_inode(struct super_block *sb)
{
struct inode *inode = get_empty_inode();
if (inode) {
inode->i_sb = sb;
inode->i_dev = sb->s_dev;
inode->i_blkbits = sb->s_blocksize_bits;
}
return inode;
}
......@@ -1331,7 +1334,7 @@ static inline void bforget(struct buffer_head *buf)
if (buf)
__bforget(buf);
}
extern void set_blocksize(kdev_t, int);
extern int set_blocksize(kdev_t, int);
extern struct buffer_head * bread(kdev_t, int, int);
extern void wakeup_bdflush(void);
......@@ -1349,6 +1352,7 @@ extern int block_read_full_page(struct page*, get_block_t*);
extern int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
extern int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
unsigned long *);
extern int block_commit_write(struct page *page, unsigned from, unsigned to);
extern int block_sync_page(struct page *);
int generic_block_bmap(struct address_space *, long, get_block_t *);
......
......@@ -1244,6 +1244,13 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
if (mapping->i_mmap_shared != NULL)
flush_dcache_page(page);
/*
* Mark the page accessed if we read the
* beginning or we just did an lseek.
*/
if (!offset || !filp->f_reada)
mark_page_accessed(page);
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
......@@ -1259,7 +1266,6 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
mark_page_accessed(page);
page_cache_release(page);
if (ret == nr && desc->count)
continue;
......
......@@ -406,12 +406,6 @@ static int shrink_cache(int nr_pages, int max_scan, zone_t * classzone, unsigned
if (try_to_free_buffers(page, gfp_mask)) {
if (!page->mapping) {
/*
* Account we successfully freed a page
* of buffer cache.
*/
atomic_dec(&buffermem_pages);
/*
* We must not allow an anon page
* with no buffers to be visible on
......@@ -536,16 +530,20 @@ static void refill_inactive(int nr_pages)
static int FASTCALL(shrink_caches(int priority, zone_t * classzone, unsigned int gfp_mask, int nr_pages));
static int shrink_caches(int priority, zone_t * classzone, unsigned int gfp_mask, int nr_pages)
{
int max_scan = nr_inactive_pages / priority;
int max_scan;
int chunk_size = nr_pages;
unsigned long ratio;
nr_pages -= kmem_cache_reap(gfp_mask);
if (nr_pages <= 0)
return 0;
/* Do we want to age the active list? */
if (nr_inactive_pages < nr_active_pages*2)
refill_inactive(nr_pages);
nr_pages = chunk_size;
/* try to keep the active list 2/3 of the size of the cache */
ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
refill_inactive(ratio);
max_scan = nr_inactive_pages / priority;
nr_pages = shrink_cache(nr_pages, max_scan, classzone, gfp_mask);
if (nr_pages <= 0)
return 0;
......@@ -558,17 +556,28 @@ static int shrink_caches(int priority, zone_t * classzone, unsigned int gfp_mask
int try_to_free_pages(zone_t * classzone, unsigned int gfp_mask, unsigned int order)
{
int priority = DEF_PRIORITY;
int ret = 0;
do {
for (;;) {
int priority = DEF_PRIORITY;
int nr_pages = SWAP_CLUSTER_MAX;
nr_pages = shrink_caches(priority, classzone, gfp_mask, nr_pages);
if (nr_pages <= 0)
return 1;
ret |= swap_out(priority, classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
} while (--priority);
do {
nr_pages = shrink_caches(priority, classzone, gfp_mask, nr_pages);
if (nr_pages <= 0)
return 1;
ret |= swap_out(priority, classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
} while (--priority);
if (likely(ret))
break;
if (likely(current->pid != 1))
break;
current->policy |= SCHED_YIELD;
__set_current_state(TASK_RUNNING);
schedule();
}
return ret;
}
......
......@@ -440,11 +440,10 @@ struct socket *sock_alloc(void)
struct inode * inode;
struct socket * sock;
inode = get_empty_inode();
inode = new_inode(sock_mnt->mnt_sb);
if (!inode)
return NULL;
inode->i_sb = sock_mnt->mnt_sb;
sock = socki_lookup(inode);
inode->i_mode = S_IFSOCK|S_IRWXUGO;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment