Commit a315c77f authored by Linus Torvalds

Merge bk://linuxusb.bkbits.net/pci_hp-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 3909a6f1 314e5f17
......@@ -492,7 +492,7 @@ pmac_pic_init(void)
* and disables all interrupts except for the nominated one.
* sleep_restore_intrs() restores the states of all interrupt enables.
*/
unsigned int sleep_save_mask[2];
unsigned long sleep_save_mask[2];
void __pmac
pmac_sleep_save_intrs(int viaint)
......
......@@ -237,6 +237,18 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
intval = (is_read_only(dev) != 0);
return put_user(intval, (int *)(arg));
case BLKRASET:
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return blk_set_readahead(dev, arg);
case BLKRAGET:
case BLKFRAGET:
if (!arg)
return -EINVAL;
return put_user(blk_get_readahead(dev), (long *)arg);
case BLKSECTGET:
if ((q = blk_get_queue(dev)) == NULL)
return -EINVAL;
......@@ -259,11 +271,11 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
case BLKGETSIZE:
/* size in sectors, works up to 2 TB */
ullval = blkdev_size_in_bytes(dev);
ullval = bdev->bd_inode->i_size;
return put_user((unsigned long)(ullval >> 9), (unsigned long *) arg);
case BLKGETSIZE64:
/* size in bytes */
ullval = blkdev_size_in_bytes(dev);
ullval = bdev->bd_inode->i_size;
return put_user(ullval, (u64 *) arg);
#if 0
case BLKRRPART: /* Re-read partition tables */
......
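The hunks above route the BLKRASET/BLKFRASET and BLKRAGET/BLKFRAGET ioctls through the new blk_set_readahead()/blk_get_readahead() helpers. A minimal userspace sketch of how the revived ioctls would be exercised, assuming a placeholder device path; setting readahead now requires CAP_SYS_ADMIN, and BLKRAGET stores the value (in 512-byte sectors) through a long pointer.
/* Sketch only, not part of the patch.  /dev/hda is a placeholder. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKRAGET, BLKRASET */
int main(void)
{
	long ra;			/* readahead, in 512-byte sectors */
	int fd = open("/dev/hda", O_RDONLY);
	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKRAGET, &ra) == 0)
		printf("current readahead: %ld sectors\n", ra);
	if (ioctl(fd, BLKRASET, 256) < 0)	/* needs CAP_SYS_ADMIN */
		perror("BLKRASET");
	close(fd);
	return 0;
}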
......@@ -108,6 +108,47 @@ inline request_queue_t *blk_get_queue(kdev_t dev)
return &blk_dev[major(dev)].request_queue;
}
/**
* blk_set_readahead - set a queue's readahead tunable
* @dev: device
* @sectors: readahead, in 512 byte sectors
*
* Returns zero on success, else negative errno
*/
int blk_set_readahead(kdev_t dev, unsigned sectors)
{
int ret = -EINVAL;
request_queue_t *q = blk_get_queue(dev);
if (q) {
q->ra_sectors = sectors;
ret = 0;
}
return ret;
}
/**
* blk_get_readahead - query a queue's readahead tunable
* @dev: device
*
* Locates the passed device's request queue and returns its
* readahead setting.
*
* The returned value is in units of 512 byte sectors.
*
* Will return zero if the queue has never had its readahead
* setting altered.
*/
unsigned blk_get_readahead(kdev_t dev)
{
unsigned ret = 0;
request_queue_t *q = blk_get_queue(dev);
if (q)
ret = q->ra_sectors;
return ret;
}
void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
{
q->prep_rq_fn = pfn;
......@@ -810,7 +851,8 @@ int blk_init_queue(request_queue_t *q, request_fn_proc *rfn, spinlock_t *lock)
q->plug_tq.data = q;
q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
q->queue_lock = lock;
q->ra_sectors = 0; /* Use VM default */
blk_queue_segment_boundary(q, 0xffffffff);
blk_queue_make_request(q, __make_request);
......
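blk_set_readahead() and blk_get_readahead() simply store and read q->ra_sectors, which blk_init_queue() now initialises to 0 ("use the VM default"). A hedged in-kernel sketch of how a block driver might override that default after setting up its queue; MY_MAJOR and my_request_fn are hypothetical placeholders, not part of the patch.
/* Sketch only, assuming a hypothetical driver. */
static void my_request_fn(request_queue_t *q);		/* hypothetical */
static int my_driver_init_queue(request_queue_t *q, spinlock_t *lock)
{
	int ret = blk_init_queue(q, my_request_fn, lock);
	if (ret == 0)
		/* 128 sectors == 64KB of readahead; 0 keeps the VM default */
		blk_set_readahead(mk_kdev(MY_MAJOR, 0), 128);
	return ret;
}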
......@@ -160,13 +160,7 @@ compute_loop_size(struct loop_device *lo,
{
loff_t size = 0;
if (S_ISREG(lo_dentry->d_inode->i_mode)) {
size = lo_dentry->d_inode->i_size;
} else {
size = blkdev_size_in_bytes(lodev);
if (size == 0)
return MAX_DISK_SIZE;
}
size = lo_dentry->d_inode->i_mapping->host->i_size;
return (size - lo->lo_offset) >> BLOCK_SIZE_BITS;
}
......
......@@ -156,7 +156,6 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec,
do {
int count;
struct page ** hash;
struct page * page;
char * src, * dst;
int unlock = 0;
......@@ -166,8 +165,7 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec,
count = size;
size -= count;
hash = page_hash(mapping, index);
page = __find_get_page(mapping, index, hash);
page = find_get_page(mapping, index);
if (!page) {
page = grab_cache_page(mapping, index);
err = -ENOMEM;
......
......@@ -538,7 +538,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
* handle specially, do shortcut processing to speed things
* up.
*/
if (!test_bit(c, &tty->process_char_map) || tty->lnext) {
if (!test_bit(c, tty->process_char_map) || tty->lnext) {
finish_erasing(tty);
tty->lnext = 0;
if (L_ECHO(tty)) {
......@@ -659,7 +659,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
handle_newline:
spin_lock_irqsave(&tty->read_lock, flags);
set_bit(tty->read_head, &tty->read_flags);
set_bit(tty->read_head, tty->read_flags);
put_tty_queue_nolock(c, tty);
tty->canon_head = tty->read_head;
tty->canon_data++;
......@@ -811,38 +811,38 @@ static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
memset(tty->process_char_map, 0, 256/8);
if (I_IGNCR(tty) || I_ICRNL(tty))
set_bit('\r', &tty->process_char_map);
set_bit('\r', tty->process_char_map);
if (I_INLCR(tty))
set_bit('\n', &tty->process_char_map);
set_bit('\n', tty->process_char_map);
if (L_ICANON(tty)) {
set_bit(ERASE_CHAR(tty), &tty->process_char_map);
set_bit(KILL_CHAR(tty), &tty->process_char_map);
set_bit(EOF_CHAR(tty), &tty->process_char_map);
set_bit('\n', &tty->process_char_map);
set_bit(EOL_CHAR(tty), &tty->process_char_map);
set_bit(ERASE_CHAR(tty), tty->process_char_map);
set_bit(KILL_CHAR(tty), tty->process_char_map);
set_bit(EOF_CHAR(tty), tty->process_char_map);
set_bit('\n', tty->process_char_map);
set_bit(EOL_CHAR(tty), tty->process_char_map);
if (L_IEXTEN(tty)) {
set_bit(WERASE_CHAR(tty),
&tty->process_char_map);
tty->process_char_map);
set_bit(LNEXT_CHAR(tty),
&tty->process_char_map);
tty->process_char_map);
set_bit(EOL2_CHAR(tty),
&tty->process_char_map);
tty->process_char_map);
if (L_ECHO(tty))
set_bit(REPRINT_CHAR(tty),
&tty->process_char_map);
tty->process_char_map);
}
}
if (I_IXON(tty)) {
set_bit(START_CHAR(tty), &tty->process_char_map);
set_bit(STOP_CHAR(tty), &tty->process_char_map);
set_bit(START_CHAR(tty), tty->process_char_map);
set_bit(STOP_CHAR(tty), tty->process_char_map);
}
if (L_ISIG(tty)) {
set_bit(INTR_CHAR(tty), &tty->process_char_map);
set_bit(QUIT_CHAR(tty), &tty->process_char_map);
set_bit(SUSP_CHAR(tty), &tty->process_char_map);
set_bit(INTR_CHAR(tty), tty->process_char_map);
set_bit(QUIT_CHAR(tty), tty->process_char_map);
set_bit(SUSP_CHAR(tty), tty->process_char_map);
}
clear_bit(__DISABLED_CHAR, &tty->process_char_map);
clear_bit(__DISABLED_CHAR, tty->process_char_map);
sti();
tty->raw = 0;
tty->real_raw = 0;
......@@ -1058,7 +1058,7 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file,
int eol;
eol = test_and_clear_bit(tty->read_tail,
&tty->read_flags);
tty->read_flags);
c = tty->read_buf[tty->read_tail];
spin_lock_irqsave(&tty->read_lock, flags);
tty->read_tail = ((tty->read_tail+1) &
......
......@@ -266,6 +266,7 @@ ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
unsigned long limit;
int sector_size, sector_bits, sector_mask;
sector_t blocknr;
struct block_device *bdev;
/*
* First, a few checks on device size limits
......@@ -286,12 +287,13 @@ ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
new_iobuf = 1;
}
dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
bdev = raw_devices[minor].binding;
dev = to_kdev_t(bdev->bd_dev);
sector_size = raw_devices[minor].sector_size;
sector_bits = raw_devices[minor].sector_bits;
sector_mask = sector_size - 1;
limit = blkdev_size_in_bytes(dev) >> sector_bits;
limit = bdev->bd_inode->i_size >> sector_bits;
if (!limit)
limit = INT_MAX;
dprintk ("rw_raw_dev: dev %d:%d (+%d)\n",
......
......@@ -188,7 +188,7 @@ static unsigned long inq_canon(struct tty_struct * tty)
nr = (head - tail) & (N_TTY_BUF_SIZE-1);
/* Skip EOF-chars.. */
while (head != tail) {
if (test_bit(tail, &tty->read_flags) &&
if (test_bit(tail, tty->read_flags) &&
tty->read_buf[tail] == __DISABLED_CHAR)
nr--;
tail = (tail+1) & (N_TTY_BUF_SIZE-1);
......
......@@ -77,7 +77,7 @@ struct notifier_block *adb_client_list = NULL;
static int adb_got_sleep = 0;
static int adb_inited = 0;
static pid_t adb_probe_task_pid;
static int adb_probe_task_flag;
static unsigned long adb_probe_task_flag;
static wait_queue_head_t adb_probe_task_wq;
static int sleepy_trackpad;
int __adb_probe_sync;
......@@ -439,7 +439,7 @@ adb_probe_wakeup(struct adb_request *req)
}
static struct adb_request adb_sreq;
static int adb_sreq_lock; // Use semaphore ! */
static unsigned long adb_sreq_lock; // Use semaphore ! */
int
adb_request(struct adb_request *req, void (*done)(struct adb_request *),
......
......@@ -1577,7 +1577,7 @@ static int device_size_calculation(mddev_t * mddev)
if (!md_size[mdidx(mddev)])
md_size[mdidx(mddev)] = sb->size * data_disks;
readahead = MD_READAHEAD;
readahead = (blk_get_readahead(rdev->dev) * 512) / PAGE_SIZE;
if (!sb->level || (sb->level == 4) || (sb->level == 5)) {
readahead = (mddev->sb->chunk_size>>PAGE_SHIFT) * 4 * data_disks;
if (readahead < data_disks * (MAX_SECTORS>>(PAGE_SHIFT-9))*2)
......@@ -3387,7 +3387,7 @@ int md_do_sync(mddev_t *mddev, mdp_disk_t *spare)
/*
* Tune reconstruction:
*/
window = MAX_READAHEAD*(PAGE_SIZE/512);
window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n",
window/2,max_sectors/2);
......@@ -3605,7 +3605,7 @@ static void md_geninit(void)
for(i = 0; i < MAX_MD_DEVS; i++) {
md_blocksizes[i] = 1024;
md_size[i] = 0;
md_maxreadahead[i] = MD_READAHEAD;
md_maxreadahead[i] = 32;
}
blksize_size[MAJOR_NR] = md_blocksizes;
blk_size[MAJOR_NR] = md_size;
......
......@@ -413,7 +413,7 @@ static void handle_mtdblock_request(void)
for (;;) {
INIT_REQUEST;
req = CURRENT;
spin_unlock_irq(&QUEUE->queue_lock);
spin_unlock_irq(QUEUE->queue_lock);
mtdblk = mtdblks[minor(req->rq_dev)];
res = 0;
......@@ -457,7 +457,7 @@ static void handle_mtdblock_request(void)
}
end_req:
spin_lock_irq(&QUEUE->queue_lock);
spin_lock_irq(QUEUE->queue_lock);
if (!end_that_request_first(req, res, req->hard_cur_sectors)) {
blkdev_dequeue_request(req);
end_that_request_last(req);
......@@ -487,16 +487,16 @@ int mtdblock_thread(void *dummy)
while (!leaving) {
add_wait_queue(&thr_wq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irq(&QUEUE->queue_lock);
spin_lock_irq(QUEUE->queue_lock);
if (QUEUE_EMPTY || QUEUE_PLUGGED) {
spin_unlock_irq(&QUEUE->queue_lock);
spin_unlock_irq(QUEUE->queue_lock);
schedule();
remove_wait_queue(&thr_wq, &wait);
} else {
remove_wait_queue(&thr_wq, &wait);
set_current_state(TASK_RUNNING);
handle_mtdblock_request();
spin_unlock_irq(&QUEUE->queue_lock);
spin_unlock_irq(QUEUE->queue_lock);
}
}
......@@ -541,7 +541,7 @@ static int mtdblock_ioctl(struct inode * inode, struct file * file,
return -EACCES;
#endif
fsync_bdev(inode->i_bdev);
invalidate_bdev(inode->b_rdev, 0);
invalidate_bdev(inode->i_bdev, 0);
down(&mtdblk->cache_sem);
write_cached_data(mtdblk);
up(&mtdblk->cache_sem);
......
......@@ -19,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <asm/uaccess.h>
......@@ -172,7 +173,6 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
if (offset >= 0 && offset <= size) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
file->f_version = ++event;
}
retval = offset;
......@@ -692,9 +692,20 @@ int blkdev_close(struct inode * inode, struct file * filp)
static int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
unsigned long arg)
{
if (inode->i_bdev->bd_op->ioctl)
return inode->i_bdev->bd_op->ioctl(inode, file, cmd, arg);
return -EINVAL;
int ret = -EINVAL;
switch (cmd) {
case BLKRAGET:
case BLKFRAGET:
case BLKRASET:
case BLKFRASET:
ret = blk_ioctl(inode->i_bdev, cmd, arg);
break;
default:
if (inode->i_bdev->bd_op->ioctl)
ret = inode->i_bdev->bd_op->ioctl(inode, file, cmd, arg);
break;
}
return ret;
}
struct address_space_operations def_blk_aops = {
......
......@@ -134,7 +134,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
return read_buffers[i] + blk_offset;
}
devsize = blkdev_size_in_bytes(sb->s_dev) >> 12;
devsize = sb->s_bdev->bd_inode->i_size >> 12;
if (!devsize)
devsize = ~0UL;
......
......@@ -1029,7 +1029,7 @@ static int ext3_prepare_write(struct file *file, struct page *page,
goto prepare_write_failed;
if (ext3_should_journal_data(inode)) {
ret = walk_page_buffers(handle, page->buffers,
ret = walk_page_buffers(handle, page_buffers(page),
from, to, NULL, do_journal_get_write_access);
if (ret) {
/*
......@@ -1102,7 +1102,7 @@ static int ext3_commit_write(struct file *file, struct page *page,
int partial = 0;
loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
ret = walk_page_buffers(handle, page->buffers,
ret = walk_page_buffers(handle, page_buffers(page),
from, to, &partial, commit_write_fn);
if (!partial)
SetPageUptodate(page);
......@@ -1112,7 +1112,7 @@ static int ext3_commit_write(struct file *file, struct page *page,
EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
} else {
if (ext3_should_order_data(inode)) {
ret = walk_page_buffers(handle, page->buffers,
ret = walk_page_buffers(handle, page_buffers(page),
from, to, NULL, journal_dirty_sync_data);
}
/* Be careful here if generic_commit_write becomes a
......@@ -1252,7 +1252,7 @@ static int bget_one(handle_t *handle, struct buffer_head *bh)
static int ext3_writepage(struct page *page)
{
struct inode *inode = page->mapping->host;
struct buffer_head *page_buffers;
struct buffer_head *page_bufs;
handle_t *handle = NULL;
int ret = 0, err;
int needed;
......@@ -1285,14 +1285,14 @@ static int ext3_writepage(struct page *page)
unlock_kernel();
page_buffers = NULL; /* Purely to prevent compiler warning */
page_bufs = NULL; /* Purely to prevent compiler warning */
/* bget() all the buffers */
if (order_data) {
if (!page->buffers)
if (!page_has_buffers(page))
create_empty_buffers(page, inode->i_sb->s_blocksize);
page_buffers = page->buffers;
walk_page_buffers(handle, page_buffers, 0,
page_bufs = page_buffers(page);
walk_page_buffers(handle, page_bufs, 0,
PAGE_CACHE_SIZE, NULL, bget_one);
}
......@@ -1301,7 +1301,7 @@ static int ext3_writepage(struct page *page)
/*
* The page can become unlocked at any point now, and
* truncate can then come in and change things. So we
* can't touch *page from now on. But *page_buffers is
* can't touch *page from now on. But *page_bufs is
* safe due to elevated refcount.
*/
......@@ -1310,7 +1310,7 @@ static int ext3_writepage(struct page *page)
/* And attach them to the current transaction */
if (order_data) {
err = walk_page_buffers(handle, page_buffers,
err = walk_page_buffers(handle, page_bufs,
0, PAGE_CACHE_SIZE, NULL, journal_dirty_async_data);
if (!ret)
ret = err;
......@@ -1392,11 +1392,11 @@ static int ext3_block_truncate_page(handle_t *handle,
if (!page)
goto out;
if (!page->buffers)
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize);
/* Find the buffer that contains "offset" */
bh = page->buffers;
bh = page_buffers(page);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
......
......@@ -166,7 +166,6 @@ static hfs_rwret_t hfs_file_read(struct file * filp, char * buf,
}
if ((read = hfs_do_read(inode, HFS_I(inode)->fork, pos, buf, left)) > 0) {
*ppos += read;
filp->f_reada = 1;
}
return read;
......
......@@ -105,7 +105,6 @@ static loff_t cap_info_llseek(struct file *file, loff_t offset, int origin)
if (offset>=0 && offset<=HFS_FORK_MAX) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
}
retval = offset;
}
......
......@@ -361,7 +361,6 @@ loff_t hdr_llseek(struct file *file, loff_t offset, int origin)
if (offset>=0 && offset<file->f_dentry->d_inode->i_size) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
}
retval = offset;
}
......@@ -594,7 +593,6 @@ static hfs_rwret_t hdr_read(struct file * filp, char * buf,
} else if (fork) {
left = hfs_do_read(inode, fork, offset, buf, left);
if (left > 0) {
filp->f_reada = 1;
} else if (!read) {
return left;
} else {
......
......@@ -143,6 +143,8 @@ void inode_init_once(struct inode *inode)
INIT_LIST_HEAD(&inode->i_dirty_data_buffers);
INIT_LIST_HEAD(&inode->i_devices);
sema_init(&inode->i_sem, 1);
INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
rwlock_init(&inode->i_data.page_lock);
spin_lock_init(&inode->i_data.i_shared_lock);
INIT_LIST_HEAD(&inode->i_data.i_mmap);
INIT_LIST_HEAD(&inode->i_data.i_mmap_shared);
......@@ -431,7 +433,7 @@ void sync_inodes(void)
}
}
static void try_to_sync_unused_inodes(void * arg)
static void try_to_sync_unused_inodes(unsigned long pexclusive)
{
struct super_block * sb;
int nr_inodes = inodes_stat.nr_unused;
......@@ -448,10 +450,9 @@ static void try_to_sync_unused_inodes(void * arg)
}
spin_unlock(&sb_lock);
spin_unlock(&inode_lock);
clear_bit(0, (unsigned long *)pexclusive);
}
static struct tq_struct unused_inodes_flush_task;
/**
* write_inode_now - write an inode to disk
* @inode: inode to write to disk
......@@ -744,8 +745,15 @@ void prune_icache(int goal)
* from here or we're either synchronously dogslow
* or we deadlock with oom.
*/
if (goal)
schedule_task(&unused_inodes_flush_task);
if (goal) {
static unsigned long exclusive;
if (!test_and_set_bit(0, &exclusive)) {
if (pdflush_operation(try_to_sync_unused_inodes,
(unsigned long)&exclusive))
clear_bit(0, &exclusive);
}
}
}
/*
* This is called from kswapd when we think we need some
......@@ -1171,8 +1179,6 @@ void __init inode_init(unsigned long mempages)
NULL);
if (!inode_cachep)
panic("cannot create inode slab cache");
unused_inodes_flush_task.routine = try_to_sync_unused_inodes;
}
static inline void do_atime_update(struct inode *inode)
......
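The prune_icache() hunk above replaces the old schedule_task()-based flush with pdflush_operation(), guarded by a static exclusion bit. A short sketch of that calling convention, with hypothetical names; a nonzero return from pdflush_operation() means no pdflush thread could be dispatched, so the caller has to clear its own exclusion bit again.
/* Sketch only; my_writeback_fn and my_flush_exclusive are hypothetical. */
static unsigned long my_flush_exclusive;
static void my_writeback_fn(unsigned long pexclusive)
{
	/* ... do the actual writeback work ... */
	clear_bit(0, (unsigned long *)pexclusive);	/* re-arm for next run */
}
static void kick_my_writeback(void)
{
	if (!test_and_set_bit(0, &my_flush_exclusive)) {
		if (pdflush_operation(my_writeback_fn,
				      (unsigned long)&my_flush_exclusive))
			clear_bit(0, &my_flush_exclusive);
	}
}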
......@@ -1884,7 +1884,6 @@ static struct file *presto_filp_dopen(struct dentry *dentry, int flags)
f->f_dentry = dentry;
f->f_pos = 0;
f->f_reada = 0;
f->f_op = NULL;
if (inode->i_op)
/* XXX should we set to presto ops, or leave at cache ops? */
......
......@@ -1661,10 +1661,11 @@ int journal_try_to_free_buffers(journal_t *journal,
struct buffer_head *tmp;
int locked_or_dirty = 0;
int call_ttfb = 1;
int ret;
J_ASSERT(PageLocked(page));
bh = page->buffers;
bh = page_buffers(page);
tmp = bh;
spin_lock(&journal_datalist_lock);
do {
......@@ -1688,7 +1689,10 @@ int journal_try_to_free_buffers(journal_t *journal,
*/
call_ttfb = 1;
out:
return call_ttfb;
ret = 0;
if (call_ttfb)
ret = try_to_free_buffers(page, gfp_mask);
return ret;
}
/*
......@@ -1881,7 +1885,7 @@ int journal_flushpage(journal_t *journal,
if (!PageLocked(page))
BUG();
if (!page->buffers)
if (!page_has_buffers(page))
return 1;
/* We will potentially be playing with lists other than just the
......@@ -1889,7 +1893,7 @@ int journal_flushpage(journal_t *journal,
* cautious in our locking. */
lock_journal(journal);
head = bh = page->buffers;
head = bh = page_buffers(page);
do {
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
......@@ -1911,7 +1915,7 @@ int journal_flushpage(journal_t *journal,
if (!offset) {
if (!may_free || !try_to_free_buffers(page, 0))
return 0;
J_ASSERT(page->buffers == NULL);
J_ASSERT(!page_has_buffers(page));
}
return 1;
}
......
......@@ -94,12 +94,12 @@ static struct super_operations jffs2_super_operations =
static int jffs2_sb_compare(struct super_block *sb, void *data)
{
struct mtd_info *mtd = data;
struct jffs2_sb_info *p = data;
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
/* The superblocks are considered to be equivalent if the underlying MTD
device is the same one */
if (c->mtd == mtd) {
if (c->mtd == p->mtd) {
D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", mtd->index, mtd->name));
return 1;
} else {
......@@ -111,12 +111,14 @@ static int jffs2_sb_compare(struct super_block *sb, void *data)
static int jffs2_sb_set(struct super_block *sb, void *data)
{
struct mtd_info *mtd = data;
struct jffs2_sb_info *p = data;
/* For persistence of NFS exports etc. we use the same s_dev
each time we mount the device, don't just use an anonymous
device */
sb->s_dev = mk_kdev(MTD_BLOCK_MAJOR, mtd->index);
sb->u.generic_sbp = p;
p->os_priv = sb;
sb->s_dev = mk_kdev(MTD_BLOCK_MAJOR, p->mtd->index);
return 0;
}
......@@ -129,7 +131,13 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
struct jffs2_sb_info *c;
int ret;
sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, mtd);
c = kmalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
memset(c, 0, sizeof(*c));
c->mtd = mtd;
sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
if (IS_ERR(sb))
goto out_put;
......@@ -144,19 +152,8 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name));
c = kmalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
sb = ERR_PTR(-ENOMEM);
goto out_put;
}
sb->u.generic_sbp = c;
sb->s_op = &jffs2_super_operations;
memset(c, 0, sizeof(*c));
c->os_priv = sb;
c->mtd = mtd;
ret = jffs2_do_fill_super(sb, data, (flags&MS_VERBOSE)?1:0);
if (ret) {
......@@ -164,13 +161,15 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
up_write(&sb->s_umount);
deactivate_super(sb);
sb = ERR_PTR(ret);
goto out_put;
goto out_put1;
}
sb->s_flags |= MS_ACTIVE;
return sb;
out_put:
kfree(c);
out_put1:
put_mtd_device(mtd);
return sb;
......@@ -288,18 +287,23 @@ void jffs2_put_super (struct super_block *sb)
kfree(c->blocks);
if (c->mtd->sync)
c->mtd->sync(c->mtd);
put_mtd_device(c->mtd);
kfree(c);
D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
}
static void jffs2_kill_sb(struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
generic_shutdown_super(sb);
put_mtd_device(c->mtd);
kfree(c);
}
static struct file_system_type jffs2_fs_type = {
owner: THIS_MODULE,
name: "jffs2",
get_sb: jffs2_get_sb,
kill_sb: generic_shutdown_super
kill_sb: jffs2_kill_sb,
};
......
......@@ -67,11 +67,7 @@ struct raparms {
unsigned int p_count;
ino_t p_ino;
kdev_t p_dev;
unsigned long p_reada,
p_ramax,
p_raend,
p_ralen,
p_rawin;
struct file_ra_state p_ra;
};
static struct raparms * raparml;
......@@ -564,11 +560,7 @@ nfsd_get_raparms(kdev_t dev, ino_t ino)
ra = *frap;
ra->p_dev = dev;
ra->p_ino = ino;
ra->p_reada = 0;
ra->p_ramax = 0;
ra->p_raend = 0;
ra->p_ralen = 0;
ra->p_rawin = 0;
memset(&ra->p_ra, 0, sizeof(ra->p_ra));
found:
if (rap != &raparm_cache) {
*rap = ra->p_next;
......@@ -611,31 +603,18 @@ nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
/* Get readahead parameters */
ra = nfsd_get_raparms(inode->i_dev, inode->i_ino);
if (ra) {
file.f_reada = ra->p_reada;
file.f_ramax = ra->p_ramax;
file.f_raend = ra->p_raend;
file.f_ralen = ra->p_ralen;
file.f_rawin = ra->p_rawin;
}
if (ra)
file.f_ra = ra->p_ra;
file.f_pos = offset;
oldfs = get_fs(); set_fs(KERNEL_DS);
oldfs = get_fs();
set_fs(KERNEL_DS);
err = file.f_op->read(&file, buf, *count, &file.f_pos);
set_fs(oldfs);
/* Write back readahead params */
if (ra != NULL) {
dprintk("nfsd: raparms %ld %ld %ld %ld %ld\n",
file.f_reada, file.f_ramax, file.f_raend,
file.f_ralen, file.f_rawin);
ra->p_reada = file.f_reada;
ra->p_ramax = file.f_ramax;
ra->p_raend = file.f_raend;
ra->p_ralen = file.f_ralen;
ra->p_rawin = file.f_rawin;
ra->p_count -= 1;
}
if (ra)
ra->p_ra = file.f_ra;
if (err >= 0) {
nfsdstats.io_read += err;
......
......@@ -635,7 +635,6 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
f->f_dentry = dentry;
f->f_vfsmnt = mnt;
f->f_pos = 0;
f->f_reada = 0;
f->f_op = fops_get(inode->i_fop);
file_move(f, &inode->i_sb->s_files);
......@@ -686,7 +685,7 @@ int get_unused_fd(void)
write_lock(&files->file_lock);
repeat:
fd = find_next_zero_bit(files->open_fds,
fd = find_next_zero_bit(files->open_fds->fds_bits,
files->max_fdset,
files->next_fd);
......
......@@ -37,7 +37,6 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
if (offset>=0 && offset<=inode->i_sb->s_maxbytes) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
file->f_version = ++event;
}
retval = offset;
......@@ -62,7 +61,6 @@ loff_t remote_llseek(struct file *file, loff_t offset, int origin)
if (offset>=0 && offset<=file->f_dentry->d_inode->i_sb->s_maxbytes) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
file->f_version = ++event;
}
retval = offset;
......@@ -92,7 +90,6 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
if (offset >= 0) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
file->f_version = ++event;
}
retval = offset;
......
......@@ -146,8 +146,8 @@ static void add_to_flushlist(struct inode *inode, struct buffer_head *bh) {
static inline void fix_tail_page_for_writing(struct page *page) {
struct buffer_head *head, *next, *bh ;
if (page && page->buffers) {
head = page->buffers ;
if (page && page_has_buffers(page)) {
head = page_buffers(page) ;
bh = head ;
do {
next = bh->b_this_page ;
......@@ -1685,7 +1685,7 @@ static int grab_tail_page(struct inode *p_s_inode,
kunmap(page) ; /* mapped by block_prepare_write */
head = page->buffers ;
head = page_buffers(page) ;
bh = head;
do {
if (pos >= start) {
......@@ -1930,7 +1930,7 @@ static int reiserfs_write_full_page(struct page *page) {
struct buffer_head *arr[PAGE_CACHE_SIZE/512] ;
int nr = 0 ;
if (!page->buffers) {
if (!page_has_buffers(page)) {
block_prepare_write(page, 0, 0, NULL) ;
kunmap(page) ;
}
......@@ -1948,7 +1948,7 @@ static int reiserfs_write_full_page(struct page *page) {
flush_dcache_page(page) ;
kunmap(page) ;
}
head = page->buffers ;
head = page_buffers(page) ;
bh = head ;
block = page->index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits) ;
do {
......
......@@ -156,10 +156,10 @@ unmap_buffers(struct page *page, loff_t pos) {
unsigned long cur_index ;
if (page) {
if (page->buffers) {
if (page_has_buffers(page)) {
tail_index = pos & (PAGE_CACHE_SIZE - 1) ;
cur_index = 0 ;
head = page->buffers ;
head = page_buffers(page) ;
bh = head ;
do {
next = bh->b_this_page ;
......
......@@ -184,7 +184,7 @@ static void remove_super(struct super_block *s)
up_write(&s->s_umount);
}
static void generic_shutdown_super(struct super_block *sb)
void generic_shutdown_super(struct super_block *sb)
{
struct dentry *root = sb->s_root;
struct super_operations *sop = sb->s_op;
......
......@@ -469,18 +469,23 @@ static __inline__ int ffs(int x)
#ifdef __KERNEL__
#define ext2_set_bit __test_and_set_bit
#define ext2_clear_bit __test_and_clear_bit
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit
#define ext2_set_bit(nr,addr) \
__test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
__test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
find_first_zero_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
find_next_zero_bit((unsigned long*)addr, size, off)
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
#define minix_find_first_zero_bit(addr,size) \
find_first_zero_bit((void*)addr,size)
#endif /* __KERNEL__ */
......
......@@ -223,10 +223,10 @@ static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; retur
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
static inline int ptep_test_and_clear_dirty(pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep); }
static inline int ptep_test_and_clear_young(pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); }
static inline void ptep_set_wrprotect(pte_t *ptep) { clear_bit(_PAGE_BIT_RW, ptep); }
static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, ptep); }
static inline int ptep_test_and_clear_dirty(pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); }
static inline int ptep_test_and_clear_young(pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); }
static inline void ptep_set_wrprotect(pte_t *ptep) { clear_bit(_PAGE_BIT_RW, &ptep->pte_low); }
static inline void ptep_mkdirty(pte_t *ptep) { set_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); }
/*
* Conversion functions: convert a page and protection to a page entry,
......
......@@ -23,7 +23,7 @@
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
__u32 flags; /* low level flags */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
......
......@@ -394,8 +394,8 @@ static __inline__ unsigned long find_next_zero_bit(void * addr,
#ifdef __KERNEL__
#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, addr)
#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
......
......@@ -429,7 +429,7 @@ extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
\
if (__page != ZERO_PAGE(__physpage)) { \
int __users = page_count(__page); \
__users -= !!__page->buffers + !!__page->mapping; \
__users -= !!PagePrivate(__page) + !!__page->mapping; \
\
if (__users == 1) \
pte_val(__pte) |= _PAGE_MKCLEAR; \
......
......@@ -448,7 +448,7 @@ extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
\
if (__page != ZERO_PAGE(__physpage)) { \
int __users = page_count(__page); \
__users -= !!__page->buffers + !!__page->mapping; \
__users -= !!PagePrivate(__page) + !!__page->mapping; \
\
if (__users == 1) \
pte_val(__pte) |= _PAGE_MKCLEAR; \
......
......@@ -152,6 +152,12 @@ struct request_queue
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
/*
* The VM-level readahead tunable for this device. In
* units of 512-byte sectors.
*/
unsigned ra_sectors;
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
......@@ -308,6 +314,8 @@ extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn);
extern int blk_set_readahead(kdev_t dev, unsigned sectors);
extern unsigned blk_get_readahead(kdev_t dev);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
......@@ -322,10 +330,6 @@ extern int * blksize_size[MAX_BLKDEV];
#define MAX_SEGMENT_SIZE 65536
/* read-ahead in pages.. */
#define MAX_READAHEAD 31
#define MIN_READAHEAD 3
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
extern void drive_stat_acct(struct request *, int, int);
......
......@@ -21,6 +21,7 @@
#include <linux/cache.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/radix-tree.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
......@@ -172,12 +173,10 @@ extern int leases_enable, dir_notify_enable, lease_break_time;
#define BLKRRPART _IO(0x12,95) /* re-read partition table */
#define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */
#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
#if 0 /* Obsolete, these don't do anything. */
#define BLKRASET _IO(0x12,98) /* set read ahead for block device */
#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
#define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */
#define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
#endif
#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
#define BLKSSZGET _IO(0x12,104)/* get block device sector size */
......@@ -286,6 +285,24 @@ extern void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long
#define touch_buffer(bh) mark_page_accessed(bh->b_page)
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
({ \
if (!PagePrivate(page)) \
BUG(); \
((struct buffer_head *)(page)->private); \
})
#define page_has_buffers(page) PagePrivate(page)
#define set_page_buffers(page, buffers) \
do { \
SetPagePrivate(page); \
page->private = (unsigned long)buffers; \
} while (0)
#define clear_page_buffers(page) \
do { \
ClearPagePrivate(page); \
page->private = 0; \
} while (0)
#include <linux/pipe_fs_i.h>
/* #include <linux/umsdos_fs_i.h> */
......@@ -370,6 +387,8 @@ struct address_space_operations {
};
struct address_space {
struct radix_tree_root page_tree; /* radix tree of all pages */
rwlock_t page_lock; /* and rwlock protecting it */
struct list_head clean_pages; /* list of clean pages */
struct list_head dirty_pages; /* list of dirty pages */
struct list_head locked_pages; /* list of locked pages */
......@@ -484,6 +503,18 @@ struct fown_struct {
int signum; /* posix.1b rt signal to be delivered on IO */
};
/*
* Track a single file's readahead state
*/
struct file_ra_state {
unsigned long start; /* Current window */
unsigned long size;
unsigned long next_size; /* Next window size */
unsigned long prev_page; /* Cache last read() position */
unsigned long ahead_start; /* Ahead window */
unsigned long ahead_size;
};
struct file {
struct list_head f_list;
struct dentry *f_dentry;
......@@ -493,10 +524,10 @@ struct file {
unsigned int f_flags;
mode_t f_mode;
loff_t f_pos;
unsigned long f_reada, f_ramax, f_raend, f_ralen, f_rawin;
struct fown_struct f_owner;
unsigned int f_uid, f_gid;
int f_error;
struct file_ra_state f_ra;
unsigned long f_version;
......@@ -925,6 +956,7 @@ struct super_block *get_sb_single(struct file_system_type *fs_type,
struct super_block *get_sb_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
......
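The page_buffers(), page_has_buffers(), set_page_buffers() and clear_page_buffers() accessors added above replace direct page->buffers dereferences now that the buffer list lives behind page->private and the PG_private flag. A minimal sketch of the conversion pattern that the ext3, reiserfs and jbd hunks elsewhere in this diff follow (the function name is illustrative only).
/* Illustration of the conversion pattern, not part of the patch. */
static void for_each_buffer_example(struct page *page)
{
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))		/* was: if (!page->buffers) */
		return;
	head = bh = page_buffers(page);		/* was: bh = page->buffers */
	do {
		/* ... per-buffer work ... */
		bh = bh->b_this_page;
	} while (bh != head);
}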
......@@ -200,16 +200,16 @@ static inline void *hfs_buffer_data(const hfs_buffer buffer) {
#endif
static inline int hfs_clear_bit(int bitnr, hfs_u32 *lword) {
return test_and_clear_bit(BITNR(bitnr), lword);
return test_and_clear_bit(BITNR(bitnr), (unsigned long *)lword);
}
static inline int hfs_set_bit(int bitnr, hfs_u32 *lword) {
return test_and_set_bit(BITNR(bitnr), lword);
return test_and_set_bit(BITNR(bitnr), (unsigned long *)lword);
}
static inline int hfs_test_bit(int bitnr, const hfs_u32 *lword) {
/* the kernel should declare the second arg of test_bit as const */
return test_bit(BITNR(bitnr), (void *)lword);
return test_bit(BITNR(bitnr), (unsigned long *)lword);
}
#undef BITNR
......
......@@ -149,15 +149,12 @@ typedef struct page {
struct list_head list; /* ->mapping has some page lists. */
struct address_space *mapping; /* The inode (or ...) we belong to. */
unsigned long index; /* Our offset within mapping. */
struct page *next_hash; /* Next page sharing our hash bucket in
the pagecache hash table. */
atomic_t count; /* Usage count, see below. */
unsigned long flags; /* atomic flags, some possibly
updated asynchronously */
struct list_head lru; /* Pageout list, eg. active_list;
protected by pagemap_lru_lock !! */
struct page **pprev_hash; /* Complement to *next_hash. */
struct buffer_head * buffers; /* Buffer maps us to a disk block. */
unsigned long private; /* fs-private opaque data */
/*
* On machines where all RAM is mapped into kernel address space,
......@@ -180,7 +177,7 @@ typedef struct page {
*
* What counts for a page usage:
* - cache mapping (page->mapping)
* - disk mapping (page->buffers)
* - private data (page->private)
* - page mapped in a task's page tables, each mapping
* is counted separately
*
......@@ -223,22 +220,23 @@ typedef struct page {
* page->mapping is the pointer to the inode, and page->index is the
* file offset of the page, in units of PAGE_CACHE_SIZE.
*
* A page may have buffers allocated to it. In this case,
* page->buffers is a circular list of these buffer heads. Else,
* page->buffers == NULL.
* A page contains an opaque `private' member, which belongs to the
* page's address_space. Usually, this is the address of a circular
* list of the page's disk buffers.
*
* The PG_private bitflag is set if page->private contains a valid
* value.
* For pages belonging to inodes, the page->count is the number of
* attaches, plus 1 if buffers are allocated to the page, plus one
* for the page cache itself.
* attaches, plus 1 if `private' contains something, plus one for
* the page cache itself.
*
* All pages belonging to an inode are in these doubly linked lists:
* mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
* using the page->list list_head. These fields are also used for
* freelist management (when page->count==0).
*
* There is also a hash table mapping (mapping,index) to the page
* in memory if present. The lists for this hash table use the fields
* page->next_hash and page->pprev_hash.
* There is also a per-mapping radix tree mapping index to the page
* in memory if present. The tree is rooted at mapping->page_tree.
*
* All process pages can do I/O:
* - inode pages may need to be read from disk,
......@@ -294,6 +292,8 @@ typedef struct page {
#define PG_reserved 14
#define PG_launder 15 /* written out by VM pressure.. */
#define PG_private 16 /* Has something at ->private */
/* Make it prettier to test the above... */
#define UnlockPage(page) unlock_page(page)
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
......@@ -310,6 +310,9 @@ typedef struct page {
#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
#define __SetPageReserved(page) __set_bit(PG_reserved, &(page)->flags)
#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
#define PagePrivate(page) test_bit(PG_private, &(page)->flags)
/*
* The zone field is never updated after free_area_init_core()
......@@ -470,7 +473,7 @@ extern struct address_space swapper_space;
static inline int is_page_cache_freeable(struct page * page)
{
return page_count(page) - !!page->buffers == 1;
return page_count(page) - !!PagePrivate(page) == 1;
}
extern int can_share_swap_page(struct page *);
......@@ -535,6 +538,13 @@ extern void truncate_inode_pages(struct address_space *, loff_t);
extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
/* readahead.c */
void do_page_cache_readahead(struct file *file,
unsigned long offset, unsigned long nr_to_read);
void page_cache_readahead(struct file *file, unsigned long offset);
void page_cache_readaround(struct file *file, unsigned long offset);
void handle_ra_thrashing(struct file *file);
/* vma is the first one with address < vma->vm_end,
* and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
......@@ -579,6 +589,9 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
extern int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
extern int pdflush_flush(unsigned long nr_pages);
extern struct page * vmalloc_to_page(void *addr);
#endif /* __KERNEL__ */
......
......@@ -41,53 +41,39 @@ static inline struct page *page_cache_alloc(struct address_space *x)
*/
#define page_cache_entry(x) virt_to_page(x)
extern unsigned int page_hash_bits;
#define PAGE_HASH_BITS (page_hash_bits)
#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
extern atomic_t page_cache_size; /* # of pages currently in the hash table */
extern struct page **page_hash_table;
extern void page_cache_init(unsigned long);
extern atomic_t page_cache_size; /* # of pages currently in the page cache */
extern struct page * find_get_page(struct address_space *mapping,
unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
unsigned long index);
extern struct page * find_trylock_page(struct address_space *mapping,
unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
unsigned long index, unsigned int gfp_mask);
/*
* We use a power-of-two hash table to avoid a modulus,
* and get a reasonable hash by knowing roughly how the
* inode pointer and indexes are distributed (ie, we
* roughly know which bits are "significant")
*
* For the time being it will work for struct address_space too (most of
* them sitting inside the inodes). We might want to change it later.
*/
static inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long index)
extern struct page * grab_cache_page(struct address_space *mapping,
unsigned long index);
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
unsigned long index);
extern int add_to_page_cache(struct page *page,
struct address_space *mapping, unsigned long index);
extern int add_to_page_cache_unique(struct page *page,
struct address_space *mapping, unsigned long index);
static inline void ___add_to_page_cache(struct page *page,
struct address_space *mapping, unsigned long index)
{
#define i (((unsigned long) mapping)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
return s(i+index) & (PAGE_HASH_SIZE-1);
#undef i
#undef s
}
#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))
list_add(&page->list, &mapping->clean_pages);
page->mapping = mapping;
page->index = index;
extern struct page * __find_get_page(struct address_space *mapping,
unsigned long index, struct page **hash);
#define find_get_page(mapping, index) \
__find_get_page(mapping, index, page_hash(mapping, index))
extern struct page * __find_lock_page (struct address_space * mapping,
unsigned long index, struct page **hash);
extern struct page * find_or_create_page(struct address_space *mapping,
unsigned long index, unsigned int gfp_mask);
mapping->nrpages++;
atomic_inc(&page_cache_size);
}
extern void FASTCALL(lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));
#define find_lock_page(mapping, index) \
__find_lock_page(mapping, index, page_hash(mapping, index))
extern struct page *find_trylock_page(struct address_space *, unsigned long);
extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);
extern void ___wait_on_page(struct page *);
......@@ -99,9 +85,6 @@ static inline void wait_on_page(struct page * page)
extern void wake_up_page(struct page *);
extern struct page * grab_cache_page (struct address_space *, unsigned long);
extern struct page * grab_cache_page_nowait (struct address_space *, unsigned long);
typedef int filler_t(void *, struct page*);
extern struct page *read_cache_page(struct address_space *, unsigned long,
......
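With the hash table gone, the three-argument __find_get_page(mapping, index, hash) interface disappears and callers index the per-mapping radix tree directly through find_get_page(mapping, index), as the rd.c and mincore.c hunks in this diff do. A minimal caller sketch, with a hypothetical function name; the returned page carries a reference that the caller must drop.
/* Sketch only: the new lookup path. */
static void lookup_example(struct address_space *mapping, unsigned long index)
{
	struct page *page = find_get_page(mapping, index);	/* takes a ref */
	if (page) {
		/* ... inspect the page, e.g. Page_Uptodate(page) ... */
		page_cache_release(page);	/* drop the reference */
	}
}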
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H
struct radix_tree_node;
#define RADIX_TREE_SLOT_RESERVED ((void *)~0UL)
struct radix_tree_root {
unsigned int height;
int gfp_mask;
struct radix_tree_node *rnode;
};
#define RADIX_TREE_INIT(mask) {0, (mask), NULL}
#define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(mask)
#define INIT_RADIX_TREE(root, mask) \
do { \
(root)->height = 0; \
(root)->gfp_mask = (mask); \
(root)->rnode = NULL; \
} while (0)
extern int radix_tree_reserve(struct radix_tree_root *, unsigned long, void ***);
extern int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
extern void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
extern int radix_tree_delete(struct radix_tree_root *, unsigned long);
#endif /* _LINUX_RADIX_TREE_H */
......@@ -91,7 +91,6 @@ static inline mddev_t * kdev_to_mddev (kdev_t dev)
/*
* default readahead
*/
#define MD_READAHEAD MAX_READAHEAD
static inline int disk_faulty(mdp_disk_t * d)
{
......
......@@ -368,6 +368,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PF_MEMDIE 0x00001000 /* Killed for out-of-memory */
#define PF_FREE_PAGES 0x00002000 /* per process page freeing */
#define PF_NOIO 0x00004000 /* avoid generating further I/O */
#define PF_FLUSHER 0x00008000 /* responsible for disk writeback */
/*
* Ptrace flags
......
......@@ -109,7 +109,7 @@ extern void __remove_inode_page(struct page *);
struct task_struct;
struct vm_area_struct;
struct sysinfo;
struct address_space;
struct zone_t;
/* linux/mm/swap.c */
......@@ -139,6 +139,9 @@ extern void show_swap_cache_info(void);
extern int add_to_swap_cache(struct page *, swp_entry_t);
extern void __delete_from_swap_cache(struct page *page);
extern void delete_from_swap_cache(struct page *page);
extern int move_to_swap_cache(struct page *page, swp_entry_t entry);
extern int move_from_swap_cache(struct page *page, unsigned long index,
struct address_space *mapping);
extern void free_page_and_swap_cache(struct page *page);
extern struct page * lookup_swap_cache(swp_entry_t);
extern struct page * read_swap_cache_async(swp_entry_t);
......
......@@ -69,6 +69,7 @@ extern void sbus_init(void);
extern void sysctl_init(void);
extern void signals_init(void);
extern void radix_tree_init(void);
extern void free_initmem(void);
#ifdef CONFIG_TC
......@@ -392,7 +393,7 @@ asmlinkage void __init start_kernel(void)
proc_caches_init();
vfs_caches_init(mempages);
buffer_init(mempages);
page_cache_init(mempages);
radix_tree_init();
#if defined(CONFIG_ARCH_S390)
ccwcache_init();
#endif
......
......@@ -224,8 +224,6 @@ EXPORT_SYMBOL(generic_file_write);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_ro_fops);
EXPORT_SYMBOL(generic_buffer_fdatasync);
EXPORT_SYMBOL(page_hash_bits);
EXPORT_SYMBOL(page_hash_table);
EXPORT_SYMBOL(file_lock_list);
EXPORT_SYMBOL(locks_init_lock);
EXPORT_SYMBOL(locks_copy_lock);
......@@ -266,8 +264,8 @@ EXPORT_SYMBOL(no_llseek);
EXPORT_SYMBOL(__pollwait);
EXPORT_SYMBOL(poll_freewait);
EXPORT_SYMBOL(ROOT_DEV);
EXPORT_SYMBOL(__find_get_page);
EXPORT_SYMBOL(__find_lock_page);
EXPORT_SYMBOL(find_get_page);
EXPORT_SYMBOL(find_lock_page);
EXPORT_SYMBOL(grab_cache_page);
EXPORT_SYMBOL(grab_cache_page_nowait);
EXPORT_SYMBOL(read_cache_page);
......@@ -295,6 +293,7 @@ EXPORT_SYMBOL(get_sb_nodev);
EXPORT_SYMBOL(get_sb_single);
EXPORT_SYMBOL(kill_anon_super);
EXPORT_SYMBOL(kill_litter_super);
EXPORT_SYMBOL(generic_shutdown_super);
EXPORT_SYMBOL(deactivate_super);
EXPORT_SYMBOL(sget);
EXPORT_SYMBOL(set_anon_super);
......
......@@ -8,9 +8,11 @@
L_TARGET := lib.a
export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o crc32.o rbtree.o
export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o \
crc32.o rbtree.o radix-tree.o
obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o
obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \
bust_spinlocks.o rbtree.o radix-tree.o
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
......
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
/*
* Radix tree node definition.
*/
#define RADIX_TREE_MAP_SHIFT 7
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
struct radix_tree_node {
unsigned int count;
void *slots[RADIX_TREE_MAP_SIZE];
};
struct radix_tree_path {
struct radix_tree_node *node, **slot;
};
#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
/*
* Radix tree node cache.
*/
static kmem_cache_t *radix_tree_node_cachep;
static mempool_t *radix_tree_node_pool;
#define radix_tree_node_alloc(root) \
mempool_alloc(radix_tree_node_pool, (root)->gfp_mask)
#define radix_tree_node_free(node) \
mempool_free((node), radix_tree_node_pool);
/*
* Return the maximum key which can be stored into a
* radix tree with height HEIGHT.
*/
static inline unsigned long radix_tree_maxindex(unsigned int height)
{
unsigned int tmp = height * RADIX_TREE_MAP_SHIFT;
unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1;
if (tmp >= RADIX_TREE_INDEX_BITS)
index = ~0UL;
return index;
}
/*
* Extend a radix tree so it can store key @index.
*/
static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_node *node;
unsigned int height;
/* Figure out what the height should be. */
height = root->height + 1;
while (index > radix_tree_maxindex(height))
height++;
if (root->rnode) {
do {
if (!(node = radix_tree_node_alloc(root)))
return -ENOMEM;
/* Increase the height. */
node->slots[0] = root->rnode;
if (root->rnode)
node->count = 1;
root->rnode = node;
root->height++;
} while (height > root->height);
} else
root->height = height;
return 0;
}
/**
* radix_tree_reserve - reserve space in a radix tree
* @root: radix tree root
* @index: index key
* @pslot: pointer to reserved slot
*
* Reserve a slot in a radix tree for the key @index.
*/
int radix_tree_reserve(struct radix_tree_root *root, unsigned long index, void ***pslot)
{
struct radix_tree_node *node = NULL, *tmp, **slot;
unsigned int height, shift;
int error;
/* Make sure the tree is high enough. */
if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
if (error)
return error;
}
slot = &root->rnode;
height = root->height;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
while (height > 0) {
if (*slot == NULL) {
/* Have to add a child node. */
if (!(tmp = radix_tree_node_alloc(root)))
return -ENOMEM;
*slot = tmp;
if (node)
node->count++;
}
/* Go a level down. */
node = *slot;
slot = (struct radix_tree_node **)
(node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK));
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
if (*slot != NULL)
return -EEXIST;
if (node)
node->count++;
*pslot = (void **)slot;
**pslot = RADIX_TREE_SLOT_RESERVED;
return 0;
}
EXPORT_SYMBOL(radix_tree_reserve);
/**
* radix_tree_insert - insert into a radix tree
* @root: radix tree root
* @index: index key
* @item: item to insert
*
* Insert an item into the radix tree at position @index.
*/
int radix_tree_insert(struct radix_tree_root *root, unsigned long index, void *item)
{
void **slot;
int error;
error = radix_tree_reserve(root, index, &slot);
if (!error)
*slot = item;
return error;
}
EXPORT_SYMBOL(radix_tree_insert);
/**
* radix_tree_lookup - perform lookup operation on a radix tree
* @root: radix tree root
* @index: index key
*
* Look up the item at position @index in the radix tree @root.
*/
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
unsigned int height, shift;
struct radix_tree_node **slot;
height = root->height;
if (index > radix_tree_maxindex(height))
return NULL;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = &root->rnode;
while (height > 0) {
if (*slot == NULL)
return NULL;
slot = (struct radix_tree_node **)
((*slot)->slots + ((index >> shift) & RADIX_TREE_MAP_MASK));
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
return (void *) *slot;
}
EXPORT_SYMBOL(radix_tree_lookup);
/**
* radix_tree_delete - delete an item from a radix tree
* @root: radix tree root
* @index: index key
*
* Remove the item at @index from the radix tree rooted at @root.
*/
int radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_path path[RADIX_TREE_INDEX_BITS/RADIX_TREE_MAP_SHIFT + 2], *pathp = path;
unsigned int height, shift;
height = root->height;
if (index > radix_tree_maxindex(height))
return -ENOENT;
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
pathp->slot = &root->rnode;
while (height > 0) {
if (*pathp->slot == NULL)
return -ENOENT;
pathp[1].node = *pathp[0].slot;
pathp[1].slot = (struct radix_tree_node **)
(pathp[1].node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK));
pathp++;
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
if (*pathp[0].slot == NULL)
return -ENOENT;
*pathp[0].slot = NULL;
while (pathp[0].node && --pathp[0].node->count == 0) {
pathp--;
*pathp[0].slot = NULL;
radix_tree_node_free(pathp[1].node);
}
return 0;
}
EXPORT_SYMBOL(radix_tree_delete);
static void radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags)
{
memset(node, 0, sizeof(struct radix_tree_node));
}
static void *radix_tree_node_pool_alloc(int gfp_mask, void *data)
{
return kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
}
static void radix_tree_node_pool_free(void *node, void *data)
{
kmem_cache_free(radix_tree_node_cachep, node);
}
/*
* FIXME! 512 nodes is 200-300k of memory. This needs to be
* scaled by the amount of available memory, and hopefully
* reduced also.
*/
void __init radix_tree_init(void)
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_HWCACHE_ALIGN, radix_tree_node_ctor, NULL);
if (!radix_tree_node_cachep)
panic ("Failed to create radix_tree_node cache\n");
radix_tree_node_pool = mempool_create(512, radix_tree_node_pool_alloc,
radix_tree_node_pool_free, NULL);
if (!radix_tree_node_pool)
panic ("Failed to create radix_tree_node pool\n");
}
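A short usage sketch of the new radix tree API (not part of the patch; the tree and item names are placeholders). With RADIX_TREE_MAP_SHIFT set to 7 each node indexes 128 slots, so a tree of height 1 covers indices 0..127 and height 2 covers 0..16383; inserting at index 200 therefore extends the tree to height 2.
/* Usage sketch only. */
static RADIX_TREE(example_tree, GFP_KERNEL);
static void radix_tree_example(void)
{
	static int item;			/* any non-NULL pointer works */
	if (radix_tree_insert(&example_tree, 200, &item))
		return;				/* -ENOMEM, or -EEXIST */
	BUG_ON(radix_tree_lookup(&example_tree, 200) != &item);
	radix_tree_delete(&example_tree, 200);	/* interior nodes are freed */
}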
......@@ -14,6 +14,7 @@ export-objs := shmem.o filemap.o mempool.o page_alloc.o
obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
shmem.o highmem.o mempool.o msync.o mincore.o
shmem.o highmem.o mempool.o msync.o mincore.o readahead.o \
pdflush.o
include $(TOPDIR)/Rules.make
......@@ -27,9 +27,9 @@ static unsigned char mincore_page(struct vm_area_struct * vma,
{
unsigned char present = 0;
struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
struct page * page, ** hash = page_hash(as, pgoff);
struct page * page;
page = __find_get_page(as, pgoff, hash);
page = find_get_page(as, pgoff);
if (page) {
present = Page_Uptodate(page);
page_cache_release(page);
......
......@@ -97,7 +97,7 @@ static void __free_pages_ok (struct page *page, unsigned int order)
struct page *base;
zone_t *zone;
if (page->buffers)
if (PagePrivate(page))
BUG();
if (page->mapping)
BUG();
......@@ -290,7 +290,7 @@ static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask
set_page_count(tmp, 1);
page = tmp;
if (page->buffers)
if (PagePrivate(page))
BUG();
if (page->mapping)
BUG();
......
/*
* mm/pdflush.c - worker threads for writing back filesystem data
*
* Copyright (C) 2002, Linus Torvalds.
*
* 09Apr2002 akpm@zip.com.au
* Initial version
*/
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
/*
* Minimum and maximum number of pdflush instances
*/
#define MIN_PDFLUSH_THREADS 2
#define MAX_PDFLUSH_THREADS 8
static void start_one_pdflush_thread(void);
/*
* The pdflush threads are worker threads for writing back dirty data.
* Ideally, we'd like one thread per active disk spindle. But the disk
* topology is very hard to divine at this level. Instead, we take
* care in various places to prevent more than one pdflush thread from
* performing writeback against a single filesystem. pdflush threads
* have the PF_FLUSHER flag set in current->flags to aid in this.
*/
/*
* All the pdflush threads. Protected by pdflush_lock
*/
static LIST_HEAD(pdflush_list);
static spinlock_t pdflush_lock = SPIN_LOCK_UNLOCKED;
/*
* The count of currently-running pdflush threads. Protected
* by pdflush_lock.
*/
static int nr_pdflush_threads = 0;
/*
* The time at which the pdflush thread pool last went empty
*/
static unsigned long last_empty_jifs;
/*
* The pdflush thread.
*
* Thread pool management algorithm:
*
 * - The minimum and maximum number of pdflush instances are bounded
* by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
*
* - If there have been no idle pdflush instances for 1 second, create
* a new one.
*
* - If the least-recently-went-to-sleep pdflush thread has been asleep
* for more than one second, terminate a thread.
*/
/*
* A structure for passing work to a pdflush thread. Also for passing
* state information between pdflush threads. Protected by pdflush_lock.
*/
struct pdflush_work {
struct task_struct *who; /* The thread */
void (*fn)(unsigned long); /* A callback function for pdflush to work on */
unsigned long arg0; /* An argument to the callback function */
struct list_head list; /* On pdflush_list, when the thread is idle */
unsigned long when_i_went_to_sleep;
};
/*
* preemption is disabled in pdflush. There was a bug in preempt
* which was causing pdflush to get flipped into state TASK_RUNNING
* when it performed a spin_unlock. That bug is probably fixed,
* but play it safe. The preempt-off paths are very short.
*/
static int __pdflush(struct pdflush_work *my_work)
{
daemonize();
reparent_to_init();
strcpy(current->comm, "pdflush");
/* interruptible sleep, so block all signals */
spin_lock_irq(&current->sigmask_lock);
siginitsetinv(&current->blocked, 0);
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
current->flags |= PF_FLUSHER;
my_work->fn = NULL;
my_work->who = current;
preempt_disable();
spin_lock_irq(&pdflush_lock);
nr_pdflush_threads++;
for ( ; ; ) {
struct pdflush_work *pdf;
list_add(&my_work->list, &pdflush_list);
my_work->when_i_went_to_sleep = jiffies;
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&pdflush_lock);
schedule();
preempt_enable();
(*my_work->fn)(my_work->arg0);
preempt_disable();
/*
* Thread creation: For how long have there been zero
* available threads?
*/
if (jiffies - last_empty_jifs > 1 * HZ) {
/* unlocked list_empty() test is OK here */
if (list_empty(&pdflush_list)) {
/* unlocked nr_pdflush_threads test is OK here */
if (nr_pdflush_threads < MAX_PDFLUSH_THREADS)
start_one_pdflush_thread();
}
}
spin_lock_irq(&pdflush_lock);
/*
* Thread destruction: For how long has the sleepiest
* thread slept?
*/
if (list_empty(&pdflush_list))
continue;
if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
continue;
pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
if (jiffies - pdf->when_i_went_to_sleep > 1 * HZ) {
pdf->when_i_went_to_sleep = jiffies; /* Limit exit rate */
break; /* exeunt */
}
}
nr_pdflush_threads--;
spin_unlock_irq(&pdflush_lock);
preempt_enable();
return 0;
}
/*
* Of course, my_work wants to be just a local in __pdflush(). It is
* separated out in this manner to hopefully prevent the compiler from
 * performing unfortunate optimisations against the auto variables, because
 * they are visible to other tasks and CPUs.  (No problem has actually
* been observed. This is just paranoia).
*/
static int pdflush(void *dummy)
{
struct pdflush_work my_work;
return __pdflush(&my_work);
}
/*
* Attempt to wake up a pdflush thread, and get it to do some work for you.
 * Returns zero if it found an idle worker thread and passed your payload
 * to it, or -1 if no idle pdflush thread was available.
*/
int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
{
unsigned long flags;
int ret = 0;
if (fn == NULL)
BUG(); /* Hard to diagnose if it's deferred */
spin_lock_irqsave(&pdflush_lock, flags);
if (list_empty(&pdflush_list)) {
spin_unlock_irqrestore(&pdflush_lock, flags);
ret = -1;
} else {
struct pdflush_work *pdf;
pdf = list_entry(pdflush_list.next, struct pdflush_work, list);
list_del_init(&pdf->list);
if (list_empty(&pdflush_list))
last_empty_jifs = jiffies;
spin_unlock_irqrestore(&pdflush_lock, flags);
pdf->fn = fn;
pdf->arg0 = arg0;
		wmb();		/* make fn/arg0 visible before the wakeup */
wake_up_process(pdf->who);
}
return ret;
}
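/*
 * Illustrative caller sketch (not part of this file): hand pdflush a
 * callback and a single argument, and fall back to doing the work in the
 * calling context when no idle thread is available.  The helper names
 * `example_background_work' and `example_kick_pdflush' are hypothetical.
 */
static void example_background_work(unsigned long nr_pages)
{
	/* ... write back up to nr_pages dirty pages ... */
}

static void example_kick_pdflush(unsigned long nr_pages)
{
	if (pdflush_operation(example_background_work, nr_pages) != 0)
		example_background_work(nr_pages);	/* no idle thread: do it ourselves */
}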
static void start_one_pdflush_thread(void)
{
kernel_thread(pdflush, NULL,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
}
static int __init pdflush_init(void)
{
int i;
for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
start_one_pdflush_thread();
return 0;
}
module_init(pdflush_init);
......@@ -370,9 +370,10 @@ static int shmem_unuse_inode (struct shmem_inode_info *info, swp_entry_t entry,
swp_entry_t *ptr;
unsigned long idx;
int offset;
idx = 0;
spin_lock (&info->lock);
repeat:
idx = 0;
offset = shmem_clear_swp (entry, info->i_direct, SHMEM_NR_DIRECT);
if (offset >= 0)
goto found;
......@@ -389,13 +390,16 @@ static int shmem_unuse_inode (struct shmem_inode_info *info, swp_entry_t entry,
spin_unlock (&info->lock);
return 0;
found:
delete_from_swap_cache(page);
add_to_page_cache(page, info->vfs_inode.i_mapping, offset + idx);
SetPageDirty(page);
SetPageUptodate(page);
info->swapped--;
spin_unlock(&info->lock);
return 1;
if (!move_from_swap_cache (page, offset+idx, info->vfs_inode.i_mapping)) {
info->swapped--;
SetPageUptodate (page);
spin_unlock (&info->lock);
return 1;
}
/* Yield for kswapd, and try again */
yield();
goto repeat;
}
/*
......@@ -425,6 +429,7 @@ void shmem_unuse(swp_entry_t entry, struct page *page)
*/
static int shmem_writepage(struct page * page)
{
int err;
struct shmem_inode_info *info;
swp_entry_t *entry, swap;
struct address_space *mapping;
......@@ -442,7 +447,6 @@ static int shmem_writepage(struct page * page)
info = SHMEM_I(inode);
if (info->locked)
return fail_writepage(page);
getswap:
swap = get_swap_page();
if (!swap.val)
return fail_writepage(page);
......@@ -455,29 +459,20 @@ static int shmem_writepage(struct page * page)
if (entry->val)
BUG();
/* Remove it from the page cache */
remove_inode_page(page);
page_cache_release(page);
/* Add it to the swap cache */
if (add_to_swap_cache(page, swap) != 0) {
/*
* Raced with "speculative" read_swap_cache_async.
* Add page back to page cache, unref swap, try again.
*/
add_to_page_cache_locked(page, mapping, index);
err = move_to_swap_cache(page, swap);
if (!err) {
*entry = swap;
info->swapped++;
spin_unlock(&info->lock);
swap_free(swap);
goto getswap;
SetPageUptodate(page);
set_page_dirty(page);
UnlockPage(page);
return 0;
}
*entry = swap;
info->swapped++;
spin_unlock(&info->lock);
SetPageUptodate(page);
set_page_dirty(page);
UnlockPage(page);
return 0;
swap_free(swap);
return fail_writepage(page);
}
/*
......@@ -493,10 +488,11 @@ static int shmem_writepage(struct page * page)
*/
static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct inode * inode, unsigned long idx)
{
struct address_space * mapping = inode->i_mapping;
struct address_space *mapping = inode->i_mapping;
struct shmem_sb_info *sbinfo;
struct page * page;
struct page *page;
swp_entry_t *entry;
int error;
repeat:
page = find_lock_page(mapping, idx);
......@@ -524,8 +520,6 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
shmem_recalc_inode(inode);
if (entry->val) {
unsigned long flags;
/* Look it up and read it in.. */
page = find_get_page(&swapper_space, entry->val);
if (!page) {
......@@ -550,16 +544,18 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
goto repeat;
}
/* We have to this with page locked to prevent races */
/* We have to do this with page locked to prevent races */
if (TryLockPage(page))
goto wait_retry;
error = move_from_swap_cache(page, idx, mapping);
if (error < 0) {
UnlockPage(page);
return ERR_PTR(error);
}
swap_free(*entry);
*entry = (swp_entry_t) {0};
delete_from_swap_cache(page);
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced) | (1 << PG_arch_1));
page->flags = flags | (1 << PG_dirty);
add_to_page_cache_locked(page, mapping, idx);
info->swapped--;
spin_unlock (&info->lock);
} else {
......@@ -581,9 +577,13 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
page = page_cache_alloc(mapping);
if (!page)
return ERR_PTR(-ENOMEM);
error = add_to_page_cache(page, mapping, idx);
if (error < 0) {
page_cache_release(page);
return ERR_PTR(-ENOMEM);
}
clear_highpage(page);
inode->i_blocks += BLOCKS_PER_PAGE;
add_to_page_cache (page, mapping, idx);
}
/* We have the page */
......
......@@ -37,11 +37,13 @@ static struct address_space_operations swap_aops = {
};
struct address_space swapper_space = {
LIST_HEAD_INIT(swapper_space.clean_pages),
LIST_HEAD_INIT(swapper_space.dirty_pages),
LIST_HEAD_INIT(swapper_space.locked_pages),
0, /* nrpages */
&swap_aops,
page_tree: RADIX_TREE_INIT(GFP_ATOMIC),
page_lock: RW_LOCK_UNLOCKED,
clean_pages: LIST_HEAD_INIT(swapper_space.clean_pages),
dirty_pages: LIST_HEAD_INIT(swapper_space.dirty_pages),
locked_pages: LIST_HEAD_INIT(swapper_space.locked_pages),
a_ops: &swap_aops,
i_shared_lock: SPIN_LOCK_UNLOCKED,
};
#ifdef SWAP_CACHE_INFO
......@@ -69,17 +71,21 @@ void show_swap_cache_info(void)
int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
int error;
if (page->mapping)
BUG();
if (!swap_duplicate(entry)) {
INC_CACHE_INFO(noent_race);
return -ENOENT;
}
if (add_to_page_cache_unique(page, &swapper_space, entry.val,
page_hash(&swapper_space, entry.val)) != 0) {
error = add_to_page_cache_unique(page, &swapper_space, entry.val);
if (error != 0) {
swap_free(entry);
INC_CACHE_INFO(exist_race);
return -EEXIST;
if (error == -EEXIST)
INC_CACHE_INFO(exist_race);
return error;
}
if (!PageLocked(page))
BUG();
......@@ -121,14 +127,96 @@ void delete_from_swap_cache(struct page *page)
entry.val = page->index;
spin_lock(&pagecache_lock);
write_lock(&swapper_space.page_lock);
__delete_from_swap_cache(page);
spin_unlock(&pagecache_lock);
write_unlock(&swapper_space.page_lock);
swap_free(entry);
page_cache_release(page);
}
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
struct address_space *mapping = page->mapping;
void **pslot;
int err;
if (!mapping)
BUG();
if (!swap_duplicate(entry)) {
INC_CACHE_INFO(noent_race);
return -ENOENT;
}
write_lock(&swapper_space.page_lock);
write_lock(&mapping->page_lock);
err = radix_tree_reserve(&swapper_space.page_tree, entry.val, &pslot);
if (!err) {
/* Remove it from the page cache */
__remove_inode_page (page);
/* Add it to the swap cache */
*pslot = page;
page->flags = ((page->flags & ~(1 << PG_uptodate | 1 << PG_error
| 1 << PG_dirty | 1 << PG_referenced
| 1 << PG_arch_1 | 1 << PG_checked))
| (1 << PG_locked));
___add_to_page_cache(page, &swapper_space, entry.val);
}
write_unlock(&mapping->page_lock);
write_unlock(&swapper_space.page_lock);
if (!err) {
INC_CACHE_INFO(add_total);
return 0;
}
swap_free(entry);
if (err == -EEXIST)
INC_CACHE_INFO(exist_race);
return err;
}
int move_from_swap_cache(struct page *page, unsigned long index,
struct address_space *mapping)
{
void **pslot;
int err;
if (!PageLocked(page))
BUG();
write_lock(&swapper_space.page_lock);
write_lock(&mapping->page_lock);
err = radix_tree_reserve(&mapping->page_tree, index, &pslot);
if (!err) {
swp_entry_t entry;
block_flushpage(page, 0);
entry.val = page->index;
__delete_from_swap_cache(page);
swap_free(entry);
*pslot = page;
page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
1 << PG_referenced | 1 << PG_arch_1 |
1 << PG_checked);
page->flags |= (1 << PG_dirty);
___add_to_page_cache(page, mapping, index);
}
write_unlock(&mapping->page_lock);
write_unlock(&swapper_space.page_lock);
return err;
}
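/*
 * Caller-side sketch (illustrative only; it mirrors shmem_writepage
 * above): `swap' is a reference obtained from get_swap_page(), which the
 * caller drops itself on failure -- move_to_swap_cache() only releases
 * the extra reference it takes via swap_duplicate().  The helper name is
 * hypothetical.
 */
static int example_move_page_to_swap(struct page *page, swp_entry_t swap)
{
	if (move_to_swap_cache(page, swap) != 0) {
		swap_free(swap);	/* drop the get_swap_page() reference */
		return -1;
	}
	SetPageUptodate(page);
	set_page_dirty(page);
	UnlockPage(page);
	return 0;
}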
/*
* Perform a free_page(), also freeing any swap cache associated with
* this page if it is the last user of the page. Can not do a lock_page,
......@@ -213,6 +301,7 @@ struct page * read_swap_cache_async(swp_entry_t entry)
* swap cache: added by a racing read_swap_cache_async,
* or by try_to_swap_out (or shmem_writepage) re-using
* the just freed swap entry for an existing page.
* May fail (-ENOMEM) if radix-tree node allocation failed.
*/
err = add_to_swap_cache(new_page, entry);
if (!err) {
......@@ -222,7 +311,7 @@ struct page * read_swap_cache_async(swp_entry_t entry)
rw_swap_page(READ, new_page);
return new_page;
}
} while (err != -ENOENT);
} while (err != -ENOENT && err != -ENOMEM);
if (new_page)
page_cache_release(new_page);
......
......@@ -239,10 +239,10 @@ static int exclusive_swap_page(struct page *page)
/* Is the only swap cache user the cache itself? */
if (p->swap_map[SWP_OFFSET(entry)] == 1) {
/* Recheck the page count with the pagecache lock held.. */
spin_lock(&pagecache_lock);
if (page_count(page) - !!page->buffers == 2)
read_lock(&swapper_space.page_lock);
if (page_count(page) - !!PagePrivate(page) == 2)
retval = 1;
spin_unlock(&pagecache_lock);
read_unlock(&swapper_space.page_lock);
}
swap_info_put(p);
}
......@@ -265,7 +265,7 @@ int can_share_swap_page(struct page *page)
BUG();
switch (page_count(page)) {
case 3:
if (!page->buffers)
if (!PagePrivate(page))
break;
/* Fallthrough */
case 2:
......@@ -295,7 +295,7 @@ int remove_exclusive_swap_page(struct page *page)
BUG();
if (!PageSwapCache(page))
return 0;
if (page_count(page) - !!page->buffers != 2) /* 2: us + cache */
if (page_count(page) - !!PagePrivate(page) != 2) /* 2: us + cache */
return 0;
entry.val = page->index;
......@@ -307,13 +307,13 @@ int remove_exclusive_swap_page(struct page *page)
retval = 0;
if (p->swap_map[SWP_OFFSET(entry)] == 1) {
/* Recheck the page count with the pagecache lock held.. */
spin_lock(&pagecache_lock);
if (page_count(page) - !!page->buffers == 2) {
read_lock(&swapper_space.page_lock);
if (page_count(page) - !!PagePrivate(page) == 2) {
__delete_from_swap_cache(page);
SetPageDirty(page);
retval = 1;
}
spin_unlock(&pagecache_lock);
read_unlock(&swapper_space.page_lock);
}
swap_info_put(p);
......@@ -344,7 +344,7 @@ void free_swap_and_cache(swp_entry_t entry)
if (page) {
page_cache_get(page);
/* Only cache user (+us), or swap space full? Free it! */
if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
if (page_count(page) - !!PagePrivate(page) == 2 || vm_swap_full()) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
......@@ -959,7 +959,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
p->lowest_bit = 0;
p->highest_bit = 0;
for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
if (test_bit(i,(char *) swap_header)) {
if (test_bit(i,(unsigned long *) swap_header)) {
if (!p->lowest_bit)
p->lowest_bit = i;
p->highest_bit = i;
......@@ -974,7 +974,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
goto bad_swap;
}
for (i = 1 ; i < maxpages ; i++) {
if (test_bit(i,(char *) swap_header))
if (test_bit(i,(unsigned long *) swap_header))
p->swap_map[i] = 0;
else
p->swap_map[i] = SWAP_MAP_BAD;
......
......@@ -92,7 +92,8 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
mm->rss--;
UnlockPage(page);
{
int freeable = page_count(page) - !!page->buffers <= 2;
int freeable = page_count(page) -
!!PagePrivate(page) <= 2;
page_cache_release(page);
return freeable;
}
......@@ -121,7 +122,7 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
* Anonymous buffercache pages can be left behind by
* concurrent truncate and pagefault.
*/
if (page->buffers)
if (PagePrivate(page))
goto preserve;
/*
......@@ -138,10 +139,16 @@ static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct*
* (adding to the page cache will clear the dirty
* and uptodate bits, so we need to do it again)
*/
if (add_to_swap_cache(page, entry) == 0) {
switch (add_to_swap_cache(page, entry)) {
case 0: /* Success */
SetPageUptodate(page);
set_page_dirty(page);
goto set_swap_pte;
case -ENOMEM: /* radix-tree allocation */
swap_free(entry);
goto preserve;
default: /* ENOENT: raced */
break;
}
/* Raced with "speculative" read_swap_cache_async */
swap_free(entry);
......@@ -341,6 +348,7 @@ static int FASTCALL(shrink_cache(int nr_pages, zone_t * classzone, unsigned int
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)
{
struct list_head * entry;
struct address_space *mapping;
int max_scan = nr_inactive_pages / priority;
int max_mapped = nr_pages << (9 - priority);
......@@ -377,7 +385,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
continue;
/* Racy check to avoid trylocking when not worthwhile */
if (!page->buffers && (page_count(page) != 1 || !page->mapping))
if (!PagePrivate(page) && (page_count(page) != 1 || !page->mapping))
goto page_mapped;
/*
......@@ -395,7 +403,9 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
continue;
}
if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
mapping = page->mapping;
if (PageDirty(page) && is_page_cache_freeable(page) && mapping) {
/*
* It is not critical here to write it only if
 * the page is unmapped because any direct writer
......@@ -406,7 +416,7 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
*/
int (*writepage)(struct page *);
writepage = page->mapping->a_ops->writepage;
writepage = mapping->a_ops->writepage;
if ((gfp_mask & __GFP_FS) && writepage) {
ClearPageDirty(page);
SetPageLaunder(page);
......@@ -426,14 +436,14 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
* associated with this page. If we succeed we try to free
* the page as well.
*/
if (page->buffers) {
if (PagePrivate(page)) {
spin_unlock(&pagemap_lru_lock);
/* avoid to free a locked page */
page_cache_get(page);
if (try_to_release_page(page, gfp_mask)) {
if (!page->mapping) {
if (!mapping) {
/*
* We must not allow an anon page
* with no buffers to be visible on
......@@ -470,33 +480,35 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
}
}
spin_lock(&pagecache_lock);
/*
* this is the non-racy check for busy page.
* This is the non-racy check for busy page.
*/
if (!page->mapping || !is_page_cache_freeable(page)) {
spin_unlock(&pagecache_lock);
UnlockPage(page);
if (mapping) {
write_lock(&mapping->page_lock);
if (is_page_cache_freeable(page))
goto page_freeable;
write_unlock(&mapping->page_lock);
}
UnlockPage(page);
page_mapped:
if (--max_mapped >= 0)
continue;
if (--max_mapped >= 0)
continue;
/*
* Alert! We've found too many mapped pages on the
* inactive list, so we start swapping out now!
*/
spin_unlock(&pagemap_lru_lock);
swap_out(priority, gfp_mask, classzone);
return nr_pages;
}
/*
* Alert! We've found too many mapped pages on the
* inactive list, so we start swapping out now!
*/
spin_unlock(&pagemap_lru_lock);
swap_out(priority, gfp_mask, classzone);
return nr_pages;
page_freeable:
/*
* It is critical to check PageDirty _after_ we made sure
 * the page is freeable, so not in use by anybody.
*/
if (PageDirty(page)) {
spin_unlock(&pagecache_lock);
write_unlock(&mapping->page_lock);
UnlockPage(page);
continue;
}
......@@ -504,12 +516,12 @@ static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask,
/* point of no return */
if (likely(!PageSwapCache(page))) {
__remove_inode_page(page);
spin_unlock(&pagecache_lock);
write_unlock(&mapping->page_lock);
} else {
swp_entry_t swap;
swap.val = page->index;
__delete_from_swap_cache(page);
spin_unlock(&pagecache_lock);
write_unlock(&mapping->page_lock);
swap_free(swap);
}
......