Commit 6c4b34bf authored by Linus Torvalds

v2.4.7.7 -> v2.4.7.8

  - Jeff Hartmann: serverworks AGP gart unload memory leak fix
  - Marcelo Tosatti: make zone_inactive_shortage() return how big the shortage is.
  - Hugh Dickins: tidy up age_page_down()
  - Al Viro: super block handling cleanups
parent dfc05323
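
The superblock cleanups below all converge on one rule: get_super() now takes a reference (s_count) and a read lock on s_umount, so every successful get_super() must be paired with drop_super(). A minimal sketch of the calling pattern the patch converts callers to (the vfs_statfs() use is illustrative, modeled on the sys_ustat() hunk):

	struct statfs buf;
	int err = -ENODEV;
	struct super_block *sb = get_super(dev);  /* s_count ref + down_read(&sb->s_umount) */

	if (sb) {
		err = vfs_statfs(sb, &buf);       /* umount is blocked while s_umount is held */
		drop_super(sb);                   /* up_read() + __put_super() */
	}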
@@ -771,6 +771,7 @@ struct parport_driver {
<funcprototype>
<funcdef>struct pardevice *<function>parport_open</function></funcdef>
<paramdef>int <parameter>devnum</parameter></paramdef>
<paramdef>const char *<parameter>name</parameter></paramdef>
<paramdef>int <parameter>(*pf)</parameter>
<funcparams>void *</funcparams></paramdef>
<paramdef>int <parameter>(*kf)</parameter>
......
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 8
EXTRAVERSION =-pre7
EXTRAVERSION =-pre8
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
@@ -109,9 +109,11 @@ static int hpux_ustat(dev_t dev, struct hpux_ustat *ubuf)
lock_kernel();
s = get_super(to_kdev_t(dev));
unlock_kernel();
if (s == NULL)
goto out;
err = vfs_statfs(s, &sbuf);
drop_super(s);
if (err)
goto out;
@@ -124,7 +126,6 @@ static int hpux_ustat(dev_t dev, struct hpux_ustat *ubuf)
/* Changed to hpux_ustat: */
err = copy_to_user(ubuf,&tmp,sizeof(struct hpux_ustat)) ? -EFAULT : 0;
out:
unlock_kernel();
return err;
}
......
@@ -2375,6 +2375,7 @@ static int serverworks_free_gatt_table(void)
serverworks_free_gatt_pages();
serverworks_free_page_map(&page_dir);
serverworks_free_page_map(&serverworks_private.scratch_dir);
return 0;
}
......
@@ -3,9 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
O_TARGET := drm.o
export-objs := gamma_drv.o tdfx_drv.o r128_drv.o mga_drv.o i810_drv.o \
ffb_drv.o
list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o
list-multi := gamma.o tdfx.o r128.o mga.o i810.o radeon.o ffb.o
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o
......
@@ -678,8 +678,10 @@ int blkdev_put(struct block_device *bdev, int kind)
down(&bdev->bd_sem);
/* syncing will go here */
lock_kernel();
if (kind == BDEV_FILE || kind == BDEV_FS)
if (kind == BDEV_FILE)
fsync_dev(rdev);
else if (kind == BDEV_FS)
fsync_no_super(rdev);
if (atomic_dec_and_test(&bdev->bd_openers)) {
/* invalidating buffers will go here */
invalidate_buffers(rdev);
......
@@ -346,6 +346,12 @@ int fsync_super(struct super_block *sb)
return sync_buffers(dev, 1);
}
int fsync_no_super(kdev_t dev)
{
sync_buffers(dev, 0);
return sync_buffers(dev, 1);
}
int fsync_dev(kdev_t dev)
{
sync_buffers(dev, 0);
......
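fsync_no_super() is the familiar two-pass sync_buffers() dance — pass 0 starts asynchronous writeback, pass 1 waits for it — with no superblock or inode syncing in between. That is exactly what blkdev_put() needs for BDEV_FS: kill_super() calls blkdev_put(bdev, BDEV_FS) while tearing the superblock down, so there is no live superblock left to sync and only the buffers need flushing. For contrast, a rough reconstruction of fsync_dev() in this tree (from 2.4 sources; the lines elided above are not shown in this hunk):

	int fsync_dev(kdev_t dev)
	{
		sync_buffers(dev, 0);           /* start async writeback */
		lock_kernel();
		sync_supers(dev);               /* none of this is wanted from kill_super() */
		sync_inodes(dev);
		DQUOT_SYNC(dev);
		unlock_kernel();
		return sync_buffers(dev, 1);    /* second pass: wait for completion */
	}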
@@ -1597,6 +1597,8 @@ asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr)
if (sb && sb_has_quota_enabled(sb, type))
ret = set_dqblk(sb, id, type, flags, (struct dqblk *) addr);
out:
if (sb)
drop_super(sb);
unlock_kernel();
return ret;
}
@@ -258,23 +258,6 @@ static inline void sync_list(struct list_head *head)
__sync_one(list_entry(tmp, struct inode, i_list), 0);
}
static inline int wait_on_dirty(struct list_head *head)
{
struct list_head * tmp;
list_for_each(tmp, head) {
struct inode *inode = list_entry(tmp, struct inode, i_list);
if (!inode->i_state & I_DIRTY)
continue;
__iget(inode);
spin_unlock(&inode_lock);
__wait_on_inode(inode);
iput(inode);
spin_lock(&inode_lock);
return 1;
}
return 0;
}
static inline void wait_on_locked(struct list_head *head)
{
struct list_head * tmp;
@@ -319,61 +302,96 @@ static inline int try_to_sync_unused_list(struct list_head *head)
return 1;
}
/**
* sync_inodes
* @dev: device to sync the inodes from.
*
* sync_inodes goes through the super block's dirty list,
* writes them out, and puts them back on the normal list.
*/
/*
* caller holds exclusive lock on sb->s_umount
*/
void sync_inodes_sb(struct super_block *sb)
{
spin_lock(&inode_lock);
sync_list(&sb->s_dirty);
wait_on_locked(&sb->s_locked_inodes);
while (!list_empty(&sb->s_dirty)||!list_empty(&sb->s_locked_inodes)) {
sync_list(&sb->s_dirty);
wait_on_locked(&sb->s_locked_inodes);
}
spin_unlock(&inode_lock);
}
/*
* Note:
* We don't need to grab a reference to superblock here. If it has non-empty
->s_dirty it hasn't been killed yet and kill_super() won't proceed
* past sync_inodes_sb() until both ->s_dirty and ->s_locked_inodes are
* empty. Since __sync_one() regains inode_lock before it finally moves
* inode from superblock lists we are OK.
*/
void sync_unlocked_inodes(void)
{
struct super_block * sb = sb_entry(super_blocks.next);
struct super_block * sb;
spin_lock(&inode_lock);
spin_lock(&sb_lock);
sb = sb_entry(super_blocks.next);
for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
if (!list_empty(&sb->s_dirty)) {
spin_lock(&inode_lock);
spin_unlock(&sb_lock);
sync_list(&sb->s_dirty);
spin_unlock(&inode_lock);
spin_lock(&sb_lock);
}
}
spin_unlock(&sb_lock);
spin_unlock(&inode_lock);
}
/*
* Find a superblock with inodes that need to be synced
*/
static struct super_block *get_super_to_sync(void)
{
struct list_head *p;
restart:
spin_lock(&inode_lock);
spin_lock(&sb_lock);
list_for_each(p, &super_blocks) {
struct super_block *s = list_entry(p,struct super_block,s_list);
if (list_empty(&s->s_dirty) && list_empty(&s->s_locked_inodes))
continue;
s->s_count++;
spin_unlock(&sb_lock);
spin_unlock(&inode_lock);
down_read(&s->s_umount);
if (!s->s_root) {
drop_super(s);
goto restart;
}
return s;
}
spin_unlock(&sb_lock);
spin_unlock(&inode_lock);
return NULL;
}
/**
* sync_inodes
* @dev: device to sync the inodes from.
*
* sync_inodes goes through the super block's dirty list,
* writes them out, and puts them back on the normal list.
*/
void sync_inodes(kdev_t dev)
{
struct super_block * sb = sb_entry(super_blocks.next);
struct super_block * s;
/*
* Search the super_blocks array for the device(s) to sync.
*/
for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
if (!sb->s_dev)
continue;
if (dev && sb->s_dev != dev)
continue;
down_read(&sb->s_umount);
if (sb->s_dev && (sb->s_dev == dev || !dev)) {
spin_lock(&inode_lock);
do {
sync_list(&sb->s_dirty);
} while (wait_on_dirty(&sb->s_locked_inodes));
spin_unlock(&inode_lock);
if (dev) {
if ((s = get_super(dev)) != NULL) {
sync_inodes_sb(s);
drop_super(s);
}
} else {
while ((s = get_super_to_sync()) != NULL) {
sync_inodes_sb(s);
drop_super(s);
}
up_read(&sb->s_umount);
if (dev)
break;
}
}
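With removed and added lines interleaved, the new sync_inodes() is hard to read in one piece. Assembled from the added lines above (a readability aid, not patch text), it reduces to:

	void sync_inodes(kdev_t dev)
	{
		struct super_block * s;

		if (dev) {
			if ((s = get_super(dev)) != NULL) {
				sync_inodes_sb(s);
				drop_super(s);
			}
		} else {
			while ((s = get_super_to_sync()) != NULL) {
				sync_inodes_sb(s);
				drop_super(s);
			}
		}
	}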
@@ -382,13 +400,19 @@ void sync_inodes(kdev_t dev)
*/
static void try_to_sync_unused_inodes(void)
{
struct super_block * sb = sb_entry(super_blocks.next);
struct super_block * sb;
spin_lock(&sb_lock);
sb = sb_entry(super_blocks.next);
for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
if (!sb->s_dev)
continue;
spin_unlock(&sb_lock);
if (!try_to_sync_unused_list(&sb->s_dirty))
break;
return;
spin_lock(&sb_lock);
}
spin_unlock(&sb_lock);
}
/**
@@ -598,15 +622,18 @@ int invalidate_inodes(struct super_block * sb)
int invalidate_device(kdev_t dev, int do_sync)
{
struct super_block *sb = get_super(dev);
struct super_block *sb;
int res;
if (do_sync)
fsync_dev(dev);
res = 0;
if (sb)
sb = get_super(dev);
if (sb) {
res = invalidate_inodes(sb);
drop_super(sb);
}
invalidate_buffers(dev);
return res;
}
......
@@ -59,9 +59,8 @@ static int do_remount_sb(struct super_block *sb, int flags, char * data);
/* this is initialized in init/main.c */
kdev_t ROOT_DEV;
int nr_super_blocks;
int max_super_blocks = NR_SUPER;
LIST_HEAD(super_blocks);
spinlock_t sb_lock = SPIN_LOCK_UNLOCKED;
/*
* Handling of filesystem drivers list.
@@ -386,7 +385,6 @@ static struct vfsmount *add_vfsmnt(struct dentry *root, const char *dev_name)
mnt->mnt_parent = mnt;
spin_lock(&dcache_lock);
list_add(&mnt->mnt_instances, &sb->s_mounts);
list_add(&mnt->mnt_list, vfsmntlist.prev);
spin_unlock(&dcache_lock);
if (sb->s_type->fs_flags & FS_SINGLE)
@@ -395,10 +393,11 @@ static struct vfsmount *add_vfsmnt(struct dentry *root, const char *dev_name)
return mnt;
}
static struct vfsmount *clone_mnt(struct vfsmount *old_mnt, struct dentry *root)
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root)
{
char *name = old_mnt->mnt_devname;
char *name = old->mnt_devname;
struct vfsmount *mnt = alloc_vfsmnt();
struct super_block *sb = old->mnt_sb;
if (!mnt)
goto out;
@@ -408,14 +407,12 @@ static struct vfsmount *clone_mnt(struct vfsmount *old_mnt, struct dentry *root)
if (mnt->mnt_devname)
strcpy(mnt->mnt_devname, name);
}
mnt->mnt_sb = old_mnt->mnt_sb;
mnt->mnt_sb = sb;
mnt->mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
spin_lock(&dcache_lock);
list_add(&mnt->mnt_instances, &old_mnt->mnt_instances);
spin_unlock(&dcache_lock);
atomic_inc(&sb->s_active);
out:
return mnt;
}
@@ -487,16 +484,12 @@ void __mntput(struct vfsmount *mnt)
struct super_block *sb = mnt->mnt_sb;
dput(mnt->mnt_root);
spin_lock(&dcache_lock);
list_del(&mnt->mnt_instances);
spin_unlock(&dcache_lock);
if (mnt->mnt_devname)
kfree(mnt->mnt_devname);
kmem_cache_free(mnt_cache, mnt);
kill_super(sb);
}
/* Use octal escapes, like mount does, for embedded spaces etc. */
static unsigned char need_escaping[] = { ' ', '\t', '\n', '\\' };
@@ -645,6 +638,49 @@ int get_filesystem_info( char *buf )
#undef MANGLE
#undef FREEROOM
}
static inline void __put_super(struct super_block *sb)
{
spin_lock(&sb_lock);
if (!--sb->s_count)
kfree(sb);
spin_unlock(&sb_lock);
}
static inline struct super_block * find_super(kdev_t dev)
{
struct list_head *p;
list_for_each(p, &super_blocks) {
struct super_block * s = sb_entry(p);
if (s->s_dev == dev) {
s->s_count++;
return s;
}
}
return NULL;
}
void drop_super(struct super_block *sb)
{
up_read(&sb->s_umount);
__put_super(sb);
}
static void put_super(struct super_block *sb)
{
up_write(&sb->s_umount);
__put_super(sb);
}
static inline void write_super(struct super_block *sb)
{
lock_super(sb);
if (sb->s_root && sb->s_dirt)
if (sb->s_op && sb->s_op->write_super)
sb->s_op->write_super(sb);
unlock_super(sb);
}
/*
* Note: check the dirty flag before waiting, so we don't
@@ -655,21 +691,29 @@ void sync_supers(kdev_t dev)
{
struct super_block * sb;
for (sb = sb_entry(super_blocks.next);
sb != sb_entry(&super_blocks);
sb = sb_entry(sb->s_list.next)) {
if (!sb->s_dev)
continue;
if (dev && sb->s_dev != dev)
continue;
if (!sb->s_dirt)
continue;
lock_super(sb);
if (sb->s_dev && sb->s_dirt && (!dev || dev == sb->s_dev))
if (sb->s_op && sb->s_op->write_super)
sb->s_op->write_super(sb);
unlock_super(sb);
if (dev) {
sb = get_super(dev);
if (sb) {
if (sb->s_dirt)
write_super(sb);
drop_super(sb);
}
return;
}
restart:
spin_lock(&sb_lock);
sb = sb_entry(super_blocks.next);
while (sb != sb_entry(&super_blocks))
if (sb->s_dirt) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
write_super(sb);
drop_super(sb);
goto restart;
} else
sb = sb_entry(sb->s_list.next);
spin_unlock(&sb_lock);
}
/**
@@ -687,17 +731,21 @@ struct super_block * get_super(kdev_t dev)
if (!dev)
return NULL;
restart:
s = sb_entry(super_blocks.next);
while (s != sb_entry(&super_blocks))
if (s->s_dev == dev) {
/* Yes, it sucks. As soon as we get refcounting... */
lock_super(s);
unlock_super(s);
if (s->s_dev == dev)
return s;
goto restart;
} else
s = sb_entry(s->s_list.next);
spin_lock(&sb_lock);
s = find_super(dev);
if (s) {
spin_unlock(&sb_lock);
/* Yes, it sucks. As soon as we get refcounting... */
/* Almost there - next two lines will go away RSN */
lock_super(s);
unlock_super(s);
down_read(&s->s_umount);
if (s->s_root)
return s;
drop_super(s);
goto restart;
}
spin_unlock(&sb_lock);
return NULL;
}
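get_super() and get_super_to_sync() now share one idiom: pin the superblock by bumping the plain-integer s_count under sb_lock, drop the spinlock, take the sleeping s_umount lock, then re-check s_root and restart if umount won the race. Stripped of the transitional lock_super()/unlock_super() pair ("next two lines will go away RSN"), the skeleton is:

	restart:
		spin_lock(&sb_lock);
		s = find_super(dev);            /* bumps s->s_count under sb_lock */
		if (!s) {
			spin_unlock(&sb_lock);
			return NULL;
		}
		spin_unlock(&sb_lock);
		down_read(&s->s_umount);        /* may sleep; s_count keeps sb alive */
		if (s->s_root)
			return s;               /* still mounted */
		drop_super(s);                  /* lost the race with umount; retry */
		goto restart;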
@@ -714,6 +762,7 @@ asmlinkage long sys_ustat(dev_t dev, struct ustat * ubuf)
if (s == NULL)
goto out;
err = vfs_statfs(s, &sbuf);
drop_super(s);
if (err)
goto out;
@@ -735,35 +784,23 @@ asmlinkage long sys_ustat(dev_t dev, struct ustat * ubuf)
* the request.
*/
static struct super_block *get_empty_super(void)
static struct super_block *alloc_super(void)
{
struct super_block *s;
for (s = sb_entry(super_blocks.next);
s != sb_entry(&super_blocks);
s = sb_entry(s->s_list.next)) {
if (s->s_dev)
continue;
return s;
}
/* Need a new one... */
if (nr_super_blocks >= max_super_blocks)
return NULL;
s = kmalloc(sizeof(struct super_block), GFP_USER);
struct super_block *s = kmalloc(sizeof(struct super_block), GFP_USER);
if (s) {
nr_super_blocks++;
memset(s, 0, sizeof(struct super_block));
INIT_LIST_HEAD(&s->s_dirty);
INIT_LIST_HEAD(&s->s_locked_inodes);
list_add (&s->s_list, super_blocks.prev);
INIT_LIST_HEAD(&s->s_files);
INIT_LIST_HEAD(&s->s_mounts);
init_rwsem(&s->s_umount);
sema_init(&s->s_lock, 1);
s->s_count = 1;
atomic_set(&s->s_active, 1);
sema_init(&s->s_vfs_rename_sem,1);
sema_init(&s->s_nfsd_free_path_sem,1);
sema_init(&s->s_dquot.dqio_sem, 1);
sema_init(&s->s_dquot.dqoff_sem, 1);
s->s_maxbytes = MAX_NON_LFS;
}
return s;
}
@@ -773,16 +810,16 @@ static struct super_block * read_super(kdev_t dev, struct block_device *bdev,
void *data, int silent)
{
struct super_block * s;
s = get_empty_super();
s = alloc_super();
if (!s)
goto out;
s->s_dev = dev;
s->s_bdev = bdev;
s->s_flags = flags;
s->s_dirt = 0;
s->s_type = type;
s->s_dquot.flags = 0;
s->s_maxbytes = MAX_NON_LFS;
spin_lock(&sb_lock);
list_add (&s->s_list, super_blocks.prev);
spin_unlock(&sb_lock);
lock_super(s);
if (!type->read_super(s, data, silent))
goto out_fail;
@@ -798,6 +835,11 @@ static struct super_block * read_super(kdev_t dev, struct block_device *bdev,
s->s_bdev = 0;
s->s_type = NULL;
unlock_super(s);
atomic_dec(&s->s_active);
spin_lock(&sb_lock);
list_del(&s->s_list);
spin_unlock(&sb_lock);
__put_super(s);
return NULL;
}
@@ -863,9 +905,25 @@ static struct super_block *get_sb_bdev(struct file_system_type *fs_type,
if (sb) {
if (fs_type == sb->s_type &&
((flags ^ sb->s_flags) & MS_RDONLY) == 0) {
/*
* We are heavily relying on mount_sem here. We _will_ get rid of that
* ugliness RSN (and then atomicity of ->s_active will play), but first
* we need to get rid of "reuse" branch of get_empty_super() and that
* requires reference counters. Chicken and egg problem, but fortunately
* we can use the fact that right now all accesses to ->s_active are
* under mount_sem.
*/
if (atomic_read(&sb->s_active)) {
spin_lock(&sb_lock);
sb->s_count--;
spin_unlock(&sb_lock);
}
atomic_inc(&sb->s_active);
up_read(&sb->s_umount);
path_release(&nd);
return sb;
}
drop_super(sb);
} else {
mode_t mode = FMODE_READ; /* we always need it ;-) */
if (!(flags & MS_RDONLY))
@@ -926,6 +984,7 @@ static struct super_block *get_sb_single(struct file_system_type *fs_type,
sb = fs_type->kern_mnt->mnt_sb;
if (!sb)
BUG();
atomic_inc(&sb->s_active);
do_remount_sb(sb, flags, data);
return sb;
}
@@ -938,12 +997,8 @@ static void kill_super(struct super_block *sb)
struct file_system_type *fs = sb->s_type;
struct super_operations *sop = sb->s_op;
spin_lock(&dcache_lock);
if (!list_empty(&sb->s_mounts)) {
spin_unlock(&dcache_lock);
if (!atomic_dec_and_test(&sb->s_active))
return;
}
spin_unlock(&dcache_lock);
down_write(&sb->s_umount);
lock_kernel();
sb->s_root = NULL;
@@ -975,12 +1030,15 @@ static void kill_super(struct super_block *sb)
sb->s_type = NULL;
unlock_super(sb);
unlock_kernel();
up_write(&sb->s_umount);
if (bdev) {
blkdev_put(bdev, BDEV_FS);
bdput(bdev);
} else
put_unnamed_dev(dev);
spin_lock(&sb_lock);
list_del(&sb->s_list);
spin_unlock(&sb_lock);
put_super(sb);
}
/*
@@ -1045,9 +1103,6 @@ struct vfsmount *kern_mount(struct file_system_type *type)
mnt->mnt_root = dget(sb->s_root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
spin_lock(&dcache_lock);
list_add(&mnt->mnt_instances, &sb->s_mounts);
spin_unlock(&dcache_lock);
type->kern_mnt = mnt;
return mnt;
}
@@ -1092,7 +1147,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
spin_lock(&dcache_lock);
if (mnt->mnt_instances.next != mnt->mnt_instances.prev) {
if (atomic_read(&sb->s_active) > 1) {
if (atomic_read(&mnt->mnt_count) > 2) {
spin_unlock(&dcache_lock);
return -EBUSY;
@@ -1324,9 +1379,6 @@ static int do_add_mount(struct nameidata *nd, char *type, int flags,
mnt->mnt_root = dget(sb->s_root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
spin_lock(&dcache_lock);
list_add(&mnt->mnt_instances, &sb->s_mounts);
spin_unlock(&dcache_lock);
/* Something was mounted here while we slept */
while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
@@ -1583,7 +1635,10 @@ void __init mount_root(void)
check_disk_change(ROOT_DEV);
sb = get_super(ROOT_DEV);
if (sb) {
/* FIXME */
fs_type = sb->s_type;
atomic_inc(&sb->s_active);
up_read(&sb->s_umount);
goto mount_it;
}
......
@@ -631,7 +631,7 @@ struct thread_struct {
#define INIT_THREAD { \
INIT_SP, /* ksp */ \
0, /* wchan */ \
(struct pt_regs *)INIT_SP - 1, /* regs */ \
0, /* regs */ \
KERNEL_DS, /*fs*/ \
swapper_pg_dir, /* pgdir */ \
0, /* last_syscall */ \
@@ -659,8 +659,8 @@ static inline unsigned long thread_saved_pc(struct thread_struct *t)
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs->nip)
#define KSTK_ESP(tsk) ((tsk)->thread.regs->gpr[1])
#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
/*
* NOTE! The task struct and the stack go together
......
@@ -61,7 +61,6 @@ struct inodes_stat_t {
};
extern struct inodes_stat_t inodes_stat;
extern int max_super_blocks, nr_super_blocks;
extern int leases_enable, dir_notify_enable, lease_break_time;
#define NR_FILE 8192 /* this can well be larger on a larger system */
@@ -663,6 +662,7 @@ struct quota_mount_options
#include <linux/cramfs_fs_sb.h>
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
#define sb_entry(list) list_entry((list), struct super_block, s_list)
struct super_block {
@@ -680,13 +680,14 @@ struct super_block {
struct dentry *s_root;
struct rw_semaphore s_umount;
struct semaphore s_lock;
int s_count;
atomic_t s_active;
struct list_head s_dirty; /* dirty inodes */
struct list_head s_locked_inodes;/* inodes being synced */
struct list_head s_files;
struct block_device *s_bdev;
struct list_head s_mounts; /* vfsmount(s) of this one */
struct quota_mount_options s_dquot; /* Diskquota specific options */
union {
@@ -1161,6 +1162,7 @@ extern void write_inode_now(struct inode *, int);
extern void sync_dev(kdev_t);
extern int fsync_dev(kdev_t);
extern int fsync_super(struct super_block *);
extern int fsync_no_super(kdev_t);
extern void sync_inodes_sb(struct super_block *);
extern int fsync_inode_buffers(struct inode *);
extern int osync_inode_buffers(struct inode *);
@@ -1358,11 +1360,12 @@ extern int dcache_readdir(struct file *, void *, filldir_t);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(kdev_t);
extern void drop_super(struct super_block *sb);
static inline int is_mounted(kdev_t dev)
{
struct super_block *sb = get_super(dev);
if (sb) {
/* drop_super(sb); will go here */
drop_super(sb);
return 1;
}
return 0;
......
@@ -18,7 +18,6 @@ struct vfsmount
struct vfsmount *mnt_parent; /* fs we are mounted on */
struct dentry *mnt_mountpoint; /* dentry of mountpoint */
struct dentry *mnt_root; /* root of the mounted tree */
struct list_head mnt_instances; /* other vfsmounts of the same fs */
struct super_block *mnt_sb; /* pointer to superblock */
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
......
@@ -72,7 +72,9 @@ static inline unsigned long _page_hashfn(struct address_space * mapping, unsigne
#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))
extern struct page * __find_get_page(struct address_space *mapping,
unsigned long offset, struct page **hash);
unsigned long index, struct page **hash);
#define find_get_page(mapping, index) \
__find_get_page(mapping, index, page_hash(mapping, index))
extern struct page * __find_lock_page (struct address_space * mapping,
unsigned long index, struct page **hash);
extern void lock_page(struct page *page);
......
@@ -100,9 +100,6 @@ struct zone_t;
/* linux/mm/swap.c */
extern int memory_pressure;
extern void age_page_down(struct page *);
extern void age_page_down_nolock(struct page *);
extern void age_page_down_ageonly(struct page *);
extern void deactivate_page(struct page *);
extern void deactivate_page_nolock(struct page *);
extern void activate_page(struct page *);
......
@@ -128,6 +128,7 @@ EXPORT_SYMBOL(def_blk_fops);
EXPORT_SYMBOL(update_atime);
EXPORT_SYMBOL(get_fs_type);
EXPORT_SYMBOL(get_super);
EXPORT_SYMBOL(drop_super);
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(fput);
@@ -244,6 +245,7 @@ EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(__pollwait);
EXPORT_SYMBOL(poll_freewait);
EXPORT_SYMBOL(ROOT_DEV);
EXPORT_SYMBOL(__find_get_page);
EXPORT_SYMBOL(__find_lock_page);
EXPORT_SYMBOL(grab_cache_page);
EXPORT_SYMBOL(read_cache_page);
......
@@ -286,10 +286,6 @@ static ctl_table fs_table[] = {
0444, NULL, &proc_dointvec},
{FS_MAXFILE, "file-max", &files_stat.max_files, sizeof(int),
0644, NULL, &proc_dointvec},
{FS_NRSUPER, "super-nr", &nr_super_blocks, sizeof(int),
0444, NULL, &proc_dointvec},
{FS_MAXSUPER, "super-max", &max_super_blocks, sizeof(int),
0644, NULL, &proc_dointvec},
{FS_NRDQUOT, "dquot-nr", &nr_dquots, 2*sizeof(int),
0444, NULL, &proc_dointvec},
{FS_MAXDQUOT, "dquot-max", &max_dquots, sizeof(int),
......
@@ -402,7 +402,7 @@ static int shmem_getpage(struct inode * inode, unsigned long idx, struct page **
int error;
down (&inode->i_sem);
if (inode->i_size < (loff_t) idx * PAGE_CACHE_SIZE)
if (inode->i_size <= (loff_t) idx * PAGE_CACHE_SIZE)
goto sigbus;
*ptr = shmem_getpage_locked(inode, idx);
if (IS_ERR (*ptr))
......
@@ -74,42 +74,6 @@ pager_daemon_t pager_daemon = {
8, /* do swap I/O in clusters of this size */
};
/*
* We use this (minimal) function in the case where we
* know we can't deactivate the page (yet).
*/
void age_page_down_ageonly(struct page * page)
{
page->age /= 2;
}
void age_page_down_nolock(struct page * page)
{
/* The actual page aging bit */
page->age /= 2;
/*
* The page is now an old page. Move to the inactive
* list (if possible ... see below).
*/
if (!page->age)
deactivate_page_nolock(page);
}
void age_page_down(struct page * page)
{
/* The actual page aging bit */
page->age /= 2;
/*
* The page is now an old page. Move to the inactive
* list (if possible ... see below).
*/
if (!page->age)
deactivate_page(page);
}
/**
* (de)activate_page - move pages from/to active and inactive lists
* @page: the page we want to move
......
@@ -26,6 +26,19 @@
#define MAX(a,b) ((a) > (b) ? (a) : (b))
static inline void age_page_up(struct page *page)
{
unsigned age = page->age + PAGE_AGE_ADV;
if (age > PAGE_AGE_MAX)
age = PAGE_AGE_MAX;
page->age = age;
}
static inline void age_page_down(struct page * page)
{
page->age /= 2;
}
/*
* The swap-out function returns 1 if it successfully
* scanned all the pages it was asked to (`count').
@@ -54,16 +67,19 @@ static unsigned int zone_inactive_plenty(zone_t *zone)
static unsigned int zone_inactive_shortage(zone_t *zone)
{
unsigned int inactive;
unsigned int sum;
if (!zone->size)
return 0;
inactive = zone->inactive_dirty_pages;
inactive += zone->inactive_clean_pages;
inactive += zone->free_pages;
return inactive < zone->pages_high;
sum = zone->pages_high;
sum -= zone->inactive_dirty_pages;
sum -= zone->inactive_clean_pages;
sum -= zone->free_pages;
if (sum > 0)
return sum;
return 0;
}
static unsigned int zone_free_plenty(zone_t *zone)
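zone_inactive_shortage() now reports the size of the deficit rather than a yes/no answer, so callers can aim to free exactly as much as is missing. Restated as a standalone sketch with signed arithmetic for clarity (the patch performs the same subtractions on the unsigned zone counters):

	/* How far the zone's inactive + free pages fall short of
	 * pages_high; zero means no shortage. */
	static long inactive_shortage(long pages_high, long inactive_dirty,
				      long inactive_clean, long free_pages)
	{
		long shortage = pages_high - inactive_dirty
				- inactive_clean - free_pages;
		return shortage > 0 ? shortage : 0;
	}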
@@ -103,9 +119,7 @@ static void try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, u
/* Don't look at this pte if it's been accessed recently. */
if (ptep_test_and_clear_young(page_table)) {
page->age += PAGE_AGE_ADV;
if (page->age > PAGE_AGE_MAX)
page->age = PAGE_AGE_MAX;
age_page_up(page);
return;
}
@@ -700,15 +714,6 @@ int page_launder(int gfp_mask, int sync)
return cleaned_pages;
}
static inline void age_page_up(struct page *page)
{
unsigned age = page->age + PAGE_AGE_ADV;
if (age > PAGE_AGE_MAX)
age = PAGE_AGE_MAX;
page->age = age;
}
/**
* refill_inactive_scan - scan the active list and find pages to deactivate
* @priority: the priority at which to scan
@@ -760,7 +765,7 @@ int refill_inactive_scan(zone_t *zone, unsigned int priority, int target)
age_page_up(page);
page_active = 1;
} else {
age_page_down_ageonly(page);
age_page_down(page);
/*
* Since we don't hold a reference on the page
* ourselves, we have to do our test a bit more
......
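The consolidated inlines implement classic exponential page aging: a reference adds PAGE_AGE_ADV up to the PAGE_AGE_MAX cap, while each scan that finds the page unreferenced halves its age, deactivating it when the age reaches zero. A busy page thus saturates at the cap, and an idle one decays in logarithmically many passes (assuming a cap of 64: 64 -> 32 -> 16 -> 8 -> 4 -> 2 -> 1 -> 0, seven scans). A toy model of the arithmetic; the constant values here are assumptions, not taken from this tree:

	#define PAGE_AGE_ADV 3          /* assumed value */
	#define PAGE_AGE_MAX 64         /* assumed value */

	static unsigned age_up(unsigned age)
	{
		unsigned a = age + PAGE_AGE_ADV;
		return a > PAGE_AGE_MAX ? PAGE_AGE_MAX : a;
	}

	static unsigned age_down(unsigned age)
	{
		return age / 2;         /* deactivate the page when this hits 0 */
	}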