Commit bc3b14cb authored by Dave Chinner, committed by Al Viro

inode: convert inode lru list to generic lru list code.

[glommer@openvz.org: adapted for new LRU return codes]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a38e4082
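
Note: the diff below replaces the open-coded per-superblock inode LRU (a list head, a dedicated spinlock and a hand-maintained unused-inode counter) with the generic list_lru added by the parent commit. list_lru_add() and list_lru_del() report whether the item's membership actually changed, list_lru_count() replaces s_nr_inodes_unused, and list_lru_walk() drives reclaim through an isolate callback. As a reading aid, here is a minimal userspace sketch of that "list that carries its own lock and count" idea; the toy_* names are illustrative stand-ins, not the kernel's <linux/list_lru.h> implementation.

```c
/*
 * Toy userspace model of the idea behind the generic LRU list: the list
 * carries its own lock and item count, and add/del return whether the
 * item's membership actually changed.  Illustrative only; the real thing
 * is struct list_lru in <linux/list_lru.h>.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_node { struct toy_node *prev, *next; };

struct toy_lru {
        pthread_mutex_t lock;
        struct toy_node head;   /* circular list; empty when head points at itself */
        long nr_items;
};

static void toy_lru_init(struct toy_lru *lru)
{
        pthread_mutex_init(&lru->lock, NULL);
        lru->head.prev = lru->head.next = &lru->head;
        lru->nr_items = 0;
}

/* Returns true only if the item was not already on the list. */
static bool toy_lru_add(struct toy_lru *lru, struct toy_node *item)
{
        bool added = false;

        pthread_mutex_lock(&lru->lock);
        if (item->next == NULL) {               /* NULL next marks "not on any list" */
                item->next = &lru->head;
                item->prev = lru->head.prev;
                lru->head.prev->next = item;
                lru->head.prev = item;
                lru->nr_items++;
                added = true;
        }
        pthread_mutex_unlock(&lru->lock);
        return added;
}

/* Returns true only if the item was actually on the list. */
static bool toy_lru_del(struct toy_lru *lru, struct toy_node *item)
{
        bool removed = false;

        pthread_mutex_lock(&lru->lock);
        if (item->next != NULL) {
                item->prev->next = item->next;
                item->next->prev = item->prev;
                item->next = item->prev = NULL;
                lru->nr_items--;
                removed = true;
        }
        pthread_mutex_unlock(&lru->lock);
        return removed;
}

int main(void)
{
        struct toy_lru lru;
        struct toy_node a = { NULL, NULL };
        long nr_unused = 0;             /* caller-side statistic, like nr_unused below */

        toy_lru_init(&lru);
        if (toy_lru_add(&lru, &a))      /* counted once */
                nr_unused++;
        if (toy_lru_add(&lru, &a))      /* duplicate add: no-op, not counted */
                nr_unused++;
        assert(nr_unused == 1 && lru.nr_items == 1);
        if (toy_lru_del(&lru, &a))
                nr_unused--;
        assert(nr_unused == 0 && lru.nr_items == 0);
        printf("ok\n");
        return 0;
}
```

The main() mirrors inode_lru_list_add()/inode_lru_list_del() in the diff: the caller touches its statistic only when the boolean return says the list really changed, so a duplicate add or a del of an absent item cannot skew the count.
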
@@ -17,6 +17,7 @@
 #include <linux/prefetch.h>
 #include <linux/buffer_head.h>  /* for inode_has_buffers */
 #include <linux/ratelimit.h>
+#include <linux/list_lru.h>
 #include "internal.h"
 
 /*
@@ -24,7 +25,7 @@
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
- * inode->i_sb->s_inode_lru_lock protects:
+ * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
@@ -37,7 +38,7 @@
  *
  * inode_sb_list_lock
  *   inode->i_lock
- *     inode->i_sb->s_inode_lru_lock
+ *     Inode LRU list locks
  *
  * bdi->wb.list_lock
  *   inode->i_lock
@@ -401,13 +402,8 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
-        spin_lock(&inode->i_sb->s_inode_lru_lock);
-        if (list_empty(&inode->i_lru)) {
-                list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
-                inode->i_sb->s_nr_inodes_unused++;
+        if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
                 this_cpu_inc(nr_unused);
-        }
-        spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /*
@@ -425,13 +421,9 @@ void inode_add_lru(struct inode *inode)
 
 static void inode_lru_list_del(struct inode *inode)
 {
-        spin_lock(&inode->i_sb->s_inode_lru_lock);
-        if (!list_empty(&inode->i_lru)) {
-                list_del_init(&inode->i_lru);
-                inode->i_sb->s_nr_inodes_unused--;
+
+        if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
                 this_cpu_dec(nr_unused);
-        }
-        spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /**
@@ -675,24 +667,8 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
         return busy;
 }
 
-static int can_unuse(struct inode *inode)
-{
-        if (inode->i_state & ~I_REFERENCED)
-                return 0;
-        if (inode_has_buffers(inode))
-                return 0;
-        if (atomic_read(&inode->i_count))
-                return 0;
-        if (inode->i_data.nrpages)
-                return 0;
-        return 1;
-}
-
 /*
- * Walk the superblock inode LRU for freeable inodes and attempt to free them.
- * This is called from the superblock shrinker function with a number of inodes
- * to trim from the LRU. Inodes to be freed are moved to a temporary list and
- * then are freed outside inode_lock by dispose_list().
+ * Isolate the inode from the LRU in preparation for freeing it.
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed. If the inode has metadata buffers attached to
@@ -706,90 +682,79 @@ static int can_unuse(struct inode *inode)
  * LRU does not have strict ordering. Hence we don't want to reclaim inodes
  * with this flag set because they are the inodes that are out of order.
  */
-long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
-{
-        LIST_HEAD(freeable);
-        long nr_scanned;
-        long freed = 0;
-        unsigned long reap = 0;
-
-        spin_lock(&sb->s_inode_lru_lock);
-        for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
-                struct inode *inode;
-
-                if (list_empty(&sb->s_inode_lru))
-                        break;
-
-                inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
-
-                /*
-                 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
-                 * so use a trylock. If we fail to get the lock, just move the
-                 * inode to the back of the list so we don't spin on it.
-                 */
-                if (!spin_trylock(&inode->i_lock)) {
-                        list_move(&inode->i_lru, &sb->s_inode_lru);
-                        continue;
-                }
-
-                /*
-                 * Referenced or dirty inodes are still in use. Give them
-                 * another pass through the LRU as we canot reclaim them now.
-                 */
-                if (atomic_read(&inode->i_count) ||
-                    (inode->i_state & ~I_REFERENCED)) {
-                        list_del_init(&inode->i_lru);
-                        spin_unlock(&inode->i_lock);
-                        sb->s_nr_inodes_unused--;
-                        this_cpu_dec(nr_unused);
-                        continue;
-                }
-
-                /* recently referenced inodes get one more pass */
-                if (inode->i_state & I_REFERENCED) {
-                        inode->i_state &= ~I_REFERENCED;
-                        list_move(&inode->i_lru, &sb->s_inode_lru);
-                        spin_unlock(&inode->i_lock);
-                        continue;
-                }
-                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
-                        __iget(inode);
-                        spin_unlock(&inode->i_lock);
-                        spin_unlock(&sb->s_inode_lru_lock);
-                        if (remove_inode_buffers(inode))
-                                reap += invalidate_mapping_pages(&inode->i_data,
-                                                                0, -1);
-                        iput(inode);
-                        spin_lock(&sb->s_inode_lru_lock);
-
-                        if (inode != list_entry(sb->s_inode_lru.next,
-                                                struct inode, i_lru))
-                                continue;       /* wrong inode or list_empty */
-                        /* avoid lock inversions with trylock */
-                        if (!spin_trylock(&inode->i_lock))
-                                continue;
-                        if (!can_unuse(inode)) {
-                                spin_unlock(&inode->i_lock);
-                                continue;
-                        }
-                }
-                WARN_ON(inode->i_state & I_NEW);
-                inode->i_state |= I_FREEING;
-                spin_unlock(&inode->i_lock);
-
-                list_move(&inode->i_lru, &freeable);
-                sb->s_nr_inodes_unused--;
-                this_cpu_dec(nr_unused);
-                freed++;
-        }
-        if (current_is_kswapd())
-                __count_vm_events(KSWAPD_INODESTEAL, reap);
-        else
-                __count_vm_events(PGINODESTEAL, reap);
-        spin_unlock(&sb->s_inode_lru_lock);
-        if (current->reclaim_state)
-                current->reclaim_state->reclaimed_slab += reap;
-
+static enum lru_status
+inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+{
+        struct list_head *freeable = arg;
+        struct inode    *inode = container_of(item, struct inode, i_lru);
+
+        /*
+         * we are inverting the lru lock/inode->i_lock here, so use a trylock.
+         * If we fail to get the lock, just skip it.
+         */
+        if (!spin_trylock(&inode->i_lock))
+                return LRU_SKIP;
+
+        /*
+         * Referenced or dirty inodes are still in use. Give them another pass
+         * through the LRU as we canot reclaim them now.
+         */
+        if (atomic_read(&inode->i_count) ||
+            (inode->i_state & ~I_REFERENCED)) {
+                list_del_init(&inode->i_lru);
+                spin_unlock(&inode->i_lock);
+                this_cpu_dec(nr_unused);
+                return LRU_REMOVED;
+        }
+
+        /* recently referenced inodes get one more pass */
+        if (inode->i_state & I_REFERENCED) {
+                inode->i_state &= ~I_REFERENCED;
+                spin_unlock(&inode->i_lock);
+                return LRU_ROTATE;
+        }
+
+        if (inode_has_buffers(inode) || inode->i_data.nrpages) {
+                __iget(inode);
+                spin_unlock(&inode->i_lock);
+                spin_unlock(lru_lock);
+                if (remove_inode_buffers(inode)) {
+                        unsigned long reap;
+                        reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
+                        if (current_is_kswapd())
+                                __count_vm_events(KSWAPD_INODESTEAL, reap);
+                        else
+                                __count_vm_events(PGINODESTEAL, reap);
+                        if (current->reclaim_state)
+                                current->reclaim_state->reclaimed_slab += reap;
+                }
+                iput(inode);
+                spin_lock(lru_lock);
+                return LRU_RETRY;
+        }
+
+        WARN_ON(inode->i_state & I_NEW);
+        inode->i_state |= I_FREEING;
+        spin_unlock(&inode->i_lock);
+
+        list_move(&inode->i_lru, freeable);
+        this_cpu_dec(nr_unused);
+        return LRU_REMOVED;
+}
+
+/*
+ * Walk the superblock inode LRU for freeable inodes and attempt to free them.
+ * This is called from the superblock shrinker function with a number of inodes
+ * to trim from the LRU. Inodes to be freed are moved to a temporary list and
+ * then are freed outside inode_lock by dispose_list().
+ */
+long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
+{
+        LIST_HEAD(freeable);
+        long freed;
+
+        freed = list_lru_walk(&sb->s_inode_lru, inode_lru_isolate,
+                                                &freeable, nr_to_scan);
         dispose_list(&freeable);
         return freed;
 }
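
Note: with the old loop body turned into inode_lru_isolate(), the generic walker behind list_lru_walk() owns the iteration and the LRU lock, and the callback only returns a verdict: LRU_SKIP, LRU_REMOVED, LRU_ROTATE or LRU_RETRY, the "new LRU return codes" mentioned at the top of the commit message. Below is a hedged userspace sketch of how a walker of this style might act on those verdicts. It is array-backed for brevity and purely illustrative; the real walker lives in mm/list_lru.c and operates on the linked list under its spinlock.

```c
/*
 * Toy model of a list_lru-style walk: the walker owns the iteration and
 * the lock; the isolate callback only reports what should happen to each
 * item via a status code.  Illustrative userspace sketch only.
 */
#include <stdio.h>
#include <string.h>

enum toy_lru_status {
        TOY_LRU_REMOVED,        /* callback took the item off the list */
        TOY_LRU_ROTATE,         /* give the item another pass: move to tail */
        TOY_LRU_SKIP,           /* could not lock the item: leave it, move on */
        TOY_LRU_RETRY,          /* lock was dropped: re-examine the same slot */
};

struct toy_item {
        int referenced;         /* rotate once, reclaim on a later pass */
        int busy_once;          /* force one RETRY to show re-examination */
};

static enum toy_lru_status isolate(struct toy_item *it, int *freed)
{
        if (it->busy_once) {
                it->busy_once = 0;      /* e.g. had to drop the lock and redo */
                return TOY_LRU_RETRY;
        }
        if (it->referenced) {
                it->referenced = 0;
                return TOY_LRU_ROTATE;
        }
        (*freed)++;                     /* "move to the freeable list" */
        return TOY_LRU_REMOVED;
}

static int walk(struct toy_item *lru, int *nr, int nr_to_walk)
{
        int freed = 0, i = 0;

        while (nr_to_walk-- > 0 && i < *nr) {
                switch (isolate(&lru[i], &freed)) {
                case TOY_LRU_REMOVED:   /* drop slot i, keep position */
                        memmove(&lru[i], &lru[i + 1], (*nr - i - 1) * sizeof(*lru));
                        (*nr)--;
                        break;
                case TOY_LRU_ROTATE: {  /* move slot i to the tail */
                        struct toy_item tmp = lru[i];
                        memmove(&lru[i], &lru[i + 1], (*nr - i - 1) * sizeof(*lru));
                        lru[*nr - 1] = tmp;
                        break;
                }
                case TOY_LRU_SKIP:      /* leave it in place, advance */
                        i++;
                        break;
                case TOY_LRU_RETRY:     /* same slot again on the next loop */
                        break;
                }
        }
        return freed;
}

int main(void)
{
        struct toy_item lru[] = {
                { .referenced = 1 },    /* rotated, then freed later in the walk */
                { .busy_once = 1 },     /* retried once, then freed */
                { 0 },                  /* freed immediately */
        };
        int nr = 3;
        int freed = walk(lru, &nr, 8);

        printf("freed %d, %d left on lru\n", freed, nr);
        return 0;
}
```

In this toy run all three items end up freed: the referenced one is rotated once and reclaimed later in the same walk, and the "busy" one is re-examined after the simulated lock drop, which is the behaviour the LRU_ROTATE and LRU_RETRY paths above rely on.
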
......
@@ -78,14 +78,13 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
         if (sb->s_op->nr_cached_objects)
                 fs_objects = sb->s_op->nr_cached_objects(sb);
 
-        total_objects = sb->s_nr_dentry_unused +
-                        sb->s_nr_inodes_unused + fs_objects + 1;
+        inodes = list_lru_count(&sb->s_inode_lru);
+        total_objects = sb->s_nr_dentry_unused + inodes + fs_objects + 1;
 
         /* proportion the scan between the caches */
         dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
                                                                 total_objects);
-        inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
-                                                                total_objects);
+        inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
 
         /*
          * prune the dcache first as the icache is pinned by it, then
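
Note: the only change to super_cache_scan() is where the inode count comes from (list_lru_count() instead of sb->s_nr_inodes_unused); the proportioning itself is untouched: sc->nr_to_scan is split between the dentry and inode LRUs according to each cache's share of total_objects. A small worked example follows, with a plain userspace stand-in for the kernel's mult_frac(), which computes x * numer / denom while avoiding overflow in the intermediate product.

```c
/*
 * Worked example of the proportional split done in super_cache_scan().
 * mult_frac() here is a userspace stand-in for the kernel macro.
 */
#include <stdio.h>

static unsigned long mult_frac(unsigned long x, unsigned long numer,
                               unsigned long denom)
{
        unsigned long quot = x / denom;
        unsigned long rem  = x % denom;

        return quot * numer + rem * numer / denom;
}

int main(void)
{
        unsigned long nr_to_scan = 128;         /* from the shrink control */
        unsigned long dentry_unused = 600;      /* sb->s_nr_dentry_unused */
        unsigned long inodes = 200;             /* list_lru_count(&sb->s_inode_lru) */
        unsigned long fs_objects = 0;
        unsigned long total = dentry_unused + inodes + fs_objects + 1;

        unsigned long dentries = mult_frac(nr_to_scan, dentry_unused, total);
        unsigned long inode_scan = mult_frac(nr_to_scan, inodes, total);

        /* 128 * 600 / 801 = 95 dentries, 128 * 200 / 801 = 31 inodes */
        printf("scan %lu dentries, %lu inodes of %lu requested\n",
               dentries, inode_scan, nr_to_scan);
        return 0;
}
```

With 600 unused dentries, 200 unused inodes and a request to scan 128 objects, the split works out to roughly 95 dentries and 31 inodes.
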
@@ -119,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
                 total_objects = sb->s_op->nr_cached_objects(sb);
 
         total_objects += sb->s_nr_dentry_unused;
-        total_objects += sb->s_nr_inodes_unused;
+        total_objects += list_lru_count(&sb->s_inode_lru);
 
         total_objects = vfs_pressure_ratio(total_objects);
         drop_super(sb);
@@ -194,8 +193,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
                 INIT_LIST_HEAD(&s->s_inodes);
                 INIT_LIST_HEAD(&s->s_dentry_lru);
                 spin_lock_init(&s->s_dentry_lru_lock);
-                INIT_LIST_HEAD(&s->s_inode_lru);
-                spin_lock_init(&s->s_inode_lru_lock);
+                list_lru_init(&s->s_inode_lru);
                 INIT_LIST_HEAD(&s->s_mounts);
                 init_rwsem(&s->s_umount);
                 lockdep_set_class(&s->s_umount, &type->s_umount_key);
......
@@ -10,6 +10,7 @@
 #include <linux/stat.h>
 #include <linux/cache.h>
 #include <linux/list.h>
+#include <linux/list_lru.h>
 #include <linux/llist.h>
 #include <linux/radix-tree.h>
 #include <linux/rbtree.h>
@@ -1275,10 +1276,7 @@ struct super_block {
         struct list_head        s_dentry_lru;   /* unused dentry lru */
         long                    s_nr_dentry_unused;     /* # of dentry on lru */
 
-        /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
-        spinlock_t              s_inode_lru_lock ____cacheline_aligned_in_smp;
-        struct list_head        s_inode_lru;            /* unused inode lru */
-        long                    s_nr_inodes_unused;     /* # of inodes on lru */
+        struct list_lru         s_inode_lru ____cacheline_aligned_in_smp;
 
         struct block_device     *s_bdev;
         struct backing_dev_info *s_bdi;
......