Commit a4082357 authored by Dave Chinner, committed by Al Viro

xfs: rework buffer dispose list tracking

In converting the buffer lru lists to use the generic code, the locking
for marking the buffers as on the dispose list was lost.  This results in
confusion in LRU buffer tracking and accounting, resulting in reference
counts being mucked up and the filesystem being unmountable.

To fix this, introduce an internal buffer spinlock to protect the state
field that holds the dispose list information.  Because there is now
locking needed around xfs_buf_lru_add/del, and they are used in exactly
one place each two lines apart, get rid of the wrappers and code the logic
directly in place.
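
In outline, the release path under the new b_lock looks like this (a
condensed sketch of the xfs_buf_rele() hunk in the diff below; the
identifiers are those of the patched code, with the surrounding
pag_buf_lock handling elided):

	spin_lock(&bp->b_lock);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/* keep: put the buffer on the LRU, taking an LRU reference */
		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
			/* clear the (now stale) dispose list state flag */
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
		}
	} else if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
		/* free: still on the LRU proper, so remove it first */
		list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
	}
	spin_unlock(&bp->b_lock);

Both LRU list membership and the XFS_BSTATE_DISPOSE flag are now only
ever changed with b_lock held, which restores the lost serialisation.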

Further, the LRU emptying code used on unmount is less than optimal.
Convert it to use a dispose list as per a normal shrinker walk, and repeat
the walk that fills the dispose list until the LRU is empty.  This avoids
needing to drop and regain the LRU lock for every item being freed, and
allows the same logic as the shrinker isolate call to be used.  Simpler,
easier to understand.
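
The unmount drain then reduces to the loop below (taken nearly verbatim
from the new xfs_wait_buftarg() in the diff that follows): each pass
isolates idle buffers onto a private dispose list under the LRU lock,
releases them with no locks held, and backs off briefly before retrying
if busy buffers forced another pass:

	LIST_HEAD(dispose);
	int loop = 0;

	while (list_lru_count(&btp->bt_lru)) {
		/* the walk callback moves each idle buffer onto dispose */
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
			      &dispose, LONG_MAX);

		/* drop the final reference on each isolated buffer */
		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			xfs_buf_rele(bp);
		}
		/* buffers still held elsewhere were skipped; wait, retry */
		if (loop++ != 0)
			delay(100);
	}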
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent addbda40
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -80,37 +80,6 @@ xfs_buf_vmap_len(
 	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
 }
 
-/*
- * xfs_buf_lru_add - add a buffer to the LRU.
- *
- * The LRU takes a new reference to the buffer so that it will only be freed
- * once the shrinker takes the buffer off the LRU.
- */
-static void
-xfs_buf_lru_add(
-	struct xfs_buf	*bp)
-{
-	if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
-		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
-		atomic_inc(&bp->b_hold);
-	}
-}
-
-/*
- * xfs_buf_lru_del - remove a buffer from the LRU
- *
- * The unlocked check is safe here because it only occurs when there are not
- * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
- * to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free().
- */
-static void
-xfs_buf_lru_del(
-	struct xfs_buf	*bp)
-{
-	list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
-}
-
 /*
  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  * b_lru_ref count so that the buffer is freed immediately when the buffer
@@ -134,12 +103,14 @@ xfs_buf_stale(
 	 */
 	bp->b_flags &= ~_XBF_DELWRI_Q;
 
-	atomic_set(&(bp)->b_lru_ref, 0);
-	if (!(bp->b_lru_flags & _XBF_LRU_DISPOSE) &&
+	spin_lock(&bp->b_lock);
+	atomic_set(&bp->b_lru_ref, 0);
+	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
 		atomic_dec(&bp->b_hold);
 
 	ASSERT(atomic_read(&bp->b_hold) >= 1);
+	spin_unlock(&bp->b_lock);
 }
 
 static int
@@ -203,6 +174,7 @@ _xfs_buf_alloc(
 	INIT_LIST_HEAD(&bp->b_list);
 	RB_CLEAR_NODE(&bp->b_rbnode);
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
+	spin_lock_init(&bp->b_lock);
 	XB_SET_OWNER(bp);
 	bp->b_target = target;
 	bp->b_flags = flags;
@@ -892,12 +864,33 @@ xfs_buf_rele(
 
 	ASSERT(atomic_read(&bp->b_hold) > 0);
 	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
-		if (!(bp->b_flags & XBF_STALE) &&
-			   atomic_read(&bp->b_lru_ref)) {
-			xfs_buf_lru_add(bp);
+		spin_lock(&bp->b_lock);
+		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+			/*
+			 * If the buffer is added to the LRU take a new
+			 * reference to the buffer for the LRU and clear the
+			 * (now stale) dispose list state flag
+			 */
+			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+				bp->b_state &= ~XFS_BSTATE_DISPOSE;
+				atomic_inc(&bp->b_hold);
+			}
+			spin_unlock(&bp->b_lock);
 			spin_unlock(&pag->pag_buf_lock);
 		} else {
-			xfs_buf_lru_del(bp);
+			/*
+			 * most of the time buffers will already be removed from
+			 * the LRU, so optimise that case by checking for the
+			 * XFS_BSTATE_DISPOSE flag indicating the last list the
+			 * buffer was on was the disposal list
+			 */
+			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
+				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
+			} else {
+				ASSERT(list_empty(&bp->b_lru));
+			}
+			spin_unlock(&bp->b_lock);
+
 			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
 			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
 			spin_unlock(&pag->pag_buf_lock);
@@ -1485,33 +1478,48 @@ xfs_buftarg_wait_rele(
 {
 	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
+	struct list_head	*dispose = arg;
 
 	if (atomic_read(&bp->b_hold) > 1) {
-		/* need to wait */
+		/* need to wait, so skip it this pass */
 		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
-		spin_unlock(lru_lock);
-		delay(100);
-	} else {
-		/*
-		 * clear the LRU reference count so the buffer doesn't get
-		 * ignored in xfs_buf_rele().
-		 */
-		atomic_set(&bp->b_lru_ref, 0);
-		spin_unlock(lru_lock);
-		xfs_buf_rele(bp);
+		return LRU_SKIP;
 	}
+	if (!spin_trylock(&bp->b_lock))
+		return LRU_SKIP;
 
-	spin_lock(lru_lock);
-	return LRU_RETRY;
+	/*
+	 * clear the LRU reference count so the buffer doesn't get
+	 * ignored in xfs_buf_rele().
+	 */
+	atomic_set(&bp->b_lru_ref, 0);
+	bp->b_state |= XFS_BSTATE_DISPOSE;
+	list_move(item, dispose);
+	spin_unlock(&bp->b_lock);
+	return LRU_REMOVED;
 }
 
 void
 xfs_wait_buftarg(
 	struct xfs_buftarg	*btp)
 {
-	while (list_lru_count(&btp->bt_lru))
+	LIST_HEAD(dispose);
+	int loop = 0;
+
+	/* loop until there is nothing left on the lru list. */
+	while (list_lru_count(&btp->bt_lru)) {
 		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
-			      NULL, LONG_MAX);
+			      &dispose, LONG_MAX);
+
+		while (!list_empty(&dispose)) {
+			struct xfs_buf *bp;
+			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+			list_del_init(&bp->b_lru);
+			xfs_buf_rele(bp);
+		}
+		if (loop++ != 0)
+			delay(100);
+	}
 }
 
 static enum lru_status
@@ -1523,16 +1531,25 @@ xfs_buftarg_isolate(
 	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
 	struct list_head	*dispose = arg;
 
+	/*
+	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
+	 * If we fail to get the lock, just skip it.
+	 */
+	if (!spin_trylock(&bp->b_lock))
+		return LRU_SKIP;
+
 	/*
 	 * Decrement the b_lru_ref count unless the value is already
 	 * zero. If the value is already zero, we need to reclaim the
 	 * buffer, otherwise it gets another trip through the LRU.
 	 */
-	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0))
+	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+		spin_unlock(&bp->b_lock);
 		return LRU_ROTATE;
+	}
 
-	bp->b_lru_flags |= _XBF_LRU_DISPOSE;
+	bp->b_state |= XFS_BSTATE_DISPOSE;
 	list_move(item, dispose);
+	spin_unlock(&bp->b_lock);
 	return LRU_REMOVED;
 }
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -60,7 +60,6 @@ typedef enum {
 #define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND	 (1 << 23)/* compound buffer */
-#define _XBF_LRU_DISPOSE (1 << 24)/* buffer being discarded */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -79,8 +78,12 @@ typedef unsigned int xfs_buf_flags_t;
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
-	{ _XBF_COMPOUND,	"COMPOUND" }, \
-	{ _XBF_LRU_DISPOSE,	"LRU_DISPOSE" }
+	{ _XBF_COMPOUND,	"COMPOUND" }
+
+/*
+ * Internal state flags.
+ */
+#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
 
 typedef struct xfs_buftarg {
 	dev_t			bt_dev;
@@ -136,7 +139,8 @@ typedef struct xfs_buf {
 	 * bt_lru_lock and not by b_sema
 	 */
 	struct list_head	b_lru;		/* lru list */
-	xfs_buf_flags_t		b_lru_flags;	/* internal lru status flags */
+	spinlock_t		b_lock;		/* internal state lock */
+	unsigned int		b_state;	/* internal state flags */
 	wait_queue_head_t	b_waiters;	/* unpin waiters */
 	struct list_head	b_list;
 	struct xfs_perag	*b_pag;		/* contains rbtree root */