Commit 368e1361 authored by Dave Chinner, committed by Alex Elder

xfs: remove duplicate code from dquot reclaim

The dquot shaker and the free-list reclaim code use exactly the same
algorithm but the code is duplicated and slightly different in each
case. Make the shaker code use the single dquot reclaim code to
remove the code duplication.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 3a25404b
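
In short, the shaker becomes a thin wrapper around the single reclaim helper. The consolidated xfs_qm_shake_freelist() introduced by the diff below is reproduced here, with a brief comment added for readability:

	STATIC int
	xfs_qm_shake_freelist(
		int	howmany)
	{
		int		nreclaimed = 0;
		xfs_dquot_t	*dqp;

		if (howmany <= 0)
			return 0;

		/* reclaim one dquot at a time until 'howmany' have been reclaimed
		 * or the freelist is exhausted */
		while (nreclaimed < howmany) {
			dqp = xfs_qm_dqreclaim_one();
			if (!dqp)
				return nreclaimed;
			xfs_qm_dqdestroy(dqp);
			nreclaimed++;
		}
		return nreclaimed;
	}

xfs_qm_dqreclaim_one() hands back a locked dquot that is no longer on the freelist (and, where applicable, has been unlinked from the hash chain and mount list), so the shaker only has to destroy it.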
@@ -653,8 +653,6 @@ DEFINE_EVENT(xfs_dquot_class, name, \
 	TP_PROTO(struct xfs_dquot *dqp), \
 	TP_ARGS(dqp))
 DEFINE_DQUOT_EVENT(xfs_dqadjust);
-DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
-DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
 DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
@@ -1926,53 +1926,46 @@ xfs_qm_init_quotainos(
 }
 /*
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
- * favor the lookup function ...
- * XXXsup merge this with qm_reclaim_one().
+ * Just pop the least recently used dquot off the freelist and
+ * recycle it. The returned dquot is locked.
  */
-STATIC int
-xfs_qm_shake_freelist(
-	int	howmany)
+STATIC xfs_dquot_t *
+xfs_qm_dqreclaim_one(void)
 {
-	int		nreclaimed;
-	xfs_dqhash_t	*hash;
-	xfs_dquot_t	*dqp, *nextdqp;
+	xfs_dquot_t	*dqpout;
+	xfs_dquot_t	*dqp;
 	int		restarts;
-	int		nflushes;
-	if (howmany <= 0)
-		return 0;
-	nreclaimed = 0;
 	restarts = 0;
-	nflushes = 0;
-#ifdef QUOTADEBUG
-	cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
-#endif
-	/* lock order is : hashchainlock, freelistlock, mplistlock */
- tryagain:
+	dqpout = NULL;
+	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
+startagain:
 	xfs_qm_freelist_lock(xfs_Gqm);
-	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
-	     ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
-	      nreclaimed < howmany); ) {
+	FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
 		struct xfs_mount *mp = dqp->q_mount;
 		xfs_dqlock(dqp);
 		/*
 		 * We are racing with dqlookup here. Naturally we don't
-		 * want to reclaim a dquot that lookup wants.
+		 * want to reclaim a dquot that lookup wants. We release the
+		 * freelist lock and start over, so that lookup will grab
+		 * both the dquot and the freelistlock.
 		 */
 		if (dqp->dq_flags & XFS_DQ_WANT) {
+			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
+			trace_xfs_dqreclaim_want(dqp);
 			xfs_dqunlock(dqp);
 			xfs_qm_freelist_unlock(xfs_Gqm);
 			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return nreclaimed;
+				return NULL;
 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-			goto tryagain;
+			goto startagain;
 		}
 		/*
@@ -1985,19 +1978,22 @@ xfs_qm_shake_freelist(
 			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
 			ASSERT(dqp->HL_PREVP == NULL);
 			ASSERT(list_empty(&dqp->q_mplist));
+			XQM_FREELIST_REMOVE(dqp);
+			xfs_dqunlock(dqp);
+			dqpout = dqp;
 			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-			nextdqp = dqp->dq_flnext;
-			goto off_freelist;
+			break;
 		}
+		ASSERT(dqp->q_hash);
 		ASSERT(!list_empty(&dqp->q_mplist));
 		/*
 		 * Try to grab the flush lock. If this dquot is in the process of
 		 * getting flushed to disk, we don't want to reclaim it.
 		 */
 		if (!xfs_dqflock_nowait(dqp)) {
 			xfs_dqunlock(dqp);
-			dqp = dqp->dq_flnext;
 			continue;
 		}
@@ -2010,21 +2006,21 @@ xfs_qm_shake_freelist(
 		if (XFS_DQ_IS_DIRTY(dqp)) {
 			int	error;
-			trace_xfs_dqshake_dirty(dqp);
+			trace_xfs_dqreclaim_dirty(dqp);
 			/*
 			 * We flush it delayed write, so don't bother
-			 * releasing the mplock.
+			 * releasing the freelist lock.
 			 */
 			error = xfs_qm_dqflush(dqp, 0);
 			if (error) {
 				xfs_fs_cmn_err(CE_WARN, mp,
-			"xfs_qm_dqflush_all: dquot %p flush failed", dqp);
+			"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
 			}
 			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-			dqp = dqp->dq_flnext;
 			continue;
 		}
 		/*
 		 * We're trying to get the hashlock out of order. This races
 		 * with dqlookup; so, we giveup and goto the next dquot if
@@ -2033,57 +2029,71 @@ xfs_qm_shake_freelist(
 		 * waiting for the freelist lock.
 		 */
 		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
-			xfs_dqfunlock(dqp);
-			xfs_dqunlock(dqp);
-			dqp = dqp->dq_flnext;
-			continue;
+			restarts++;
+			goto dqfunlock;
 		}
 		/*
 		 * This races with dquot allocation code as well as dqflush_all
 		 * and reclaim code. So, if we failed to grab the mplist lock,
 		 * giveup everything and start over.
 		 */
-		hash = dqp->q_hash;
-		ASSERT(hash);
 		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
-			/* XXX put a sentinel so that we can come back here */
+			restarts++;
+			mutex_unlock(&dqp->q_hash->qh_lock);
 			xfs_dqfunlock(dqp);
 			xfs_dqunlock(dqp);
-			mutex_unlock(&hash->qh_lock);
 			xfs_qm_freelist_unlock(xfs_Gqm);
-			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return nreclaimed;
-			goto tryagain;
+			if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
+				return NULL;
+			goto startagain;
 		}
-		trace_xfs_dqshake_unlink(dqp);
-#ifdef QUOTADEBUG
-		cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
-			dqp, be32_to_cpu(dqp->q_core.d_id));
-#endif
 		ASSERT(dqp->q_nrefs == 0);
-		nextdqp = dqp->dq_flnext;
-		XQM_HASHLIST_REMOVE(hash, dqp);
 		list_del_init(&dqp->q_mplist);
 		mp->m_quotainfo->qi_dquots--;
 		mp->m_quotainfo->qi_dqreclaims++;
-		xfs_dqfunlock(dqp);
-		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
-		mutex_unlock(&hash->qh_lock);
- off_freelist:
+		XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
 		XQM_FREELIST_REMOVE(dqp);
+		dqpout = dqp;
+		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+		mutex_unlock(&dqp->q_hash->qh_lock);
+dqfunlock:
+		xfs_dqfunlock(dqp);
 		xfs_dqunlock(dqp);
-		nreclaimed++;
-		XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
-		xfs_qm_dqdestroy(dqp);
-		dqp = nextdqp;
+		if (dqpout)
+			break;
+		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+			return NULL;
 	}
 	xfs_qm_freelist_unlock(xfs_Gqm);
-	return nreclaimed;
+	return dqpout;
 }
+/*
+ * Traverse the freelist of dquots and attempt to reclaim a maximum of
+ * 'howmany' dquots. This operation races with dqlookup(), and attempts to
+ * favor the lookup function ...
+ */
+STATIC int
+xfs_qm_shake_freelist(
+	int	howmany)
+{
+	int		nreclaimed = 0;
+	xfs_dquot_t	*dqp;
+	if (howmany <= 0)
+		return 0;
+	while (nreclaimed < howmany) {
+		dqp = xfs_qm_dqreclaim_one();
+		if (!dqp)
+			return nreclaimed;
+		xfs_qm_dqdestroy(dqp);
+		nreclaimed++;
+	}
+	return nreclaimed;
+}
 /*
  * The kmem_shake interface is invoked when memory is running low.
@@ -2115,134 +2125,6 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
 }
-/*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqreclaim_one(void)
-{
-	xfs_dquot_t	*dqpout;
-	xfs_dquot_t	*dqp;
-	int		restarts;
-	int		nflushes;
-	restarts = 0;
-	dqpout = NULL;
-	nflushes = 0;
-	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
- startagain:
-	xfs_qm_freelist_lock(xfs_Gqm);
-	FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
-		struct xfs_mount *mp = dqp->q_mount;
-		xfs_dqlock(dqp);
-		/*
-		 * We are racing with dqlookup here. Naturally we don't
-		 * want to reclaim a dquot that lookup wants. We release the
-		 * freelist lock and start over, so that lookup will grab
-		 * both the dquot and the freelistlock.
-		 */
-		if (dqp->dq_flags & XFS_DQ_WANT) {
-			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
-			trace_xfs_dqreclaim_want(dqp);
-			xfs_dqunlock(dqp);
-			xfs_qm_freelist_unlock(xfs_Gqm);
-			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return NULL;
-			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-			goto startagain;
-		}
-		/*
-		 * If the dquot is inactive, we are assured that it is
-		 * not on the mplist or the hashlist, and that makes our
-		 * life easier.
-		 */
-		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
-			ASSERT(mp == NULL);
-			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-			ASSERT(dqp->HL_PREVP == NULL);
-			ASSERT(list_empty(&dqp->q_mplist));
-			XQM_FREELIST_REMOVE(dqp);
-			xfs_dqunlock(dqp);
-			dqpout = dqp;
-			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-			break;
-		}
-		ASSERT(dqp->q_hash);
-		ASSERT(!list_empty(&dqp->q_mplist));
-		/*
-		 * Try to grab the flush lock. If this dquot is in the process of
-		 * getting flushed to disk, we don't want to reclaim it.
-		 */
-		if (!xfs_dqflock_nowait(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
-		/*
-		 * We have the flush lock so we know that this is not in the
-		 * process of being flushed. So, if this is dirty, flush it
-		 * DELWRI so that we don't get a freelist infested with
-		 * dirty dquots.
-		 */
-		if (XFS_DQ_IS_DIRTY(dqp)) {
-			int	error;
-			trace_xfs_dqreclaim_dirty(dqp);
-			/*
-			 * We flush it delayed write, so don't bother
-			 * releasing the freelist lock.
-			 */
-			error = xfs_qm_dqflush(dqp, 0);
-			if (error) {
-				xfs_fs_cmn_err(CE_WARN, mp,
-			"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
-			}
-			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-			continue;
-		}
-		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
-			xfs_dqfunlock(dqp);
-			xfs_dqunlock(dqp);
-			continue;
-		}
-		if (!mutex_trylock(&dqp->q_hash->qh_lock))
-			goto mplistunlock;
-		trace_xfs_dqreclaim_unlink(dqp);
-		ASSERT(dqp->q_nrefs == 0);
-		list_del_init(&dqp->q_mplist);
-		mp->m_quotainfo->qi_dquots--;
-		mp->m_quotainfo->qi_dqreclaims++;
-		XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
-		XQM_FREELIST_REMOVE(dqp);
-		dqpout = dqp;
-		mutex_unlock(&dqp->q_hash->qh_lock);
- mplistunlock:
-		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
-		xfs_dqfunlock(dqp);
-		xfs_dqunlock(dqp);
-		if (dqpout)
-			break;
-	}
-	xfs_qm_freelist_unlock(xfs_Gqm);
-	return dqpout;
-}
 /*------------------------------------------------------------------*/
 /*