Commit 65de5567 authored by David Chinner, committed by Tim Shimmin

[XFS] On-demand reaping of the MRU cache

Instead of running the mru cache reaper all the time based on a timeout,
we should only run it when the cache has active objects. This allows CPUs
to sleep when there is no activity rather than be woken repeatedly just to
check if there is anything to do.

SGI-PV: 968554
SGI-Modid: xfs-linux-melb:xfs-kern:29305a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent c2f82897
...@@ -467,8 +467,7 @@ void ...@@ -467,8 +467,7 @@ void
xfs_filestream_flush( xfs_filestream_flush(
xfs_mount_t *mp) xfs_mount_t *mp)
{ {
/* point in time flush, so keep the reaper running */ xfs_mru_cache_flush(mp->m_filestream);
xfs_mru_cache_flush(mp->m_filestream, 1);
} }
/* /*
......
...@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert( ...@@ -206,8 +206,11 @@ _xfs_mru_cache_list_insert(
*/ */
if (!_xfs_mru_cache_migrate(mru, now)) { if (!_xfs_mru_cache_migrate(mru, now)) {
mru->time_zero = now; mru->time_zero = now;
if (!mru->next_reap) if (!mru->queued) {
mru->next_reap = mru->grp_count * mru->grp_time; mru->queued = 1;
queue_delayed_work(xfs_mru_reap_wq, &mru->work,
mru->grp_count * mru->grp_time);
}
} else { } else {
grp = (now - mru->time_zero) / mru->grp_time; grp = (now - mru->time_zero) / mru->grp_time;
grp = (mru->lru_grp + grp) % mru->grp_count; grp = (mru->lru_grp + grp) % mru->grp_count;
...@@ -271,29 +274,26 @@ _xfs_mru_cache_reap( ...@@ -271,29 +274,26 @@ _xfs_mru_cache_reap(
struct work_struct *work) struct work_struct *work)
{ {
xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work); xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work);
unsigned long now; unsigned long now, next;
ASSERT(mru && mru->lists); ASSERT(mru && mru->lists);
if (!mru || !mru->lists) if (!mru || !mru->lists)
return; return;
mutex_spinlock(&mru->lock); mutex_spinlock(&mru->lock);
now = jiffies; next = _xfs_mru_cache_migrate(mru, jiffies);
if (mru->reap_all || _xfs_mru_cache_clear_reap_list(mru);
(mru->next_reap && time_after(now, mru->next_reap))) {
if (mru->reap_all) mru->queued = next;
now += mru->grp_count * mru->grp_time * 2; if ((mru->queued > 0)) {
mru->next_reap = _xfs_mru_cache_migrate(mru, now); now = jiffies;
_xfs_mru_cache_clear_reap_list(mru); if (next <= now)
next = 0;
else
next -= now;
queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
} }
/*
* the process that triggered the reap_all is responsible
* for restating the periodic reap if it is required.
*/
if (!mru->reap_all)
queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
mru->reap_all = 0;
mutex_spinunlock(&mru->lock, 0); mutex_spinunlock(&mru->lock, 0);
} }
...@@ -352,7 +352,7 @@ xfs_mru_cache_create( ...@@ -352,7 +352,7 @@ xfs_mru_cache_create(
/* An extra list is needed to avoid reaping up to a grp_time early. */ /* An extra list is needed to avoid reaping up to a grp_time early. */
mru->grp_count = grp_count + 1; mru->grp_count = grp_count + 1;
mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP);
if (!mru->lists) { if (!mru->lists) {
err = ENOMEM; err = ENOMEM;
...@@ -374,11 +374,6 @@ xfs_mru_cache_create( ...@@ -374,11 +374,6 @@ xfs_mru_cache_create(
mru->grp_time = grp_time; mru->grp_time = grp_time;
mru->free_func = free_func; mru->free_func = free_func;
/* start up the reaper event */
mru->next_reap = 0;
mru->reap_all = 0;
queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
*mrup = mru; *mrup = mru;
exit: exit:
...@@ -394,35 +389,25 @@ xfs_mru_cache_create( ...@@ -394,35 +389,25 @@ xfs_mru_cache_create(
* Call xfs_mru_cache_flush() to flush out all cached entries, calling their * Call xfs_mru_cache_flush() to flush out all cached entries, calling their
* free functions as they're deleted. When this function returns, the caller is * free functions as they're deleted. When this function returns, the caller is
* guaranteed that all the free functions for all the elements have finished * guaranteed that all the free functions for all the elements have finished
* executing. * executing and the reaper is not running.
*
* While we are flushing, we stop the periodic reaper event from triggering.
* Normally, we want to restart this periodic event, but if we are shutting
* down the cache we do not want it restarted. hence the restart parameter
* where 0 = do not restart reaper and 1 = restart reaper.
*/ */
void void
xfs_mru_cache_flush( xfs_mru_cache_flush(
xfs_mru_cache_t *mru, xfs_mru_cache_t *mru)
int restart)
{ {
if (!mru || !mru->lists) if (!mru || !mru->lists)
return; return;
cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
mutex_spinlock(&mru->lock); mutex_spinlock(&mru->lock);
mru->reap_all = 1; if (mru->queued) {
mutex_spinunlock(&mru->lock, 0); mutex_spinunlock(&mru->lock, 0);
cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
mutex_spinlock(&mru->lock);
}
queue_work(xfs_mru_reap_wq, &mru->work.work); _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
flush_workqueue(xfs_mru_reap_wq); _xfs_mru_cache_clear_reap_list(mru);
mutex_spinlock(&mru->lock);
WARN_ON_ONCE(mru->reap_all != 0);
mru->reap_all = 0;
if (restart)
queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time);
mutex_spinunlock(&mru->lock, 0); mutex_spinunlock(&mru->lock, 0);
} }
...@@ -433,8 +418,7 @@ xfs_mru_cache_destroy( ...@@ -433,8 +418,7 @@ xfs_mru_cache_destroy(
if (!mru || !mru->lists) if (!mru || !mru->lists)
return; return;
/* we don't want the reaper to restart here */ xfs_mru_cache_flush(mru);
xfs_mru_cache_flush(mru, 0);
kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists)); kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
kmem_free(mru, sizeof(*mru)); kmem_free(mru, sizeof(*mru));
......
...@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache ...@@ -32,11 +32,9 @@ typedef struct xfs_mru_cache
unsigned int grp_time; /* Time period spanned by grps. */ unsigned int grp_time; /* Time period spanned by grps. */
unsigned int lru_grp; /* Group containing time zero. */ unsigned int lru_grp; /* Group containing time zero. */
unsigned long time_zero; /* Time first element was added. */ unsigned long time_zero; /* Time first element was added. */
unsigned long next_reap; /* Time that the reaper should
next do something. */
unsigned int reap_all; /* if set, reap all lists */
xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */ xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
struct delayed_work work; /* Workqueue data for reaping. */ struct delayed_work work; /* Workqueue data for reaping. */
unsigned int queued; /* work has been queued */
} xfs_mru_cache_t; } xfs_mru_cache_t;
int xfs_mru_cache_init(void); int xfs_mru_cache_init(void);
...@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void); ...@@ -44,7 +42,7 @@ void xfs_mru_cache_uninit(void);
int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms, int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
unsigned int grp_count, unsigned int grp_count,
xfs_mru_cache_free_func_t free_func); xfs_mru_cache_free_func_t free_func);
void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart); void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
void xfs_mru_cache_destroy(struct xfs_mru_cache *mru); void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key, int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
void *value); void *value);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment