Commit 6cf4cc8f authored by Joe Thornber, committed by Mike Snitzer

dm cache policy smq: stop preemptively demoting blocks

Preemptively demoting blocks causes a lot of churn if the working set's
size is close to the fast device's size.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 4d44ec5a
@@ -1134,13 +1134,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		percent_to_target(mq, CLEAN_TARGET);
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;
 
-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
@@ -1220,7 +1217,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		 * We always claim to be 'idle' to ensure some demotions happen
 		 * with continuous loads.
 		 */
-		if (!free_target_met(mq, true))
+		if (!free_target_met(mq))
 			queue_demotion(mq);
 		return;
 	}
@@ -1421,14 +1418,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
+		if (!clean_target_met(mq, idle)) {
 			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);
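To see what the retained check computes, here is a minimal userspace sketch of the post-patch free_target_met() logic. The smq_policy_model struct, its field names, the 25% FREE_TARGET_PERCENT value, and main() are assumptions invented for illustration; only the arithmetic mirrors the kernel code in the diff above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the policy state the diff touches. */
struct smq_policy_model {
	unsigned cache_size;        /* from_cblock(mq->cache_size) */
	unsigned nr_allocated;      /* mq->cache_alloc.nr_allocated */
	unsigned demotions_queued;  /* btracker_nr_demotions_queued(mq->bg_work) */
};

/* Mirrors percent_to_target(): p percent of the cache, in blocks. */
static unsigned percent_to_target(const struct smq_policy_model *mq, unsigned p)
{
	return mq->cache_size * p / 100;
}

/* Assumed target percentage; the real constant lives in the smq policy source. */
#define FREE_TARGET_PERCENT 25u

/*
 * Post-patch free_target_met(): the "if (!idle) return true;" short-circuit
 * is gone, so the same test applies whether or not the cache is idle.
 */
static bool free_target_met(const struct smq_policy_model *mq)
{
	unsigned nr_free = mq->cache_size - mq->nr_allocated;

	return (nr_free + mq->demotions_queued) >=
		percent_to_target(mq, FREE_TARGET_PERCENT);
}

int main(void)
{
	/* Nearly full cache: 1000 blocks, 950 allocated, 10 demotions queued. */
	struct smq_policy_model mq = { 1000, 950, 10 };

	printf("free target met: %s\n", free_target_met(&mq) ? "yes" : "no");
	return 0;
}

The design point of the patch is visible in where this test is still called: after the change, smq_get_background_work() no longer consults it to queue speculative demotions, so a demotion is queued only from queue_promotion(), when the allocator is actually out of free blocks. That is what avoids the churn the commit message describes when the working set barely exceeds the fast device's size.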