Commit 40775257 authored by Joe Thornber, committed by Mike Snitzer

dm cache: boost promotion of blocks that will be overwritten

When considering whether to move a block to the cache we already give
preferential treatment to discarded blocks, since they are cheap to
promote (no read of the origin is required because the data is junk).

The same is true of blocks that are about to be completely
overwritten, so we likewise boost their promotion chances.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 651f5fa2
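
The change renames discarded_block to fast_promotion and adds a bio_writes_complete_block() check alongside the existing is_discarded_oblock() test. The helper's body is not part of the hunks shown below; as a rough sketch only (not necessarily the exact code from this commit), a "does this bio overwrite the whole cache block?" test could look like the following, assuming the target keeps its block size in a sectors_per_block field:

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	/*
	 * Sketch: only a WRITE that spans the entire cache block makes the
	 * origin data irrelevant, letting promotion skip the copy from the
	 * origin device.
	 */
	return (bio_data_dir(bio) == WRITE) &&
	       (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}

Both call sites below then treat such a bio the same way as a discarded block when deciding whether a promotion is cheap enough to attempt.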
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1697,17 +1697,17 @@ static void process_cell(struct cache *cache, struct prealloc *structs,
 	dm_oblock_t block = get_bio_block(cache, bio);
 	struct policy_result lookup_result;
 	bool passthrough = passthrough_mode(&cache->features);
-	bool discarded_block, can_migrate;
+	bool fast_promotion, can_migrate;
 	struct old_oblock_lock ool;
 
-	discarded_block = is_discarded_oblock(cache, block);
-	can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
+	fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
+	can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache));
 
 	ool.locker.fn = cell_locker;
 	ool.cache = cache;
 	ool.structs = structs;
 	ool.cell = NULL;
-	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+	r = policy_map(cache->policy, block, true, can_migrate, fast_promotion,
 		       bio, &ool.locker, &lookup_result);
 
 	if (r == -EWOULDBLOCK)
@@ -2895,7 +2895,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	dm_oblock_t block = get_bio_block(cache, bio);
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	bool can_migrate = false;
-	bool discarded_block;
+	bool fast_promotion;
 	struct policy_result lookup_result;
 	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 	struct old_oblock_lock ool;
@@ -2937,9 +2937,9 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	discarded_block = is_discarded_oblock(cache, block);
+	fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio);
 
-	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+	r = policy_map(cache->policy, block, false, can_migrate, fast_promotion,
 		       bio, &ool.locker, &lookup_result);
 	if (r == -EWOULDBLOCK) {
 		cell_defer(cache, cell, true);