Commit 34f5affd authored by Hannes Reinecke, committed by Mike Snitzer

dm zoned: separate random and cache zones

Instead of lumping emulated zones together with random zones, handle
them as separate 'cache' zones. This improves code readability and
allows for an easier implementation of different cache policies.

Also add new allocation flags to separate the zone type (cache,
random, or sequential) from the allocation purpose (e.g. reclaim).

Finally, switch the allocation policy so that random zones are not
used as buffer zones when cache zones are present. This avoids a
performance drop once all cache zones have been used.
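As a concrete illustration of the type/purpose split, the flag values below are taken from the dm-zoned.h hunk further down, and the composed call mirrors what the reclaim path now does:

/* Type flags: exactly one is passed to dmz_alloc_zone(). */
#define DMZ_ALLOC_RND		0x01	/* allocate a random zone */
#define DMZ_ALLOC_CACHE		0x02	/* allocate a cache zone */
#define DMZ_ALLOC_SEQ		0x04	/* allocate a sequential zone */
/* Purpose flag: may be OR-ed with any type flag. */
#define DMZ_ALLOC_RECLAIM	0x10	/* allocation made on behalf of reclaim */

/* e.g. reclaim requesting a sequential destination zone: */
unsigned long flags = DMZ_ALLOC_SEQ | DMZ_ALLOC_RECLAIM;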
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 489dc0f0
@@ -43,13 +43,13 @@ enum {
  * Percentage of unmapped (free) random zones below which reclaim starts
  * even if the target is busy.
  */
-#define DMZ_RECLAIM_LOW_UNMAP_RND	30
+#define DMZ_RECLAIM_LOW_UNMAP_ZONES	30
 
 /*
  * Percentage of unmapped (free) random zones above which reclaim will
  * stop if the target is busy.
  */
-#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
+#define DMZ_RECLAIM_HIGH_UNMAP_ZONES	50
 
 /*
  * Align a sequential zone write pointer to chunk_block.
@@ -281,17 +281,21 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 	struct dm_zone *szone = NULL;
 	struct dmz_metadata *zmd = zrc->metadata;
 	int ret;
+	int alloc_flags = dmz_nr_cache_zones(zmd) ?
+		DMZ_ALLOC_RND : DMZ_ALLOC_SEQ;
 
 	/* Get a free sequential zone */
 	dmz_lock_map(zmd);
-	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
+	szone = dmz_alloc_zone(zmd, alloc_flags | DMZ_ALLOC_RECLAIM);
 	dmz_unlock_map(zmd);
 	if (!szone)
 		return -ENOSPC;
 
-	DMDEBUG("(%s): Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
-		dmz_metadata_label(zmd),
-		chunk, dzone->id, dmz_weight(dzone), szone->id);
+	DMDEBUG("(%s): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
+		dmz_metadata_label(zmd), chunk,
+		dmz_is_cache(dzone) ? "cache" : "rnd",
+		dzone->id, dmz_weight(dzone),
+		dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);
 
 	/* Flush the random data zone into the sequential zone */
 	ret = dmz_reclaim_copy(zrc, dzone, szone);
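One design note on the destination selection above: when cache zones are present, random zones no longer act as buffer zones, so reclaim may move data into a random zone; without cache zones, the destination must be a sequential zone. A hypothetical one-line restatement of that policy (not part of the driver):

/* Hypothetical restatement of the reclaim-destination policy above. */
static unsigned long dmz_reclaim_dest_flags(unsigned int nr_cache_zones)
{
	return (nr_cache_zones ? DMZ_ALLOC_RND : DMZ_ALLOC_SEQ) | DMZ_ALLOC_RECLAIM;
}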
@@ -356,7 +360,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
 		return -EBUSY;
 
 	start = jiffies;
-	if (dmz_is_rnd(dzone)) {
+	if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
 		if (!dmz_weight(dzone)) {
 			/* Empty zone */
 			dmz_reclaim_empty(zrc, dzone);
@@ -422,29 +426,41 @@ static inline int dmz_target_idle(struct dmz_reclaim *zrc)
 	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
 }
 
-/*
- * Test if reclaim is necessary.
- */
-static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
+static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
 {
 	struct dmz_metadata *zmd = zrc->metadata;
+	unsigned int nr_cache = dmz_nr_cache_zones(zmd);
 	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
-	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
-	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
+	unsigned int nr_unmap, nr_zones;
 
+	if (nr_cache) {
+		nr_zones = nr_cache;
+		nr_unmap = dmz_nr_unmap_cache_zones(zmd);
+	} else {
+		nr_zones = nr_rnd;
+		nr_unmap = dmz_nr_unmap_rnd_zones(zmd);
+	}
+	return nr_unmap * 100 / nr_zones;
+}
+
+/*
+ * Test if reclaim is necessary.
+ */
+static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
+{
 	/* Reclaim when idle */
-	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
+	if (dmz_target_idle(zrc) && p_unmap < 100)
 		return true;
 
-	/* If there are still plenty of random zones, do not reclaim */
-	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
+	/* If there are still plenty of cache zones, do not reclaim */
+	if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
 		return false;
 
 	/*
-	 * If the percentage of unmapped random zones is low,
+	 * If the percentage of unmapped cache zones is low,
 	 * reclaim even if the target is busy.
 	 */
-	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
+	return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
 }
 
 /*
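To make the two thresholds concrete, here is a self-contained sketch (standalone user-space C, not driver code) of the percentage and decision logic above, with a worked example in main():

#include <stdbool.h>
#include <stdio.h>

#define DMZ_RECLAIM_LOW_UNMAP_ZONES	30
#define DMZ_RECLAIM_HIGH_UNMAP_ZONES	50

/* Model of dmz_reclaim_percentage(): prefer the cache-zone counters
 * when cache zones exist, otherwise fall back to the random-zone ones. */
static unsigned int reclaim_percentage(unsigned int nr_cache,
				       unsigned int nr_unmap_cache,
				       unsigned int nr_rnd,
				       unsigned int nr_unmap_rnd)
{
	if (nr_cache)
		return nr_unmap_cache * 100 / nr_cache;
	return nr_unmap_rnd * 100 / nr_rnd;
}

/* Model of dmz_should_reclaim() after this change. */
static bool should_reclaim(bool idle, unsigned int p_unmap)
{
	if (idle && p_unmap < 100)
		return true;	/* idle: reclaim unless everything is already free */
	if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
		return false;	/* plenty of unmapped zones: leave the target alone */
	return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES; /* low: reclaim even if busy */
}

int main(void)
{
	/* 128 cache zones, 32 of them unmapped -> p_unmap = 25 (<= 30),
	 * so reclaim runs even though the target is busy. */
	unsigned int p = reclaim_percentage(128, 32, 0, 0);

	printf("p_unmap=%u, busy reclaim=%d\n", p, should_reclaim(false, p));
	return 0;
}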
@@ -454,14 +470,14 @@ static void dmz_reclaim_work(struct work_struct *work)
 {
 	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
 	struct dmz_metadata *zmd = zrc->metadata;
-	unsigned int nr_rnd, nr_unmap_rnd;
-	unsigned int p_unmap_rnd;
+	unsigned int p_unmap;
 	int ret;
 
 	if (dmz_dev_is_dying(zmd))
 		return;
 
-	if (!dmz_should_reclaim(zrc)) {
+	p_unmap = dmz_reclaim_percentage(zrc);
+	if (!dmz_should_reclaim(zrc, p_unmap)) {
 		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
 		return;
 	}
@@ -472,22 +488,22 @@ static void dmz_reclaim_work(struct work_struct *work)
 	 * and slower if there are still some free random zones to avoid
 	 * as much as possible to negatively impact the user workload.
 	 */
-	nr_rnd = dmz_nr_rnd_zones(zmd);
-	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
-	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
-	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
+	if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
 		/* Idle or very low percentage: go fast */
 		zrc->kc_throttle.throttle = 100;
 	} else {
 		/* Busy but we still have some random zone: throttle */
-		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
+		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
 	}
 
-	DMDEBUG("(%s): Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
+	DMDEBUG("(%s): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
 		dmz_metadata_label(zmd),
 		zrc->kc_throttle.throttle,
 		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
-		p_unmap_rnd, nr_unmap_rnd, nr_rnd);
+		p_unmap, dmz_nr_unmap_cache_zones(zmd),
+		dmz_nr_cache_zones(zmd),
+		dmz_nr_unmap_rnd_zones(zmd),
+		dmz_nr_rnd_zones(zmd));
 
 	ret = dmz_do_reclaim(zrc);
 	if (ret) {
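Working through the throttle arithmetic above: when the target is busy, dmz_should_reclaim() only lets reclaim run with p_unmap at or below DMZ_RECLAIM_LOW_UNMAP_ZONES (30), so 100 - p_unmap / 2 is at least 85 and the min() effectively pins the busy-path copy throttle at 75%. A standalone sketch of the policy (assumes the constant defined earlier; not driver code):

/* Standalone sketch of the kcopyd throttle policy. */
static unsigned int reclaim_throttle(bool idle, unsigned int p_unmap)
{
	if (idle || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2)
		return 100;	/* idle or nearly out of free zones: full speed */
	/*
	 * Busy: this path is only reached with p_unmap <= 30, so
	 * 100 - p_unmap / 2 >= 85 and the result is always 75.
	 */
	return 75U < 100U - p_unmap / 2 ? 75U : 100U - p_unmap / 2;
}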
@@ -585,7 +601,9 @@ void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
  */
 void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
 {
-	if (dmz_should_reclaim(zrc))
+	unsigned int p_unmap = dmz_reclaim_percentage(zrc);
+
+	if (dmz_should_reclaim(zrc, p_unmap))
 		mod_delayed_work(zrc->wq, &zrc->work, 0);
 }
@@ -190,7 +190,8 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 	DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
 		dmz_metadata_label(zmd),
 		(unsigned long long)dmz_bio_chunk(zmd, bio),
-		(dmz_is_rnd(zone) ? "RND" : "SEQ"),
+		(dmz_is_rnd(zone) ? "RND" :
+		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
 		zone->id,
 		(unsigned long long)chunk_block, nr_blocks);
@@ -198,7 +199,8 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 	bzone = zone->bzone;
 	while (chunk_block < end_block) {
 		nr_blocks = 0;
-		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+		if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+		    chunk_block < zone->wp_block) {
 			/* Test block validity in the data zone */
 			ret = dmz_block_valid(zmd, zone, chunk_block);
 			if (ret < 0)
@@ -331,11 +333,13 @@ static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
 	DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
 		dmz_metadata_label(zmd),
 		(unsigned long long)dmz_bio_chunk(zmd, bio),
-		(dmz_is_rnd(zone) ? "RND" : "SEQ"),
+		(dmz_is_rnd(zone) ? "RND" :
+		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
 		zone->id,
 		(unsigned long long)chunk_block, nr_blocks);
 
-	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+	    chunk_block == zone->wp_block) {
 		/*
 		 * zone is a random zone or it is a sequential zone
 		 * and the BIO is aligned to the zone write pointer:
@@ -381,7 +385,8 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
 	 * Invalidate blocks in the data zone and its
 	 * buffer zone if one is mapped.
 	 */
-	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+	    chunk_block < zone->wp_block)
 		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
 	if (ret == 0 && zone->bzone)
 		ret = dmz_invalidate_blocks(zmd, zone->bzone,
@@ -1065,8 +1070,10 @@ static void dmz_status(struct dm_target *ti, status_type_t type,
 	switch (type) {
 	case STATUSTYPE_INFO:
-		DMEMIT("%u zones %u/%u random %u/%u sequential",
+		DMEMIT("%u zones %u/%u cache %u/%u random %u/%u sequential",
 		       dmz_nr_zones(dmz->metadata),
+		       dmz_nr_unmap_cache_zones(dmz->metadata),
+		       dmz_nr_cache_zones(dmz->metadata),
 		       dmz_nr_unmap_rnd_zones(dmz->metadata),
 		       dmz_nr_rnd_zones(dmz->metadata),
 		       dmz_nr_unmap_seq_zones(dmz->metadata),
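With the two extra counters, the STATUSTYPE_INFO string gains a cache field ahead of the random and sequential ones. An illustrative info string as emitted after this change (hypothetical, made-up numbers), where each pair is <unmapped>/<total> zones of that class:

5156 zones 102/128 cache 227/256 random 3584/4096 sequential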
@@ -111,6 +111,7 @@ struct dm_zone {
  */
 enum {
 	/* Zone write type */
+	DMZ_CACHE,
 	DMZ_RND,
 	DMZ_SEQ,
@@ -131,6 +132,7 @@ enum {
 /*
  * Zone data accessors.
  */
+#define dmz_is_cache(z)		test_bit(DMZ_CACHE, &(z)->flags)
 #define dmz_is_rnd(z)		test_bit(DMZ_RND, &(z)->flags)
 #define dmz_is_seq(z)		test_bit(DMZ_SEQ, &(z)->flags)
 #define dmz_is_empty(z)		((z)->wp_block == 0)
@@ -189,7 +191,9 @@ bool dmz_check_dev(struct dmz_metadata *zmd);
 bool dmz_dev_is_dying(struct dmz_metadata *zmd);
 
 #define DMZ_ALLOC_RND		0x01
-#define DMZ_ALLOC_RECLAIM	0x02
+#define DMZ_ALLOC_CACHE		0x02
+#define DMZ_ALLOC_SEQ		0x04
+#define DMZ_ALLOC_RECLAIM	0x10
 
 struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
@@ -198,6 +202,8 @@ void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
 			  unsigned int chunk);
 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
 unsigned int dmz_nr_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd);
 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
 unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd);