Commit de7180ff authored by Mike Snitzer

dm cache: add support for discard passdown to the origin device

DM cache now defaults to passing discards down to the origin device.
Users may disable this using the "no_discard_passdown" feature when
creating the cache device.

If the cache's underlying origin device doesn't support discards then
passdown is disabled (with a warning).  Similarly, if the underlying
origin device's max_discard_sectors is less than a cache block, discard
passdown will be disabled (this is required because the sizing of the
cache's internal discard bitset depends on it).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent f87e033b
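For context, a hypothetical example of opting out on the target's table line
(device names and sizes are invented; the cache target's table format is:
cache <metadata dev> <cache dev> <origin dev> <block size> <#feature args>
[<feature arg>]* <policy> <#policy args> [<policy arg>]*):

    # Default behaviour: discards are passed down to the origin device.
    dmsetup create cached --table \
        '0 41943040 cache /dev/mapper/meta /dev/mapper/fast /dev/mapper/slow 512 0 default 0'

    # Opt out with the new feature argument.
    dmsetup create cached --table \
        '0 41943040 cache /dev/mapper/meta /dev/mapper/fast /dev/mapper/slow 512 1 no_discard_passdown default 0'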
Documentation/device-mapper/cache.txt
@@ -206,6 +206,9 @@ Optional feature arguments are:
 		  in a separate btree, which improves speed of shutting
 		  down the cache.
 
+   no_discard_passdown	: disable passing down discards from the cache
+			  to the origin's data device.
+
 A policy called 'default' is always registered. This is an alias for
 the policy we currently think is giving best all round performance.
...
drivers/md/dm-cache-target.c
@@ -353,6 +353,7 @@ struct cache_features {
 	enum cache_metadata_mode mode;
 	enum cache_io_mode io_mode;
 	unsigned metadata_version;
+	bool discard_passdown:1;
 };
 
 struct cache_stats {
@@ -1899,7 +1900,11 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
 		b = to_dblock(from_dblock(b) + 1);
 	}
 
-	bio_endio(bio);
+	if (cache->features.discard_passdown) {
+		remap_to_origin(cache, bio);
+		generic_make_request(bio);
+	} else
+		bio_endio(bio);
 
 	return false;
 }
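Note that in the passdown case the discard still updates the cache's internal
discard bitset first (the loop over discard blocks above); the bio is then
remapped to the origin and resubmitted via generic_make_request() rather than
being completed immediately with bio_endio().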
@@ -2233,13 +2238,14 @@ static void init_features(struct cache_features *cf)
 	cf->mode = CM_WRITE;
 	cf->io_mode = CM_IO_WRITEBACK;
 	cf->metadata_version = 1;
+	cf->discard_passdown = true;
 }
 
 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 			  char **error)
 {
 	static const struct dm_arg _args[] = {
-		{0, 2, "Invalid number of cache feature arguments"},
+		{0, 3, "Invalid number of cache feature arguments"},
 	};
 
 	int r, mode_ctr = 0;
@@ -2274,6 +2280,9 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 		else if (!strcasecmp(arg, "metadata2"))
 			cf->metadata_version = 2;
 
+		else if (!strcasecmp(arg, "no_discard_passdown"))
+			cf->discard_passdown = false;
+
 		else {
 			*error = "Unrecognised cache feature requested";
 			return -EINVAL;
@@ -3119,6 +3128,39 @@ static void cache_resume(struct dm_target *ti)
 	do_waker(&cache->waker.work);
 }
 
+static void emit_flags(struct cache *cache, char *result,
+		       unsigned maxlen, ssize_t *sz_ptr)
+{
+	ssize_t sz = *sz_ptr;
+	struct cache_features *cf = &cache->features;
+	unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+
+	DMEMIT("%u ", count);
+
+	if (cf->metadata_version == 2)
+		DMEMIT("metadata2 ");
+
+	if (writethrough_mode(cache))
+		DMEMIT("writethrough ");
+
+	else if (passthrough_mode(cache))
+		DMEMIT("passthrough ");
+
+	else if (writeback_mode(cache))
+		DMEMIT("writeback ");
+
+	else {
+		DMEMIT("unknown ");
+		DMERR("%s: internal error: unknown io mode: %d",
+		      cache_device_name(cache), (int) cf->io_mode);
+	}
+
+	if (!cf->discard_passdown)
+		DMEMIT("no_discard_passdown ");
+
+	*sz_ptr = sz;
+}
+
 /*
  * Status format:
  *
@@ -3185,25 +3227,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 			   (unsigned) atomic_read(&cache->stats.promotion),
 			   (unsigned long) atomic_read(&cache->nr_dirty));
 
-		if (cache->features.metadata_version == 2)
-			DMEMIT("2 metadata2 ");
-		else
-			DMEMIT("1 ");
-
-		if (writethrough_mode(cache))
-			DMEMIT("writethrough ");
-
-		else if (passthrough_mode(cache))
-			DMEMIT("passthrough ");
-
-		else if (writeback_mode(cache))
-			DMEMIT("writeback ");
-
-		else {
-			DMERR("%s: internal error: unknown io mode: %d",
-			      cache_device_name(cache), (int) cache->features.io_mode);
-			goto err;
-		}
+		emit_flags(cache, result, maxlen, &sz);
 
 		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
@@ -3432,14 +3456,62 @@ static int cache_iterate_devices(struct dm_target *ti,
 	return r;
 }
 
+static bool origin_dev_supports_discard(struct block_device *origin_bdev)
+{
+	struct request_queue *q = bdev_get_queue(origin_bdev);
+
+	return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled verify that the origin device
+ * supports discards.  Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct cache *cache)
+{
+	struct block_device *origin_bdev = cache->origin_dev->bdev;
+	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+	const char *reason = NULL;
+	char buf[BDEVNAME_SIZE];
+
+	if (!cache->features.discard_passdown)
+		return;
+
+	if (!origin_dev_supports_discard(origin_bdev))
+		reason = "discard unsupported";
+
+	else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
+		reason = "max discard sectors smaller than a block";
+
+	if (reason) {
+		DMWARN("Origin device (%s) %s: Disabling discard passdown.",
+		       bdevname(origin_bdev, buf), reason);
+		cache->features.discard_passdown = false;
+	}
+}
+
 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 {
+	struct block_device *origin_bdev = cache->origin_dev->bdev;
+	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
+
+	if (!cache->features.discard_passdown) {
+		/* No passdown is done so setting own virtual limits */
+		limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
+						    cache->origin_sectors);
+		limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+		return;
+	}
+
 	/*
-	 * FIXME: these limits may be incompatible with the cache device
+	 * cache_iterate_devices() is stacking both origin and fast device limits
+	 * but discards aren't passed to fast device, so inherit origin's limits.
 	 */
-	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
-					    cache->origin_sectors);
-	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+	limits->max_discard_sectors = origin_limits->max_discard_sectors;
+	limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
+	limits->discard_granularity = origin_limits->discard_granularity;
+	limits->discard_alignment = origin_limits->discard_alignment;
+	limits->discard_misaligned = origin_limits->discard_misaligned;
 }
 
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
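A hedged worked example of the second check (numbers invented): with 256KiB
cache blocks, cache->sectors_per_block is 512; if the origin's queue only
advertises max_discard_sectors = 256 (128KiB), a single cache-block discard
could not be passed down in one request, so disable_passdown_if_not_supported()
logs "max discard sectors smaller than a block" and the target falls back to
its own virtual discard limits.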
@@ -3456,6 +3528,8 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
 		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
 	}
+
+	disable_passdown_if_not_supported(cache);
 	set_discard_limits(cache, limits);
 }
@@ -3463,7 +3537,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {2, 0, 0},
+	.version = {2, 1, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,
...