Commit 9ed84698 authored by Joe Thornber's avatar Joe Thornber Committed by Mike Snitzer

dm cache: make the 'mq' policy an alias for 'smq'

smq seems to be performing better than the old mq policy in all
situations, as well as using a quarter of the memory.

Make 'mq' an alias for 'smq' when choosing a cache policy.  The tunables
that were present for the old mq are faked, and have no effect.  mq
should be considered deprecated now.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent e233d800
...@@ -28,51 +28,16 @@ Overview of supplied cache replacement policies ...@@ -28,51 +28,16 @@ Overview of supplied cache replacement policies
multiqueue (mq) multiqueue (mq)
--------------- ---------------
This policy is now an alias for smq (see below).

The following tunables are accepted, but have no effect:
waiting for the cache and another two for those in the cache (a set for
clean entries and a set for dirty entries).
Cache entries in the queues are aged based on logical time. Entry into
the cache is based on variable thresholds and queue selection is based
on hit count on entry. The policy aims to take different cache miss
costs into account and to adjust to varying load patterns automatically.
Message and constructor argument pairs are:
'sequential_threshold <#nr_sequential_ios>' 'sequential_threshold <#nr_sequential_ios>'
'random_threshold <#nr_random_ios>' 'random_threshold <#nr_random_ios>'
'read_promote_adjustment <value>' 'read_promote_adjustment <value>'
'write_promote_adjustment <value>' 'write_promote_adjustment <value>'
'discard_promote_adjustment <value>' 'discard_promote_adjustment <value>'
The sequential threshold indicates the number of contiguous I/Os
required before a stream is treated as sequential. Once a stream is
considered sequential it will bypass the cache. The random threshold
is the number of intervening non-contiguous I/Os that must be seen
before the stream is treated as random again.
The sequential and random thresholds default to 512 and 4 respectively.
Large, sequential I/Os are probably better left on the origin device
since spindles tend to have good sequential I/O bandwidth. The
io_tracker counts contiguous I/Os to try to spot when the I/O is in one
of these sequential modes. But there are use-cases for wanting to
promote sequential blocks to the cache (e.g. fast application startup).
If sequential threshold is set to 0 the sequential I/O detection is
disabled and sequential I/O will no longer implicitly bypass the cache.
Setting the random threshold to 0 does _not_ disable the random I/O
stream detection.
Internally the mq policy determines a promotion threshold. If the hit
count of a block not in the cache goes above this threshold it gets
promoted to the cache. The read, write and discard promote adjustment
tunables allow you to tweak the promotion threshold by adding a small
value based on the io type. They default to 4, 8 and 1 respectively.
If you're trying to quickly warm a new cache device you may wish to
reduce these to encourage promotion. Remember to switch them back to
their defaults after the cache fills though.
Stochastic multiqueue (smq) Stochastic multiqueue (smq)
--------------------------- ---------------------------
......
...@@ -304,16 +304,6 @@ config DM_CACHE ...@@ -304,16 +304,6 @@ config DM_CACHE
algorithms used to select which blocks are promoted, demoted, algorithms used to select which blocks are promoted, demoted,
cleaned etc. It supports writeback and writethrough modes. cleaned etc. It supports writeback and writethrough modes.
config DM_CACHE_MQ
tristate "MQ Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
default y
---help---
A cache policy that uses a multiqueue ordered by recent hit
count to select which blocks should be promoted and demoted.
This is meant to be a general purpose policy. It prioritises
reads over writes.
config DM_CACHE_SMQ config DM_CACHE_SMQ
tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)" tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE depends on DM_CACHE
......
...@@ -12,7 +12,6 @@ dm-log-userspace-y \ ...@@ -12,7 +12,6 @@ dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o += dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
dm-cache-mq-y += dm-cache-policy-mq.o
dm-cache-smq-y += dm-cache-policy-smq.o dm-cache-smq-y += dm-cache-policy-smq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o dm-cache-cleaner-y += dm-cache-policy-cleaner.o
dm-era-y += dm-era-target.o dm-era-y += dm-era-target.o
...@@ -55,7 +54,6 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o ...@@ -55,7 +54,6 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o obj-$(CONFIG_DM_ERA) += dm-era.o
......
This diff is collapsed.
...@@ -1567,8 +1567,48 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block) ...@@ -1567,8 +1567,48 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
spin_unlock_irqrestore(&mq->lock, flags); spin_unlock_irqrestore(&mq->lock, flags);
} }
/*
* smq has no config values, but the old mq policy did. To avoid breaking
* software we continue to accept these configurables for the mq policy,
* but they have no effect.
*/
/*
 * smq has no config values, but the old mq policy did.  To avoid breaking
 * existing software we keep accepting the legacy mq tunables for the mq
 * alias, warn, and otherwise ignore them.
 *
 * Returns 0 for a recognised (ignored) legacy key with a numeric value,
 * -EINVAL for a non-numeric value or an unknown key.
 */
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	static const char * const legacy_keys[] = {
		"random_threshold",
		"sequential_threshold",
		"discard_promote_adjustment",
		"read_promote_adjustment",
		"write_promote_adjustment",
	};
	unsigned long unused;
	unsigned i;

	/* The value must still parse as a number, just as mq required. */
	if (kstrtoul(value, 10, &unused))
		return -EINVAL;

	for (i = 0; i < sizeof(legacy_keys) / sizeof(legacy_keys[0]); i++) {
		if (!strcasecmp(key, legacy_keys[i])) {
			DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * Report the legacy mq tunables (all faked as 0) so status output stays
 * compatible with tooling written for the old mq policy.  The leading
 * "10" is the field count.  Always succeeds.
 */
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned maxlen, ssize_t *sz_ptr)
{
	static const char fake_tunables[] =
		"10 random_threshold 0 "
		"sequential_threshold 0 "
		"discard_promote_adjustment 0 "
		"read_promote_adjustment 0 "
		"write_promote_adjustment 0 ";
	ssize_t sz = *sz_ptr;

	/* DMEMIT appends into result/maxlen, advancing sz. */
	DMEMIT("%s", fake_tunables);

	*sz_ptr = sz;
	return 0;
}
/* Init the policy plugin interface function pointers. */ /* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct smq_policy *mq) static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{ {
mq->policy.destroy = smq_destroy; mq->policy.destroy = smq_destroy;
mq->policy.map = smq_map; mq->policy.map = smq_map;
...@@ -1583,6 +1623,11 @@ static void init_policy_functions(struct smq_policy *mq) ...@@ -1583,6 +1623,11 @@ static void init_policy_functions(struct smq_policy *mq)
mq->policy.force_mapping = smq_force_mapping; mq->policy.force_mapping = smq_force_mapping;
mq->policy.residency = smq_residency; mq->policy.residency = smq_residency;
mq->policy.tick = smq_tick; mq->policy.tick = smq_tick;
if (mimic_mq) {
mq->policy.set_config_value = mq_set_config_value;
mq->policy.emit_config_values = mq_emit_config_values;
}
} }
static bool too_many_hotspot_blocks(sector_t origin_size, static bool too_many_hotspot_blocks(sector_t origin_size,
...@@ -1606,9 +1651,10 @@ static void calc_hotspot_params(sector_t origin_size, ...@@ -1606,9 +1651,10 @@ static void calc_hotspot_params(sector_t origin_size,
*hotspot_block_size /= 2u; *hotspot_block_size /= 2u;
} }
static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
sector_t origin_size, sector_t origin_size,
sector_t cache_block_size) sector_t cache_block_size,
bool mimic_mq)
{ {
unsigned i; unsigned i;
unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS; unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
...@@ -1618,7 +1664,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, ...@@ -1618,7 +1664,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
if (!mq) if (!mq)
return NULL; return NULL;
init_policy_functions(mq); init_policy_functions(mq, mimic_mq);
mq->cache_size = cache_size; mq->cache_size = cache_size;
mq->cache_block_size = cache_block_size; mq->cache_block_size = cache_block_size;
...@@ -1706,19 +1752,41 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, ...@@ -1706,19 +1752,41 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
return NULL; return NULL;
} }
/*
 * Entry point for the "smq" policy proper: create an instance without the
 * legacy mq configurable shims (mimic_mq = false).
 */
static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false);
}
/*
 * Entry point for the deprecated "mq" policy: identical to smq, but with
 * mimic_mq = true so the legacy mq tunables are accepted (and ignored).
 */
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, true);
}
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
static struct dm_cache_policy_type smq_policy_type = { static struct dm_cache_policy_type smq_policy_type = {
.name = "smq", .name = "smq",
.version = {1, 0, 0}, .version = {1, 5, 0},
.hint_size = 4, .hint_size = 4,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.create = smq_create .create = smq_create
}; };
/*
 * Registration record for the deprecated "mq" policy name.  It shares the
 * smq implementation (via mq_create) and hint size; only the name differs.
 */
static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 5, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create,
};
static struct dm_cache_policy_type default_policy_type = { static struct dm_cache_policy_type default_policy_type = {
.name = "default", .name = "default",
.version = {1, 4, 0}, .version = {1, 5, 0},
.hint_size = 4, .hint_size = 4,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.create = smq_create, .create = smq_create,
...@@ -1735,9 +1803,17 @@ static int __init smq_init(void) ...@@ -1735,9 +1803,17 @@ static int __init smq_init(void)
return -ENOMEM; return -ENOMEM;
} }
r = dm_cache_policy_register(&mq_policy_type);
if (r) {
DMERR("register failed %d", r);
dm_cache_policy_unregister(&smq_policy_type);
return -ENOMEM;
}
r = dm_cache_policy_register(&default_policy_type); r = dm_cache_policy_register(&default_policy_type);
if (r) { if (r) {
DMERR("register failed (as default) %d", r); DMERR("register failed (as default) %d", r);
dm_cache_policy_unregister(&mq_policy_type);
dm_cache_policy_unregister(&smq_policy_type); dm_cache_policy_unregister(&smq_policy_type);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1748,6 +1824,7 @@ static int __init smq_init(void) ...@@ -1748,6 +1824,7 @@ static int __init smq_init(void)
static void __exit smq_exit(void) static void __exit smq_exit(void)
{ {
dm_cache_policy_unregister(&smq_policy_type); dm_cache_policy_unregister(&smq_policy_type);
dm_cache_policy_unregister(&mq_policy_type);
dm_cache_policy_unregister(&default_policy_type); dm_cache_policy_unregister(&default_policy_type);
} }
...@@ -1759,3 +1836,4 @@ MODULE_LICENSE("GPL"); ...@@ -1759,3 +1836,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy"); MODULE_DESCRIPTION("smq cache policy");
MODULE_ALIAS("dm-cache-default"); MODULE_ALIAS("dm-cache-default");
MODULE_ALIAS("dm-cache-mq");
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment