Commit 2eecf3a4 authored by Linus Torvalds

Merge tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - DM cache metadata fixes to short-circuit operations that require the
   metadata not be in 'fail_io' mode; otherwise crashes are possible (a
   minimal sketch of the guard pattern follows the commit metadata below)

 - a DM cache fix to address the inability to adapt to continuous IO
   that happened to also reflect a changing working set (which required
   old blocks be demoted before the new working set could be promoted)

 - a DM cache smq policy cleanup that fell out from reviewing the above

 - fix the Kconfig help text for CONFIG_DM_INTEGRITY

* tag 'for-4.12/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache metadata: fail operations if fail_io mode has been established
  dm integrity: improve the Kconfig help text for DM_INTEGRITY
  dm cache policy smq: cleanup free_target_met() and clean_target_met()
  dm cache policy smq: allow demotions to happen even during continuous IO
parents 53ef7d0e 10add84e
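
For context before the diffs: the metadata fixes all follow one guard pattern, taking the lock and then bailing out with -EINVAL once fail_io mode has been established, so an already-failed metadata device is never read or written again. A minimal standalone sketch of that pattern (illustrative only; the struct, lock, and query below are placeholders, not the kernel's API):

    #include <errno.h>
    #include <stdbool.h>
    #include <pthread.h>

    struct md {
            pthread_rwlock_t lock;
            bool fail_io;           /* latched after a fatal metadata I/O error */
    };

    /* r defaults to -EINVAL so the fail_io case falls through to an error. */
    static int md_query(struct md *m, unsigned *result)
    {
            int r = -EINVAL;

            pthread_rwlock_rdlock(&m->lock);
            if (!m->fail_io) {
                    *result = 42;   /* stand-in for the real metadata lookup */
                    r = 0;
            }
            pthread_rwlock_unlock(&m->lock);

            return r;
    }

    int main(void)
    {
            unsigned v;
            struct md m = { .fail_io = true };

            pthread_rwlock_init(&m.lock, NULL);
            return md_query(&m, &v) == -EINVAL ? 0 : 1;   /* short-circuited */
    }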
drivers/md/Kconfig
@@ -503,13 +503,24 @@ config DM_LOG_WRITES
 	  If unsure, say N.
 
 config DM_INTEGRITY
-	tristate "Integrity target"
+	tristate "Integrity target support"
 	depends on BLK_DEV_DM
 	select BLK_DEV_INTEGRITY
 	select DM_BUFIO
 	select CRYPTO
 	select ASYNC_XOR
 	---help---
-	  This is the integrity target.
+	  This device-mapper target emulates a block device that has
+	  additional per-sector tags that can be used for storing
+	  integrity information.
+
+	  This integrity target is used with the dm-crypt target to
+	  provide authenticated disk encryption or it can be used
+	  standalone.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called dm-integrity.
+
+	  If unsure, say N.
 
 endif # MD
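
A usage aside, not part of the commit: given the "depends on BLK_DEV_DM" line and the module name stated in the help text above, building the target as a module comes down to two .config entries, after which "modprobe dm-integrity" loads it:

    CONFIG_BLK_DEV_DM=m
    CONFIG_DM_INTEGRITY=m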
drivers/md/dm-cache-metadata.c
@@ -1624,17 +1624,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
 
 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
 {
-	int r;
+	int r = -EINVAL;
 	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
 				 clear_clean_shutdown);
 
 	WRITE_LOCK(cmd);
+	if (cmd->fail_io)
+		goto out;
+
 	r = __commit_transaction(cmd, mutator);
 	if (r)
 		goto out;
 
 	r = __begin_transaction(cmd);
-
 out:
 	WRITE_UNLOCK(cmd);
 	return r;
@@ -1646,7 +1648,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_free(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
@@ -1658,7 +1661,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
drivers/md/dm-cache-policy-smq.c
@@ -1120,28 +1120,30 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 	 * Cache entries may not be populated.  So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+	unsigned nr_clean;
 
-	if (idle)
+	if (idle) {
 		/*
 		 * We'd like to clean everything.
 		 */
 		return q_size(&mq->dirty) == 0u;
-	else
-		return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-		       percent_to_target(mq, CLEAN_TARGET);
+	}
+
+	nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+	return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+	       percent_to_target(mq, CLEAN_TARGET);
 }
 
 static bool free_target_met(struct smq_policy *mq, bool idle)
 {
-	unsigned nr_free = from_cblock(mq->cache_size) -
-			   mq->cache_alloc.nr_allocated;
+	unsigned nr_free;
 
-	if (idle)
-		return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
-		       percent_to_target(mq, FREE_TARGET);
-	else
+	if (!idle)
 		return true;
+
+	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+	       percent_to_target(mq, FREE_TARGET);
 }
 
 /*----------------------------------------------------------------*/
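
To make the refactored free_target_met() concrete, a toy model of its arithmetic; the 25% figure is an assumption standing in for percent_to_target(mq, FREE_TARGET), and every other number is invented:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of free_target_met()'s arithmetic above; all values invented. */
    int main(void)
    {
            unsigned cache_size = 1000;             /* from_cblock(mq->cache_size) */
            unsigned nr_allocated = 990;            /* mq->cache_alloc.nr_allocated */
            unsigned demotions_queued = 5;          /* btracker_nr_demotions_queued() */
            unsigned target = cache_size * 25 / 100;
            unsigned nr_free = cache_size - nr_allocated;

            bool met = (nr_free + demotions_queued) >= target;
            printf("free target met: %s\n", met ? "yes" : "no");  /* no: 15 < 250 */
            return 0;
    }

With only 15 blocks free or being demoted against a target of 250, the predicate fails, which is exactly the case where queue_promotion() in the next hunk should trigger demotions.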
@@ -1214,7 +1216,11 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		return;
 
 	if (allocator_empty(&mq->cache_alloc)) {
-		if (!free_target_met(mq, false))
+		/*
+		 * We always claim to be 'idle' to ensure some demotions happen
+		 * with continuous loads.
+		 */
+		if (!free_target_met(mq, true))
 			queue_demotion(mq);
 		return;
 	}
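
Worth spelling out: with the old free_target_met(mq, false), the !idle branch returned true unconditionally, so the !free_target_met() test here could never fire under continuous IO and no demotions were queued; claiming 'idle' forces the real free-target check, letting old blocks be demoted so the allocator can eventually serve promotions for the new working set.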