Commit 1e1a4e8f authored by Linus Torvalds

Merge tag 'dm-4.3-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper update from Mike Snitzer:

 - a couple of small cleanups in dm-cache, dm-verity, persistent-data's
   dm-btree, and DM core.

 - a 4.1-stable fix for dm-cache that stops deferred bio prison cells
   from leaking

 - a 4.2-stable fix that adds feature reporting for the dm-stats
   features added in 4.2

 - improve DM-snapshot to not invalidate the on-disk snapshot if a write
   overflow occurs on the snapshot device; a write overflow triggered
   through the origin device will still invalidate the snapshot.

 - optimize DM-thinp's async discard submission a bit now that late bio
   splitting has been included in block core.

 - switch DM-cache's SMQ policy lock from using a mutex to a spinlock;
   improves performance on very low latency devices (e.g. NVMe SSD); a
   minimal sketch of the new locking pattern appears after this list.

 - document DM RAID 4/5/6's discard support
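
The mutex-to-spinlock change in the SMQ policy follows the usual kernel
pattern of guarding short, non-blocking critical sections with
spin_lock_irqsave()/spin_unlock_irqrestore(), which also makes the policy
callable from atomic context. A minimal sketch of that pattern, with
made-up struct and function names (illustrative only, not the actual
dm-cache-policy-smq code):

    #include <linux/spinlock.h>

    struct example_policy {
            spinlock_t lock;        /* protects everything below */
            unsigned tick;
    };

    static void example_init(struct example_policy *p)
    {
            spin_lock_init(&p->lock);
            p->tick = 0;
    }

    /* Safe to call from atomic context: nothing sleeps while the lock is held. */
    static void example_tick(struct example_policy *p)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            p->tick++;              /* short, non-blocking critical section */
            spin_unlock_irqrestore(&p->lock, flags);
    }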

[ I did not pull the slab changes, which weren't appropriate for this
  tree, and weren't obviously the right thing to do anyway.  At the very
  least they need some discussion and explanation before getting merged.

  Because this is a partial pull rather than a pull of the actual tagged
  commit, this merge commit is obviously also missing the git signature
  from the original tag ]

* tag 'dm-4.3-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: fix use after freeing migrations
  dm cache: small cleanups related to deferred prison cell cleanup
  dm cache: fix leaking of deferred bio prison cells
  dm raid: document RAID 4/5/6 discard support
  dm stats: report precise_timestamps and histogram in @stats_list output
  dm thin: optimize async discard submission
  dm snapshot: don't invalidate on-disk image on snapshot write overflow
  dm: remove unlikely() before IS_ERR()
  dm: do not override error code returned from dm_get_device()
  dm: test return value for DM_MAPIO_SUBMITTED
  dm verity: remove unused mempool
  dm cache: move wake_waker() from free_migrations() to where it is needed
  dm btree remove: remove unused function get_nr_entries()
  dm btree: remove unused "dm_block_t root" parameter in btree_split_sibling()
  dm cache policy smq: change the mutex to a spinlock
parents d975f309 cc7da0ba
...@@ -209,6 +209,37 @@ include: ...@@ -209,6 +209,37 @@ include:
"repair" - Initiate a repair of the array. "repair" - Initiate a repair of the array.
"reshape"- Currently unsupported (-EINVAL). "reshape"- Currently unsupported (-EINVAL).
Discard Support
---------------
The implementation of discard support among hardware vendors varies.
When a block is discarded, some storage devices will return zeroes when
the block is read. These devices set the 'discard_zeroes_data'
attribute. Other devices will return random data. Confusingly, some
devices that advertise 'discard_zeroes_data' will not reliably return
zeroes when discarded blocks are read! Since RAID 4/5/6 uses blocks
from a number of devices to calculate parity blocks and (for performance
reasons) relies on 'discard_zeroes_data' being reliable, it is important
that the devices be consistent. Blocks may be discarded in the middle
of a RAID 4/5/6 stripe and if subsequent read results are not
consistent, the parity blocks may be calculated differently at any time;
making the parity blocks useless for redundancy. It is important to
understand how your hardware behaves with discards if you are going to
enable discards with RAID 4/5/6.
Since the behavior of storage devices is unreliable in this respect,
even when reporting 'discard_zeroes_data', by default RAID 4/5/6
discard support is disabled -- this ensures data integrity at the
expense of losing some performance.
Storage devices that properly support 'discard_zeroes_data' are
increasingly whitelisted in the kernel and can thus be trusted.
For trusted devices, the following dm-raid module parameter can be set
to safely enable discard support for RAID 4/5/6:
'devices_handle_discards_safely'
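As an aside on how such a switch is usually exposed: a boolean module
parameter like the one named above is typically declared with
module_param()/MODULE_PARM_DESC(). The sketch below is hypothetical and
is not the actual dm-raid source; only the parameter name is taken from
the text above:

    #include <linux/module.h>

    /* Hypothetical declaration of the documented dm-raid switch. */
    static bool devices_handle_discards_safely;
    module_param(devices_handle_discards_safely, bool, 0644);
    MODULE_PARM_DESC(devices_handle_discards_safely,
                     "Set to Y only if every array member reliably reads back zeroes from discarded blocks");

With a non-zero permission value such a parameter can be given at module
load time or, when exposed writable, changed later through the matching
file under /sys/module/<module>/parameters/.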
Version History Version History
--------------- ---------------
1.0.0 Initial version. Support for RAID 4/5/6 1.0.0 Initial version. Support for RAID 4/5/6
......
...@@ -121,6 +121,10 @@ Messages ...@@ -121,6 +121,10 @@ Messages
Output format: Output format:
<region_id>: <start_sector>+<length> <step> <program_id> <aux_data> <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
precise_timestamps histogram:n1,n2,n3,...
The strings "precise_timestamps" and "histogram" are printed only
if they were specified when creating the region.
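For illustration only, a region created with both options might show up
in @stats_list along these lines (every value below, including the
program_id and aux_data fields, is made up):

    0: 0+2097152 2097152 dmstats - precise_timestamps histogram:50,100,500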
@stats_print <region_id> [<starting_line> <number_of_lines>] @stats_print <region_id> [<starting_line> <number_of_lines>]
......
...@@ -772,7 +772,7 @@ struct smq_policy { ...@@ -772,7 +772,7 @@ struct smq_policy {
struct dm_cache_policy policy; struct dm_cache_policy policy;
/* protects everything */ /* protects everything */
struct mutex lock; spinlock_t lock;
dm_cblock_t cache_size; dm_cblock_t cache_size;
sector_t cache_block_size; sector_t cache_block_size;
...@@ -807,13 +807,7 @@ struct smq_policy { ...@@ -807,13 +807,7 @@ struct smq_policy {
/* /*
* Keeps track of time, incremented by the core. We use this to * Keeps track of time, incremented by the core. We use this to
* avoid attributing multiple hits within the same tick. * avoid attributing multiple hits within the same tick.
*
* Access to tick_protected should be done with the spin lock held.
* It's copied to tick at the start of the map function (within the
* mutex).
*/ */
spinlock_t tick_lock;
unsigned tick_protected;
unsigned tick; unsigned tick;
/* /*
...@@ -1296,46 +1290,20 @@ static void smq_destroy(struct dm_cache_policy *p) ...@@ -1296,46 +1290,20 @@ static void smq_destroy(struct dm_cache_policy *p)
kfree(mq); kfree(mq);
} }
static void copy_tick(struct smq_policy *mq)
{
unsigned long flags, tick;
spin_lock_irqsave(&mq->tick_lock, flags);
tick = mq->tick_protected;
if (tick != mq->tick) {
update_sentinels(mq);
end_hotspot_period(mq);
end_cache_period(mq);
mq->tick = tick;
}
spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static bool maybe_lock(struct smq_policy *mq, bool can_block)
{
if (can_block) {
mutex_lock(&mq->lock);
return true;
} else
return mutex_trylock(&mq->lock);
}
static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock, static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
bool can_block, bool can_migrate, bool fast_promote, bool can_block, bool can_migrate, bool fast_promote,
struct bio *bio, struct policy_locker *locker, struct bio *bio, struct policy_locker *locker,
struct policy_result *result) struct policy_result *result)
{ {
int r; int r;
unsigned long flags;
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
result->op = POLICY_MISS; result->op = POLICY_MISS;
if (!maybe_lock(mq, can_block)) spin_lock_irqsave(&mq->lock, flags);
return -EWOULDBLOCK;
copy_tick(mq);
r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result); r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
return r; return r;
} }
...@@ -1343,20 +1311,18 @@ static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock, ...@@ -1343,20 +1311,18 @@ static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock) static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{ {
int r; int r;
unsigned long flags;
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
struct entry *e; struct entry *e;
if (!mutex_trylock(&mq->lock)) spin_lock_irqsave(&mq->lock, flags);
return -EWOULDBLOCK;
e = h_lookup(&mq->table, oblock); e = h_lookup(&mq->table, oblock);
if (e) { if (e) {
*cblock = infer_cblock(mq, e); *cblock = infer_cblock(mq, e);
r = 0; r = 0;
} else } else
r = -ENOENT; r = -ENOENT;
spin_unlock_irqrestore(&mq->lock, flags);
mutex_unlock(&mq->lock);
return r; return r;
} }
...@@ -1375,20 +1341,22 @@ static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, boo ...@@ -1375,20 +1341,22 @@ static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, boo
static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{ {
unsigned long flags;
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
mutex_lock(&mq->lock); spin_lock_irqsave(&mq->lock, flags);
__smq_set_clear_dirty(mq, oblock, true); __smq_set_clear_dirty(mq, oblock, true);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
} }
static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{ {
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
unsigned long flags;
mutex_lock(&mq->lock); spin_lock_irqsave(&mq->lock, flags);
__smq_set_clear_dirty(mq, oblock, false); __smq_set_clear_dirty(mq, oblock, false);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
} }
static int smq_load_mapping(struct dm_cache_policy *p, static int smq_load_mapping(struct dm_cache_policy *p,
...@@ -1433,14 +1401,14 @@ static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn, ...@@ -1433,14 +1401,14 @@ static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
int r = 0; int r = 0;
mutex_lock(&mq->lock); /*
* We don't need to lock here since this method is only called once
* the IO has stopped.
*/
r = smq_save_hints(mq, &mq->clean, fn, context); r = smq_save_hints(mq, &mq->clean, fn, context);
if (!r) if (!r)
r = smq_save_hints(mq, &mq->dirty, fn, context); r = smq_save_hints(mq, &mq->dirty, fn, context);
mutex_unlock(&mq->lock);
return r; return r;
} }
...@@ -1458,10 +1426,11 @@ static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock) ...@@ -1458,10 +1426,11 @@ static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{ {
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
unsigned long flags;
mutex_lock(&mq->lock); spin_lock_irqsave(&mq->lock, flags);
__remove_mapping(mq, oblock); __remove_mapping(mq, oblock);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
} }
static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock) static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
...@@ -1480,11 +1449,12 @@ static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock) ...@@ -1480,11 +1449,12 @@ static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock) static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{ {
int r; int r;
unsigned long flags;
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
mutex_lock(&mq->lock); spin_lock_irqsave(&mq->lock, flags);
r = __remove_cblock(mq, cblock); r = __remove_cblock(mq, cblock);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
return r; return r;
} }
...@@ -1537,11 +1507,12 @@ static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock, ...@@ -1537,11 +1507,12 @@ static int smq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
dm_cblock_t *cblock, bool critical_only) dm_cblock_t *cblock, bool critical_only)
{ {
int r; int r;
unsigned long flags;
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
mutex_lock(&mq->lock); spin_lock_irqsave(&mq->lock, flags);
r = __smq_writeback_work(mq, oblock, cblock, critical_only); r = __smq_writeback_work(mq, oblock, cblock, critical_only);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
return r; return r;
} }
...@@ -1562,21 +1533,23 @@ static void __force_mapping(struct smq_policy *mq, ...@@ -1562,21 +1533,23 @@ static void __force_mapping(struct smq_policy *mq,
static void smq_force_mapping(struct dm_cache_policy *p, static void smq_force_mapping(struct dm_cache_policy *p,
dm_oblock_t current_oblock, dm_oblock_t new_oblock) dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{ {
unsigned long flags;
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
mutex_lock(&mq->lock); spin_lock_irqsave(&mq->lock, flags);
__force_mapping(mq, current_oblock, new_oblock); __force_mapping(mq, current_oblock, new_oblock);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
} }
static dm_cblock_t smq_residency(struct dm_cache_policy *p) static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{ {
dm_cblock_t r; dm_cblock_t r;
unsigned long flags;
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
mutex_lock(&mq->lock); spin_lock_irqsave(&mq->lock, flags);
r = to_cblock(mq->cache_alloc.nr_allocated); r = to_cblock(mq->cache_alloc.nr_allocated);
mutex_unlock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
return r; return r;
} }
...@@ -1586,15 +1559,12 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block) ...@@ -1586,15 +1559,12 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
struct smq_policy *mq = to_smq_policy(p); struct smq_policy *mq = to_smq_policy(p);
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&mq->tick_lock, flags); spin_lock_irqsave(&mq->lock, flags);
mq->tick_protected++; mq->tick++;
spin_unlock_irqrestore(&mq->tick_lock, flags); update_sentinels(mq);
end_hotspot_period(mq);
if (can_block) { end_cache_period(mq);
mutex_lock(&mq->lock); spin_unlock_irqrestore(&mq->lock, flags);
copy_tick(mq);
mutex_unlock(&mq->lock);
}
} }
/* Init the policy plugin interface function pointers. */ /* Init the policy plugin interface function pointers. */
...@@ -1694,10 +1664,8 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, ...@@ -1694,10 +1664,8 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
} else } else
mq->cache_hit_bits = NULL; mq->cache_hit_bits = NULL;
mq->tick_protected = 0;
mq->tick = 0; mq->tick = 0;
mutex_init(&mq->lock); spin_lock_init(&mq->lock);
spin_lock_init(&mq->tick_lock);
q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS); q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
mq->hotspot.nr_top_levels = 8; mq->hotspot.nr_top_levels = 8;
......
...@@ -424,7 +424,6 @@ static void free_migration(struct dm_cache_migration *mg) ...@@ -424,7 +424,6 @@ static void free_migration(struct dm_cache_migration *mg)
wake_up(&cache->migration_wait); wake_up(&cache->migration_wait);
mempool_free(mg, cache->migration_pool); mempool_free(mg, cache->migration_pool);
wake_worker(cache);
} }
static int prealloc_data_structs(struct cache *cache, struct prealloc *p) static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
...@@ -1064,14 +1063,6 @@ static void dec_io_migrations(struct cache *cache) ...@@ -1064,14 +1063,6 @@ static void dec_io_migrations(struct cache *cache)
atomic_dec(&cache->nr_io_migrations); atomic_dec(&cache->nr_io_migrations);
} }
static void __cell_release(struct cache *cache, struct dm_bio_prison_cell *cell,
bool holder, struct bio_list *bios)
{
(holder ? dm_cell_release : dm_cell_release_no_holder)
(cache->prison, cell, bios);
free_prison_cell(cache, cell);
}
static bool discard_or_flush(struct bio *bio) static bool discard_or_flush(struct bio *bio)
{ {
return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD); return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
...@@ -1079,14 +1070,13 @@ static bool discard_or_flush(struct bio *bio) ...@@ -1079,14 +1070,13 @@ static bool discard_or_flush(struct bio *bio)
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
{ {
if (discard_or_flush(cell->holder)) if (discard_or_flush(cell->holder)) {
/* /*
* We have to handle these bios * We have to handle these bios individually.
* individually.
*/ */
__cell_release(cache, cell, true, &cache->deferred_bios); dm_cell_release(cache->prison, cell, &cache->deferred_bios);
free_prison_cell(cache, cell);
else } else
list_add_tail(&cell->user_list, &cache->deferred_cells); list_add_tail(&cell->user_list, &cache->deferred_cells);
} }
...@@ -1113,7 +1103,7 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, boo ...@@ -1113,7 +1103,7 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, boo
static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err) static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
{ {
dm_cell_error(cache->prison, cell, err); dm_cell_error(cache->prison, cell, err);
dm_bio_prison_free_cell(cache->prison, cell); free_prison_cell(cache, cell);
} }
static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell) static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
...@@ -1123,8 +1113,11 @@ static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell) ...@@ -1123,8 +1113,11 @@ static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
static void free_io_migration(struct dm_cache_migration *mg) static void free_io_migration(struct dm_cache_migration *mg)
{ {
dec_io_migrations(mg->cache); struct cache *cache = mg->cache;
dec_io_migrations(cache);
free_migration(mg); free_migration(mg);
wake_worker(cache);
} }
static void migration_failure(struct dm_cache_migration *mg) static void migration_failure(struct dm_cache_migration *mg)
...@@ -1351,16 +1344,18 @@ static void issue_discard(struct dm_cache_migration *mg) ...@@ -1351,16 +1344,18 @@ static void issue_discard(struct dm_cache_migration *mg)
{ {
dm_dblock_t b, e; dm_dblock_t b, e;
struct bio *bio = mg->new_ocell->holder; struct bio *bio = mg->new_ocell->holder;
struct cache *cache = mg->cache;
calc_discard_block_range(mg->cache, bio, &b, &e); calc_discard_block_range(cache, bio, &b, &e);
while (b != e) { while (b != e) {
set_discard(mg->cache, b); set_discard(cache, b);
b = to_dblock(from_dblock(b) + 1); b = to_dblock(from_dblock(b) + 1);
} }
bio_endio(bio); bio_endio(bio);
cell_defer(mg->cache, mg->new_ocell, false); cell_defer(cache, mg->new_ocell, false);
free_migration(mg); free_migration(mg);
wake_worker(cache);
} }
static void issue_copy_or_discard(struct dm_cache_migration *mg) static void issue_copy_or_discard(struct dm_cache_migration *mg)
...@@ -1729,6 +1724,8 @@ static void remap_cell_to_origin_clear_discard(struct cache *cache, ...@@ -1729,6 +1724,8 @@ static void remap_cell_to_origin_clear_discard(struct cache *cache,
remap_to_origin(cache, bio); remap_to_origin(cache, bio);
issue(cache, bio); issue(cache, bio);
} }
free_prison_cell(cache, cell);
} }
static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell, static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
...@@ -1763,6 +1760,8 @@ static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_ ...@@ -1763,6 +1760,8 @@ static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_
remap_to_cache(cache, bio, cblock); remap_to_cache(cache, bio, cblock);
issue(cache, bio); issue(cache, bio);
} }
free_prison_cell(cache, cell);
} }
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
......
...@@ -1811,11 +1811,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -1811,11 +1811,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
} }
cc->iv_offset = tmpll; cc->iv_offset = tmpll;
if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) { ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
if (ret) {
ti->error = "Device lookup failed"; ti->error = "Device lookup failed";
goto bad; goto bad;
} }
ret = -EINVAL;
if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector"; ti->error = "Invalid device sector";
goto bad; goto bad;
......
...@@ -129,6 +129,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -129,6 +129,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct delay_c *dc; struct delay_c *dc;
unsigned long long tmpll; unsigned long long tmpll;
char dummy; char dummy;
int ret;
if (argc != 3 && argc != 6) { if (argc != 3 && argc != 6) {
ti->error = "requires exactly 3 or 6 arguments"; ti->error = "requires exactly 3 or 6 arguments";
...@@ -143,6 +144,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -143,6 +144,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
dc->reads = dc->writes = 0; dc->reads = dc->writes = 0;
ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) { if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector"; ti->error = "Invalid device sector";
goto bad; goto bad;
...@@ -154,12 +156,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -154,12 +156,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad; goto bad;
} }
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&dc->dev_read)) { &dc->dev_read);
if (ret) {
ti->error = "Device lookup failed"; ti->error = "Device lookup failed";
goto bad; goto bad;
} }
ret = -EINVAL;
dc->dev_write = NULL; dc->dev_write = NULL;
if (argc == 3) if (argc == 3)
goto out; goto out;
...@@ -175,13 +179,15 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -175,13 +179,15 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_dev_read; goto bad_dev_read;
} }
if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
&dc->dev_write)) { &dc->dev_write);
if (ret) {
ti->error = "Write device lookup failed"; ti->error = "Write device lookup failed";
goto bad_dev_read; goto bad_dev_read;
} }
out: out:
ret = -EINVAL;
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) { if (!dc->kdelayd_wq) {
DMERR("Couldn't start kdelayd"); DMERR("Couldn't start kdelayd");
...@@ -208,7 +214,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -208,7 +214,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
dm_put_device(ti, dc->dev_read); dm_put_device(ti, dc->dev_read);
bad: bad:
kfree(dc); kfree(dc);
return -EINVAL; return ret;
} }
static void delay_dtr(struct dm_target *ti) static void delay_dtr(struct dm_target *ti)
......
...@@ -183,6 +183,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -183,6 +183,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
devname = dm_shift_arg(&as); devname = dm_shift_arg(&as);
r = -EINVAL;
if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) { if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
ti->error = "Invalid device sector"; ti->error = "Invalid device sector";
goto bad; goto bad;
...@@ -211,7 +212,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -211,7 +212,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r) if (r)
goto bad; goto bad;
if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) { r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
if (r) {
ti->error = "Device lookup failed"; ti->error = "Device lookup failed";
goto bad; goto bad;
} }
...@@ -224,7 +226,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -224,7 +226,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad: bad:
kfree(fc); kfree(fc);
return -EINVAL; return r;
} }
static void flakey_dtr(struct dm_target *ti) static void flakey_dtr(struct dm_target *ti)
......
...@@ -30,6 +30,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -30,6 +30,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct linear_c *lc; struct linear_c *lc;
unsigned long long tmp; unsigned long long tmp;
char dummy; char dummy;
int ret;
if (argc != 2) { if (argc != 2) {
ti->error = "Invalid argument count"; ti->error = "Invalid argument count";
...@@ -42,13 +43,15 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -42,13 +43,15 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -ENOMEM; return -ENOMEM;
} }
ret = -EINVAL;
if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) { if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
ti->error = "dm-linear: Invalid device sector"; ti->error = "dm-linear: Invalid device sector";
goto bad; goto bad;
} }
lc->start = tmp; lc->start = tmp;
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) { ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
if (ret) {
ti->error = "dm-linear: Device lookup failed"; ti->error = "dm-linear: Device lookup failed";
goto bad; goto bad;
} }
...@@ -61,7 +64,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -61,7 +64,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad: bad:
kfree(lc); kfree(lc);
return -EINVAL; return ret;
} }
static void linear_dtr(struct dm_target *ti) static void linear_dtr(struct dm_target *ti)
......
...@@ -417,6 +417,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -417,6 +417,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct log_writes_c *lc; struct log_writes_c *lc;
struct dm_arg_set as; struct dm_arg_set as;
const char *devname, *logdevname; const char *devname, *logdevname;
int ret;
as.argc = argc; as.argc = argc;
as.argv = argv; as.argv = argv;
...@@ -440,18 +441,22 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -440,18 +441,22 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
atomic_set(&lc->pending_blocks, 0); atomic_set(&lc->pending_blocks, 0);
devname = dm_shift_arg(&as); devname = dm_shift_arg(&as);
if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev)) { ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
if (ret) {
ti->error = "Device lookup failed"; ti->error = "Device lookup failed";
goto bad; goto bad;
} }
logdevname = dm_shift_arg(&as); logdevname = dm_shift_arg(&as);
if (dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev)) { ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
&lc->logdev);
if (ret) {
ti->error = "Log device lookup failed"; ti->error = "Log device lookup failed";
dm_put_device(ti, lc->dev); dm_put_device(ti, lc->dev);
goto bad; goto bad;
} }
ret = -EINVAL;
lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
if (!lc->log_kthread) { if (!lc->log_kthread) {
ti->error = "Couldn't alloc kthread"; ti->error = "Couldn't alloc kthread";
...@@ -476,7 +481,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -476,7 +481,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad: bad:
kfree(lc); kfree(lc);
return -EINVAL; return ret;
} }
static int log_mark(struct log_writes_c *lc, char *data) static int log_mark(struct log_writes_c *lc, char *data)
......
...@@ -945,16 +945,18 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, ...@@ -945,16 +945,18 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
{ {
unsigned long long offset; unsigned long long offset;
char dummy; char dummy;
int ret;
if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) { if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
ti->error = "Invalid offset"; ti->error = "Invalid offset";
return -EINVAL; return -EINVAL;
} }
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&ms->mirror[mirror].dev)) { &ms->mirror[mirror].dev);
if (ret) {
ti->error = "Device lookup failure"; ti->error = "Device lookup failure";
return -ENXIO; return ret;
} }
ms->mirror[mirror].ms = ms; ms->mirror[mirror].ms = ms;
......
...@@ -533,7 +533,7 @@ static int read_exceptions(struct pstore *ps, ...@@ -533,7 +533,7 @@ static int read_exceptions(struct pstore *ps,
chunk = area_location(ps, ps->current_area); chunk = area_location(ps, ps->current_area);
area = dm_bufio_read(client, chunk, &bp); area = dm_bufio_read(client, chunk, &bp);
if (unlikely(IS_ERR(area))) { if (IS_ERR(area)) {
r = PTR_ERR(area); r = PTR_ERR(area);
goto ret_destroy_bufio; goto ret_destroy_bufio;
} }
......
...@@ -63,6 +63,13 @@ struct dm_snapshot { ...@@ -63,6 +63,13 @@ struct dm_snapshot {
*/ */
int valid; int valid;
/*
* The snapshot overflowed because of a write to the snapshot device.
* We don't have to invalidate the snapshot in this case, but we need
* to prevent further writes.
*/
int snapshot_overflowed;
/* Origin writes don't trigger exceptions until this is set */ /* Origin writes don't trigger exceptions until this is set */
int active; int active;
...@@ -1152,6 +1159,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -1152,6 +1159,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->ti = ti; s->ti = ti;
s->valid = 1; s->valid = 1;
s->snapshot_overflowed = 0;
s->active = 0; s->active = 0;
atomic_set(&s->pending_exceptions_count, 0); atomic_set(&s->pending_exceptions_count, 0);
s->exception_start_sequence = 0; s->exception_start_sequence = 0;
...@@ -1301,6 +1309,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src, ...@@ -1301,6 +1309,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
snap_dest->ti->max_io_len = snap_dest->store->chunk_size; snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
snap_dest->valid = snap_src->valid; snap_dest->valid = snap_src->valid;
snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
/* /*
* Set source invalid to ensure it receives no further I/O. * Set source invalid to ensure it receives no further I/O.
...@@ -1691,7 +1700,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) ...@@ -1691,7 +1700,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
* to copy an exception */ * to copy an exception */
down_write(&s->lock); down_write(&s->lock);
if (!s->valid) { if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
r = -EIO; r = -EIO;
goto out_unlock; goto out_unlock;
} }
...@@ -1715,7 +1724,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) ...@@ -1715,7 +1724,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = alloc_pending_exception(s); pe = alloc_pending_exception(s);
down_write(&s->lock); down_write(&s->lock);
if (!s->valid) { if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe); free_pending_exception(pe);
r = -EIO; r = -EIO;
goto out_unlock; goto out_unlock;
...@@ -1730,7 +1739,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) ...@@ -1730,7 +1739,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = __find_pending_exception(s, pe, chunk); pe = __find_pending_exception(s, pe, chunk);
if (!pe) { if (!pe) {
__invalidate_snapshot(s, -ENOMEM); s->snapshot_overflowed = 1;
DMERR("Snapshot overflowed: Unable to allocate exception.");
r = -EIO; r = -EIO;
goto out_unlock; goto out_unlock;
} }
...@@ -1990,6 +2000,8 @@ static void snapshot_status(struct dm_target *ti, status_type_t type, ...@@ -1990,6 +2000,8 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Invalid"); DMEMIT("Invalid");
else if (snap->merge_failed) else if (snap->merge_failed)
DMEMIT("Merge failed"); DMEMIT("Merge failed");
else if (snap->snapshot_overflowed)
DMEMIT("Overflow");
else { else {
if (snap->store->type->usage) { if (snap->store->type->usage) {
sector_t total_sectors, sectors_allocated, sector_t total_sectors, sectors_allocated,
...@@ -2353,7 +2365,7 @@ static struct target_type origin_target = { ...@@ -2353,7 +2365,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = { static struct target_type snapshot_target = {
.name = "snapshot", .name = "snapshot",
.version = {1, 13, 0}, .version = {1, 14, 0},
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = snapshot_ctr, .ctr = snapshot_ctr,
.dtr = snapshot_dtr, .dtr = snapshot_dtr,
......
...@@ -457,12 +457,24 @@ static int dm_stats_list(struct dm_stats *stats, const char *program, ...@@ -457,12 +457,24 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
list_for_each_entry(s, &stats->list, list_entry) { list_for_each_entry(s, &stats->list, list_entry) {
if (!program || !strcmp(program, s->program_id)) { if (!program || !strcmp(program, s->program_id)) {
len = s->end - s->start; len = s->end - s->start;
DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id, DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
(unsigned long long)s->start, (unsigned long long)s->start,
(unsigned long long)len, (unsigned long long)len,
(unsigned long long)s->step, (unsigned long long)s->step,
s->program_id, s->program_id,
s->aux_data); s->aux_data);
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
DMEMIT(" precise_timestamps");
if (s->n_histogram_entries) {
unsigned i;
DMEMIT(" histogram:");
for (i = 0; i < s->n_histogram_entries; i++) {
if (i)
DMEMIT(",");
DMEMIT("%llu", s->histogram_boundaries[i]);
}
}
DMEMIT("\n");
} }
} }
mutex_unlock(&stats->mutex); mutex_unlock(&stats->mutex);
......
...@@ -75,13 +75,15 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc, ...@@ -75,13 +75,15 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
{ {
unsigned long long start; unsigned long long start;
char dummy; char dummy;
int ret;
if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1) if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
return -EINVAL; return -EINVAL;
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&sc->stripe[stripe].dev)) &sc->stripe[stripe].dev);
return -ENXIO; if (ret)
return ret;
sc->stripe[stripe].physical_start = start; sc->stripe[stripe].physical_start = start;
......
...@@ -332,9 +332,6 @@ struct thin_c { ...@@ -332,9 +332,6 @@ struct thin_c {
* *
* Description: * Description:
* Asynchronously issue a discard request for the sectors in question. * Asynchronously issue a discard request for the sectors in question.
* NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
* that is being kept local to DM thinp until the block changes to allow
* late bio splitting land upstream.
*/ */
static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector, static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
...@@ -342,91 +339,36 @@ static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sect ...@@ -342,91 +339,36 @@ static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sect
{ {
struct request_queue *q = bdev_get_queue(bdev); struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD; int type = REQ_WRITE | REQ_DISCARD;
unsigned int max_discard_sectors, granularity;
int alignment;
struct bio *bio; struct bio *bio;
int ret = 0;
struct blk_plug plug;
if (!q) if (!q || !nr_sects)
return -ENXIO; return -ENXIO;
if (!blk_queue_discard(q)) if (!blk_queue_discard(q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
/*
* Ensure that max_discard_sectors is of the proper
* granularity, so that requests stay aligned after a split.
*/
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
/* Avoid infinite loop below. Being cautious never hurts. */
return -EOPNOTSUPP;
}
if (flags & BLKDEV_DISCARD_SECURE) { if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q)) if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
type |= REQ_SECURE; type |= REQ_SECURE;
} }
blk_start_plug(&plug); /*
while (nr_sects) { * Required bio_put occurs in bio_endio thanks to bio_chain below
unsigned int req_sects; */
sector_t end_sect, tmp; bio = bio_alloc(gfp_mask, 1);
if (!bio)
/* return -ENOMEM;
* Required bio_put occurs in bio_endio thanks to bio_chain below
*/
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
ret = -ENOMEM;
break;
}
req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
/*
* If splitting a request, and the next starting sector would be
* misaligned, stop the discard at the previous aligned sector.
*/
end_sect = sector + req_sects;
tmp = end_sect;
if (req_sects < nr_sects &&
sector_div(tmp, granularity) != alignment) {
end_sect = end_sect - alignment;
sector_div(end_sect, granularity);
end_sect = end_sect * granularity + alignment;
req_sects = end_sect - sector;
}
bio_chain(bio, parent_bio);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_iter.bi_size = req_sects << 9; bio_chain(bio, parent_bio);
nr_sects -= req_sects;
sector = end_sect;
submit_bio(type, bio); bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_iter.bi_size = nr_sects << 9;
/* submit_bio(type, bio);
* We can loop for a long time in here, if someone does
* full device discards (like mkfs). Be nice and allow
* us to schedule out to avoid softlocking if preempt
* is disabled.
*/
cond_resched();
}
blk_finish_plug(&plug);
return ret; return 0;
} }
static bool block_size_is_power_of_two(struct pool *pool) static bool block_size_is_power_of_two(struct pool *pool)
...@@ -1543,9 +1485,8 @@ static void process_discard_cell_no_passdown(struct thin_c *tc, ...@@ -1543,9 +1485,8 @@ static void process_discard_cell_no_passdown(struct thin_c *tc,
} }
/* /*
* FIXME: DM local hack to defer parent bios's end_io until we * __bio_inc_remaining() is used to defer parent bios's end_io until
* _know_ all chained sub range discard bios have completed. * we _know_ all chained sub range discard bios have completed.
* Will go away once late bio splitting lands upstream!
*/ */
static inline void __bio_inc_remaining(struct bio *bio) static inline void __bio_inc_remaining(struct bio *bio)
{ {
......
...@@ -26,8 +26,6 @@ ...@@ -26,8 +26,6 @@
#define DM_VERITY_ENV_LENGTH 42 #define DM_VERITY_ENV_LENGTH 42
#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR" #define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
#define DM_VERITY_IO_VEC_INLINE 16
#define DM_VERITY_MEMPOOL_SIZE 4
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144 #define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
#define DM_VERITY_MAX_LEVELS 63 #define DM_VERITY_MAX_LEVELS 63
...@@ -76,8 +74,6 @@ struct dm_verity { ...@@ -76,8 +74,6 @@ struct dm_verity {
enum verity_mode mode; /* mode for handling verification errors */ enum verity_mode mode; /* mode for handling verification errors */
unsigned corrupted_errs;/* Number of errors for corrupted blocks */ unsigned corrupted_errs;/* Number of errors for corrupted blocks */
mempool_t *vec_mempool; /* mempool of bio vector */
struct workqueue_struct *verify_wq; struct workqueue_struct *verify_wq;
/* starting blocks for each tree level. 0 is the lowest level. */ /* starting blocks for each tree level. 0 is the lowest level. */
...@@ -271,7 +267,7 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block, ...@@ -271,7 +267,7 @@ static int verity_verify_level(struct dm_verity_io *io, sector_t block,
verity_hash_at_level(v, block, level, &hash_block, &offset); verity_hash_at_level(v, block, level, &hash_block, &offset);
data = dm_bufio_read(v->bufio, hash_block, &buf); data = dm_bufio_read(v->bufio, hash_block, &buf);
if (unlikely(IS_ERR(data))) if (IS_ERR(data))
return PTR_ERR(data); return PTR_ERR(data);
aux = dm_bufio_get_aux_data(buf); aux = dm_bufio_get_aux_data(buf);
...@@ -677,9 +673,6 @@ static void verity_dtr(struct dm_target *ti) ...@@ -677,9 +673,6 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq) if (v->verify_wq)
destroy_workqueue(v->verify_wq); destroy_workqueue(v->verify_wq);
if (v->vec_mempool)
mempool_destroy(v->vec_mempool);
if (v->bufio) if (v->bufio)
dm_bufio_client_destroy(v->bufio); dm_bufio_client_destroy(v->bufio);
...@@ -948,14 +941,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) ...@@ -948,14 +941,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io)); ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
BIO_MAX_PAGES * sizeof(struct bio_vec));
if (!v->vec_mempool) {
ti->error = "Cannot allocate vector mempool";
r = -ENOMEM;
goto bad;
}
/* WQ_UNBOUND greatly improves performance when running on ramdisk */ /* WQ_UNBOUND greatly improves performance when running on ramdisk */
v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus()); v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!v->verify_wq) { if (!v->verify_wq) {
......
...@@ -1464,7 +1464,7 @@ static void __map_bio(struct dm_target_io *tio) ...@@ -1464,7 +1464,7 @@ static void __map_bio(struct dm_target_io *tio)
md = tio->io->md; md = tio->io->md;
dec_pending(tio->io, r); dec_pending(tio->io, r);
free_tio(md, tio); free_tio(md, tio);
} else if (r) { } else if (r != DM_MAPIO_SUBMITTED) {
DMWARN("unimplemented target map return value: %d", r); DMWARN("unimplemented target map return value: %d", r);
BUG(); BUG();
} }
......
...@@ -454,7 +454,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b, ...@@ -454,7 +454,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
int r; int r;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p))) if (IS_ERR(p))
return PTR_ERR(p); return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result)); aux = dm_bufio_get_aux_data(to_buffer(*result));
...@@ -490,7 +490,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm, ...@@ -490,7 +490,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
return -EPERM; return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p))) if (IS_ERR(p))
return PTR_ERR(p); return PTR_ERR(p);
aux = dm_bufio_get_aux_data(to_buffer(*result)); aux = dm_bufio_get_aux_data(to_buffer(*result));
...@@ -523,7 +523,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm, ...@@ -523,7 +523,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm,
int r; int r;
p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result); p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p))) if (IS_ERR(p))
return PTR_ERR(p); return PTR_ERR(p);
if (unlikely(!p)) if (unlikely(!p))
return -EWOULDBLOCK; return -EWOULDBLOCK;
...@@ -559,7 +559,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, ...@@ -559,7 +559,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
return -EPERM; return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p))) if (IS_ERR(p))
return PTR_ERR(p); return PTR_ERR(p);
memset(p, 0, dm_bm_block_size(bm)); memset(p, 0, dm_bm_block_size(bm));
......
...@@ -409,29 +409,11 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, ...@@ -409,29 +409,11 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
return 0; return 0;
} }
static int get_nr_entries(struct dm_transaction_manager *tm,
dm_block_t b, uint32_t *result)
{
int r;
struct dm_block *block;
struct btree_node *n;
r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
if (r)
return r;
n = dm_block_data(block);
*result = le32_to_cpu(n->header.nr_entries);
return dm_tm_unlock(tm, block);
}
static int rebalance_children(struct shadow_spine *s, static int rebalance_children(struct shadow_spine *s,
struct dm_btree_info *info, struct dm_btree_info *info,
struct dm_btree_value_type *vt, uint64_t key) struct dm_btree_value_type *vt, uint64_t key)
{ {
int i, r, has_left_sibling, has_right_sibling; int i, r, has_left_sibling, has_right_sibling;
uint32_t child_entries;
struct btree_node *n; struct btree_node *n;
n = dm_block_data(shadow_current(s)); n = dm_block_data(shadow_current(s));
...@@ -458,10 +440,6 @@ static int rebalance_children(struct shadow_spine *s, ...@@ -458,10 +440,6 @@ static int rebalance_children(struct shadow_spine *s,
if (i < 0) if (i < 0)
return -ENODATA; return -ENODATA;
r = get_nr_entries(info->tm, value64(n, i), &child_entries);
if (r)
return r;
has_left_sibling = i > 0; has_left_sibling = i > 0;
has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
......
...@@ -420,8 +420,8 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup); ...@@ -420,8 +420,8 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup);
* *
* Where A* is a shadow of A. * Where A* is a shadow of A.
*/ */
static int btree_split_sibling(struct shadow_spine *s, dm_block_t root, static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
unsigned parent_index, uint64_t key) uint64_t key)
{ {
int r; int r;
size_t size; size_t size;
...@@ -625,7 +625,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root, ...@@ -625,7 +625,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
if (top) if (top)
r = btree_split_beneath(s, key); r = btree_split_beneath(s, key);
else else
r = btree_split_sibling(s, root, i, key); r = btree_split_sibling(s, i, key);
if (r < 0) if (r < 0)
return r; return r;
......
...@@ -267,9 +267,9 @@ enum { ...@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4 #define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 32 #define DM_VERSION_MINOR 33
#define DM_VERSION_PATCHLEVEL 0 #define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2015-6-26)" #define DM_VERSION_EXTRA "-ioctl (2015-8-18)"
/* Status bits */ /* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */
......