Commit dac94e29 authored by Linus Torvalds

Merge tag 'for-4.12/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - a couple of DM thin provisioning fixes

 - a few request-based DM and DM multipath fixes for issues introduced
   when merging Christoph's changes with Bart's changes for 4.12

 - a DM bufio unsigned overflow fix

 - a couple of straightforward bug fixes for the DM cache target

 - various small tweaks to the DM cache target that enable considerable
   speed improvements under continuous IO. Given that the cache target
   was significantly reworked for 4.12, I see no reason to sit on these
   improvements until 4.13, considering the favorable results such
   minimal tweaks have produced.

* tag 'for-4.12/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: handle kmalloc failure allocating background_tracker struct
  dm bufio: make the parameter "retain_bytes" unsigned long
  dm mpath: multipath_clone_and_map must not return -EIO
  dm mpath: don't return -EIO from dm_report_EIO
  dm rq: add a missing break to map_request
  dm space map disk: fix some book keeping in the disk space map
  dm thin metadata: call precommit before saving the roots
  dm cache policy smq: don't do any writebacks unless IDLE
  dm cache: simplify the IDLE vs BUSY state calculation
  dm cache: track all IO to the cache rather than just the origin device's IO
  dm cache policy smq: stop preemptively demoting blocks
  dm cache policy smq: put newly promoted entries at the top of the multiqueue
  dm cache policy smq: be more aggressive about triggering a writeback
  dm cache policy smq: only demote entries in bottom half of the clean multiqueue
  dm cache: fix incorrect 'idle_time' reset in IO tracker
parents 243bfd2c 7e1b9521
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -218,7 +218,7 @@ static DEFINE_SPINLOCK(param_spinlock);
  * Buffers are freed after this timeout
  */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 
 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -1558,10 +1558,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 	return true;
 }
 
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-	unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-	return retain_bytes / c->block_size;
+	unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 
 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1571,7 +1571,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
 	unsigned long count = nr_to_scan;
-	unsigned retain_target = get_retain_buffers(c);
+	unsigned long retain_target = get_retain_buffers(c);
 
 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1794,8 +1794,8 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
 	struct dm_buffer *b, *tmp;
-	unsigned retain_target = get_retain_buffers(c);
-	unsigned count;
+	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long count;
 	LIST_HEAD(write_list);
 
 	dm_bufio_lock(c);
@@ -1955,7 +1955,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
 
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
 
 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
...

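The dm-bufio changes above widen retain_bytes to unsigned long, so budgets of 4 GiB and larger are no longer truncated, and turn the bytes-to-buffers conversion into a shift by sectors_per_block_bits + SECTOR_SHIFT. A minimal user-space sketch of that conversion under the same power-of-two block-size assumption; retain_buffers() and its arguments are illustrative names, not the kernel's:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

/*
 * Illustrative only: convert a byte budget into a buffer count when the
 * block size is a power of two, i.e.
 * block_size = 1UL << (sectors_per_block_bits + SECTOR_SHIFT).
 */
static unsigned long retain_buffers(unsigned long retain_bytes,
				    unsigned sectors_per_block_bits)
{
	return retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);
}

int main(void)
{
	/* 16 GiB budget, 64 KiB blocks (128 sectors -> bits = 7). */
	/* Assumes a 64-bit unsigned long; a 32-bit "unsigned" would truncate this. */
	unsigned long bytes = 16UL * 1024 * 1024 * 1024;
	unsigned bits = 7;

	printf("buffers retained: %lu\n", retain_buffers(bytes, bits));
	return 0;
}
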
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -33,6 +33,11 @@ struct background_tracker *btracker_create(unsigned max_work)
 {
 	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
 
+	if (!b) {
+		DMERR("couldn't create background_tracker");
+		return NULL;
+	}
+
 	b->max_work = max_work;
 	atomic_set(&b->pending_promotes, 0);
 	atomic_set(&b->pending_writebacks, 0);
...

--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,8 +1120,6 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 	 * Cache entries may not be populated. So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	unsigned nr_clean;
-
 	if (idle) {
 		/*
 		 * We'd like to clean everything.
@@ -1129,18 +1127,16 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		return q_size(&mq->dirty) == 0u;
 	}
 
-	nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
-	return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
-		percent_to_target(mq, CLEAN_TARGET);
+	/*
+	 * If we're busy we don't worry about cleaning at all.
+	 */
+	return true;
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;
 
-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
@@ -1190,9 +1186,9 @@ static void queue_demotion(struct smq_policy *mq)
 	if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
 		return;
 
-	e = q_peek(&mq->clean, mq->clean.nr_levels, true);
+	e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
 	if (!e) {
-		if (!clean_target_met(mq, false))
+		if (!clean_target_met(mq, true))
 			queue_writeback(mq);
 		return;
 	}
@@ -1220,7 +1216,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		 * We always claim to be 'idle' to ensure some demotions happen
 		 * with continuous loads.
 		 */
-		if (!free_target_met(mq, true))
+		if (!free_target_met(mq))
 			queue_demotion(mq);
 		return;
 	}
@@ -1421,15 +1417,11 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
+		if (!clean_target_met(mq, idle)) {
 			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
@@ -1452,6 +1444,7 @@ static void __complete_background_work(struct smq_policy *mq,
 	clear_pending(mq, e);
 	if (success) {
 		e->oblock = work->oblock;
+		e->level = NR_CACHE_LEVELS - 1;
 		push(mq, e);
 		// h, q, a
 	} else {
...

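Two of the smq policy changes above are about where entries sit in the multiqueue: newly promoted entries now start at the top level, and demotion candidates are only taken from the bottom half of the clean queue. A toy user-space model of that level bookkeeping; NR_LEVELS, promote_level() and demotion_candidate() are invented names, not the policy's:

#include <stdio.h>

#define NR_LEVELS 64	/* stand-in for NR_CACHE_LEVELS; the exact value is not the point */

/*
 * Toy model of the multiqueue levels: a higher level means a hotter entry.
 */
static int promote_level(void)
{
	return NR_LEVELS - 1;		/* newly promoted entries go to the top */
}

static int demotion_candidate(const int entries_per_level[NR_LEVELS])
{
	int level;

	/* Only search the coldest half, mirroring q_peek(nr_levels / 2). */
	for (level = 0; level < NR_LEVELS / 2; level++)
		if (entries_per_level[level])
			return level;
	return -1;			/* nothing cold enough to demote */
}

int main(void)
{
	int entries[NR_LEVELS] = { 0 };

	entries[40] = 3;	/* only warm entries present */
	printf("promote to level %d, demote from level %d\n",
	       promote_level(), demotion_candidate(entries));
	return 0;
}
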
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -94,6 +94,9 @@ static void iot_io_begin(struct io_tracker *iot, sector_t len)
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
 {
+	if (!len)
+		return;
+
 	iot->in_flight -= len;
 	if (!iot->in_flight)
 		iot->idle_time = jiffies;
@@ -474,7 +477,7 @@ struct cache {
 	spinlock_t invalidation_lock;
 	struct list_head invalidation_requests;
 
-	struct io_tracker origin_tracker;
+	struct io_tracker tracker;
 
 	struct work_struct commit_ws;
 	struct batcher committer;
@@ -901,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-		bio_op(bio) != REQ_OP_DISCARD);
+	return bio_op(bio) != REQ_OP_DISCARD;
 }
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -912,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 	if (accountable_bio(cache, bio)) {
 		pb->len = bio_sectors(bio);
-		iot_io_begin(&cache->origin_tracker, pb->len);
+		iot_io_begin(&cache->tracker, pb->len);
 	}
 }
@@ -921,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	iot_io_end(&cache->origin_tracker, pb->len);
+	iot_io_end(&cache->tracker, pb->len);
 }
 
 static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1716,20 +1718,19 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
 enum busy {
 	IDLE,
-	MODERATE,
 	BUSY
 };
 
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-	bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+	bool idle = iot_idle_for(&cache->tracker, HZ);
 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 		cache->sectors_per_block;
 
-	if (current_volume <= cache->migration_threshold)
-		return idle ? IDLE : MODERATE;
+	if (idle && current_volume <= cache->migration_threshold)
+		return IDLE;
 	else
-		return idle ? MODERATE : BUSY;
+		return BUSY;
 }
 
 static void inc_hit_counter(struct cache *cache, struct bio *bio)
@@ -2045,8 +2046,6 @@ static void check_migrations(struct work_struct *ws)
 	for (;;) {
 		b = spare_migration_bandwidth(cache);
-		if (b == BUSY)
-			break;
 
 		r = policy_get_background_work(cache->policy, b == IDLE, &op);
 		if (r == -ENODATA)
@@ -2717,7 +2716,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	batcher_init(&cache->committer, commit_op, cache,
 		     issue_op, cache, cache->wq);
-	iot_init(&cache->origin_tracker);
+	iot_init(&cache->tracker);
 
 	init_rwsem(&cache->background_work_lock);
 	prevent_background_work(cache);
@@ -2941,7 +2940,7 @@ static void cache_postsuspend(struct dm_target *ti)
 	cancel_delayed_work(&cache->waker);
 	flush_workqueue(cache->wq);
-	WARN_ON(cache->origin_tracker.in_flight);
+	WARN_ON(cache->tracker.in_flight);
 
 	/*
 	 * If it's a flush suspend there won't be any deferred bios, so this
...

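The dm-cache-target hunks above rename origin_tracker to tracker (it now accounts all IO, not just IO to the origin device) and collapse the old IDLE/MODERATE/BUSY estimate into two states: background work is only treated as IDLE when the cache has seen no IO for a second and the in-flight migration volume is under the threshold. A small standalone sketch of that simplified decision; struct cache_model and its fields are made-up stand-ins for the real struct cache members:

#include <stdbool.h>
#include <stdio.h>

enum busy { IDLE, BUSY };

/* Illustrative stand-ins for the dm-cache fields involved. */
struct cache_model {
	bool	 idle_for_a_second;	/* like iot_idle_for(&cache->tracker, HZ) */
	unsigned nr_io_migrations;
	unsigned sectors_per_block;
	unsigned migration_threshold;	/* in sectors */
};

/* Two-state version: migrate eagerly only when idle AND under the threshold. */
static enum busy spare_migration_bandwidth(const struct cache_model *c)
{
	unsigned current_volume =
		(c->nr_io_migrations + 1) * c->sectors_per_block;

	if (c->idle_for_a_second && current_volume <= c->migration_threshold)
		return IDLE;
	return BUSY;
}

int main(void)
{
	struct cache_model c = { true, 3, 128, 2048 };

	printf("%s\n", spare_migration_bandwidth(&c) == IDLE ? "IDLE" : "BUSY");
	return 0;
}
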
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -447,7 +447,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
  * it has been invoked.
  */
 #define dm_report_EIO(m)						\
-({									\
+do {									\
 	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
 									\
 	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
@@ -455,8 +455,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
 		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
 		 dm_noflush_suspending((m)->ti));			\
-	-EIO;								\
-})
+} while (0)
 
 /*
  * Map cloned requests (request-based multipath)
@@ -481,7 +480,8 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	if (!pgpath) {
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_DELAY_REQUEUE;
-		return dm_report_EIO(m);	/* Failed */
+		dm_report_EIO(m);	/* Failed */
+		return DM_MAPIO_KILL;
 	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 		if (pg_init_all_paths(m))
@@ -558,7 +558,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	if (!pgpath) {
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_REQUEUE;
-		return dm_report_EIO(m);
+		dm_report_EIO(m);
+		return -EIO;
 	}
 
 	mpio->pgpath = pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 		if (error == -EIO)
-			error = dm_report_EIO(m);
+			dm_report_EIO(m);
 		/* complete with the original error */
 		r = DM_ENDIO_DONE;
 	}
@@ -1524,8 +1525,10 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 		fail_path(mpio->pgpath);
 
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
-	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-		return dm_report_EIO(m);
+	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+		dm_report_EIO(m);
+		return -EIO;
+	}
 
 	/* Queue for the daemon to resubmit */
 	dm_bio_restore(get_bio_details_from_bio(clone), clone);
...

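The dm-mpath hunks above stop dm_report_EIO() from being a GCC statement expression that evaluates to -EIO; it becomes a pure do { } while (0) logging macro, and each caller now returns a value suited to its context (DM_MAPIO_KILL from multipath_clone_and_map, -EIO from the bio-based paths). A minimal user-space sketch contrasting the two macro shapes; report_error(), MY_EIO and MAPIO_KILL are invented stand-ins for the DM symbols, and the statement-expression form needs GCC or Clang:

#include <stdio.h>

#define MY_EIO 5	/* stand-in for -EIO */
#define MAPIO_KILL 4	/* stand-in for DM_MAPIO_KILL */

/*
 * Old shape: a statement expression whose value is the error code, so
 * "return report_error_old();" both logs and returns the error.
 */
#define report_error_old()			\
({						\
	fprintf(stderr, "no usable path\n");	\
	-MY_EIO;				\
})

/*
 * New shape: purely a statement; the caller must pick the return value
 * appropriate to its context.
 */
#define report_error()				\
do {						\
	fprintf(stderr, "no usable path\n");	\
} while (0)

static int old_style(void)
{
	return report_error_old();	/* logs and yields -5 in one go */
}

static int clone_and_map(void)
{
	report_error();		/* log only ... */
	return MAPIO_KILL;	/* ... then return the mapping verdict */
}

int main(void)
{
	printf("old style returns %d\n", old_style());
	printf("new style returns %d\n", clone_and_map());
	return 0;
}
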
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -507,6 +507,7 @@ static int map_request(struct dm_rq_target_io *tio)
 	case DM_MAPIO_KILL:
 		/* The target wants to complete the I/O */
 		dm_kill_unmapped_request(rq, -EIO);
+		break;
 	default:
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
...

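The dm-rq hunk above adds the break that keeps a DM_MAPIO_KILL result from falling through into the default: arm, which warns and calls BUG() even though the request was already killed. A tiny standalone C illustration of that fallthrough hazard, using hypothetical enum values:

#include <stdio.h>

enum map_result { MAPIO_SUBMITTED, MAPIO_KILL };

static void handle(enum map_result r)
{
	switch (r) {
	case MAPIO_SUBMITTED:
		printf("submitted\n");
		break;
	case MAPIO_KILL:
		printf("killing request\n");
		/* without this break, control falls into "default" below */
		break;
	default:
		printf("unimplemented map return value: %d\n", r);
	}
}

int main(void)
{
	handle(MAPIO_KILL);	/* prints only "killing request" */
	return 0;
}
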
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -484,11 +484,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
 	if (r < 0)
 		return r;
 
-	r = save_sm_roots(pmd);
+	r = dm_tm_pre_commit(pmd->tm);
 	if (r < 0)
 		return r;
 
-	r = dm_tm_pre_commit(pmd->tm);
+	r = save_sm_roots(pmd);
 	if (r < 0)
 		return r;
...

--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+	int r;
+	uint32_t old_count;
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	return sm_ll_dec(&smd->ll, b, &ev);
+	r = sm_ll_dec(&smd->ll, b, &ev);
+	if (!r && (ev == SM_FREE)) {
+		/*
+		 * It's only free if it's also free in the last
+		 * transaction.
+		 */
+		r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+		if (!r && !old_count)
+			smd->nr_allocated_this_transaction--;
+	}
+	return r;
 }
 
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
...

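The space-map hunk above makes sm_disk_dec_block() undo the per-transaction bookkeeping done when a block is allocated: nr_allocated_this_transaction is decremented only when the block becomes free and was also free in the previously committed transaction, i.e. it really was allocated within the current transaction. A toy user-space model of that counter, with plain arrays standing in for the on-disk reference-count lookups (sm_ll_dec/sm_ll_lookup):

#include <stdio.h>

#define NR_BLOCKS 8

/* old_count: reference counts as of the last committed transaction. */
static unsigned old_count[NR_BLOCKS];
/* cur_count: reference counts in the current (uncommitted) transaction. */
static unsigned cur_count[NR_BLOCKS];
static int nr_allocated_this_transaction;

static void new_block(unsigned b)
{
	cur_count[b] = 1;
	nr_allocated_this_transaction++;
}

static void dec_block(unsigned b)
{
	if (--cur_count[b] == 0 && old_count[b] == 0)
		/* Block was allocated in this transaction; undo the count. */
		nr_allocated_this_transaction--;
}

int main(void)
{
	old_count[3] = 1;	/* block 3 was already allocated before this transaction */
	cur_count[3] = 1;

	new_block(5);		/* newly allocated this transaction */
	dec_block(5);		/* freed again: counter goes back down */
	dec_block(3);		/* freed, but it was counted in the old transaction */

	printf("allocated this transaction: %d\n",
	       nr_allocated_this_transaction);	/* prints 0 */
	return 0;
}
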