Commit 0ef0b471 authored by Heinz Mauelshagen, committed by Mike Snitzer

dm: add missing empty lines

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 02f10ba1
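The pattern applied throughout the diff is the kernel coding-style rule that checkpatch reports as "Missing a blank line after declarations": a blank line separates a block's local variable declarations from its first statement. A few hunks also split an initializer that calls a function into a declaration plus a separate assignment (or, in dm_bm_unlock(), fold the two back into one line). A minimal sketch of the resulting style, using hypothetical names rather than code from the commit:

/*
 * Hypothetical illustration only: struct item and item_matches() are not
 * part of this commit. The blank line after the declaration of 'c' and the
 * separate assignment mirror the changes made in __get_name_cell() and
 * friends below.
 */
#include <string.h>

struct item {
	const char *name;
};

static int item_matches(const struct item *it, const char *str)
{
	int c;

	c = strcmp(it->name, str);
	return c == 0;
}

The change is purely stylistic; no behavior is intended to change.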
......@@ -628,6 +628,7 @@ static void bio_complete(struct bio *bio)
{
struct dm_buffer *b = bio->bi_private;
blk_status_t status = bio->bi_status;
bio_uninit(bio);
kfree(bio);
b->end_io(b, status);
......@@ -660,6 +661,7 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
do {
unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
if (!bio_add_page(bio, virt_to_page(ptr), this_step,
offset_in_page(ptr))) {
bio_put(bio);
......@@ -782,6 +784,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
static void __flush_write_list(struct list_head *write_list)
{
struct blk_plug plug;
blk_start_plug(&plug);
while (!list_empty(write_list)) {
struct dm_buffer *b =
......@@ -1179,6 +1182,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
for (; n_blocks--; block++) {
int need_submit;
struct dm_buffer *b;
b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
&write_list);
if (unlikely(!list_empty(&write_list))) {
......@@ -1463,6 +1467,7 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
__link_buffer(b, new_block, LIST_DIRTY);
} else {
sector_t old_block;
wait_on_bit_lock_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
/*
......@@ -1553,6 +1558,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
sector_t s = bdev_nr_sectors(c->bdev);
if (s >= c->start)
s -= c->start;
else
......@@ -1668,10 +1674,12 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
if (likely(c->sectors_per_block_bits >= 0))
retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
else
retain_bytes /= c->block_size;
return retain_bytes;
}
......@@ -1806,6 +1814,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (block_size <= KMALLOC_MAX_SIZE &&
(block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
c->slab_cache = kmem_cache_create(slab_name, block_size, align,
SLAB_RECLAIM_ACCOUNT, NULL);
......
......@@ -535,6 +535,7 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
bool may_format_device)
{
int r;
cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
......@@ -568,6 +569,7 @@ static void update_flags(struct cache_disk_superblock *disk_super,
flags_mutator mutator)
{
uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
disk_super->flags = cpu_to_le32(sb_flags);
}
......@@ -732,6 +734,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
static __le64 pack_value(dm_oblock_t block, unsigned int flags)
{
uint64_t value = from_oblock(block);
value <<= 16;
value = value | (flags & FLAGS_MASK);
return cpu_to_le64(value);
......@@ -741,6 +744,7 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flag
{
uint64_t value = le64_to_cpu(value_le);
uint64_t b = value >> 16;
*block = to_oblock(b);
*flags = value & FLAGS_MASK;
}
......@@ -1254,6 +1258,7 @@ static int __insert(struct dm_cache_metadata *cmd,
{
int r;
__le64 value = pack_value(oblock, M_VALID);
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
......@@ -1580,6 +1585,7 @@ static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bi
{
int r;
unsigned int i;
for (i = 0; i < nr_bits; i++) {
r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
if (r)
......
......@@ -89,6 +89,7 @@ static inline int policy_emit_config_values(struct dm_cache_policy *p, char *res
unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
if (p->emit_config_values)
return p->emit_config_values(p, result, maxlen, sz_ptr);
......@@ -121,12 +122,14 @@ static inline size_t bitset_size_in_bytes(unsigned int nr_entries)
static inline unsigned long *alloc_bitset(unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
return vzalloc(s);
}
static inline void clear_bitset(void *bitset, unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
memset(bitset, 0, s);
}
......
......@@ -624,6 +624,7 @@ static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct en
static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
__h_insert(ht, h, e);
}
......@@ -1633,6 +1634,7 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{
struct smq_policy *mq = to_smq_policy(p);
mq->migrations_allowed = allow;
}
......
......@@ -534,6 +534,7 @@ static unsigned int lock_level(struct bio *bio)
static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
BUG_ON(!pb);
return pb;
}
......@@ -690,6 +691,7 @@ static void clear_discard(struct cache *cache, dm_dblock_t b)
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
int r;
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(b), cache->discard_bitset);
spin_unlock_irq(&cache->lock);
......@@ -700,6 +702,7 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
int r;
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
cache->discard_bitset);
......@@ -814,6 +817,7 @@ static void accounted_request(struct cache *cache, struct bio *bio)
static void issue_op(struct bio *bio, void *context)
{
struct cache *cache = context;
accounted_request(cache, bio);
}
......@@ -1074,6 +1078,7 @@ static void quiesce(struct dm_cache_migration *mg,
static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{
struct continuation *k = container_of(ws, struct continuation, ws);
return container_of(k, struct dm_cache_migration, k);
}
......@@ -1225,6 +1230,7 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
static void mg_success(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
mg_complete(mg, mg->k.input == 0);
}
......@@ -1363,6 +1369,7 @@ static void mg_copy(struct work_struct *ws)
* Fallback to a real full copy after doing some tidying up.
*/
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */
mg->overwrite_bio = NULL;
inc_io_migrations(mg->cache);
......@@ -1465,12 +1472,15 @@ static void invalidate_complete(struct dm_cache_migration *mg, bool success)
static void invalidate_completed(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
invalidate_complete(mg, !mg->k.input);
}
static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{
-int r = policy_invalidate_mapping(cache->policy, cblock);
+int r;
+
+r = policy_invalidate_mapping(cache->policy, cblock);
if (!r) {
r = dm_cache_remove_mapping(cache->cmd, cblock);
if (r) {
......
......@@ -1257,6 +1257,7 @@ static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
return (__le64 *) ptr;
}
......@@ -1265,6 +1266,7 @@ static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
cc->iv_size + sizeof(uint64_t);
return (unsigned int *)ptr;
}
......@@ -1741,6 +1743,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
static void kcryptd_io_bio_endio(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
bio_endio(io->base_bio);
}
......
......@@ -112,13 +112,14 @@ static int writeset_marked_on_disk(struct dm_disk_bitset *info,
struct writeset_metadata *m, dm_block_t block,
bool *result)
{
+int r;
dm_block_t old = m->root;
/*
* The bitset was flushed when it was archived, so we know there'll
* be no change to the root.
*/
-int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
+r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
if (r) {
DMERR("%s: dm_bitset_test_bit failed", __func__);
return r;
......@@ -210,6 +211,7 @@ static void sb_prepare_for_write(struct dm_block_validator *v,
static int check_metadata_version(struct superblock_disk *disk)
{
uint32_t metadata_version = le32_to_cpu(disk->version);
if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
......@@ -409,6 +411,7 @@ static int ws_eq(void *context, const void *value1, const void *value2)
static void setup_writeset_tree_info(struct era_metadata *md)
{
struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
md->writeset_tree_info.tm = md->tm;
md->writeset_tree_info.levels = 1;
vt->context = md;
......@@ -419,9 +422,9 @@ static void setup_writeset_tree_info(struct era_metadata *md)
}
static void setup_era_array_info(struct era_metadata *md)
{
struct dm_btree_value_type vt;
vt.context = NULL;
vt.size = sizeof(__le32);
vt.inc = NULL;
......@@ -1391,6 +1394,7 @@ static int perform_rpc(struct era *era, struct rpc *rpc)
static int in_worker0(struct era *era, int (*fn)(struct era_metadata *md))
{
struct rpc rpc;
rpc.fn0 = fn;
rpc.fn1 = NULL;
......@@ -1401,6 +1405,7 @@ static int in_worker1(struct era *era,
int (*fn)(struct era_metadata *md, void *ref), void *arg)
{
struct rpc rpc;
rpc.fn0 = NULL;
rpc.fn1 = fn;
rpc.arg = arg;
......@@ -1712,6 +1717,7 @@ static int era_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct era *era = ti->private;
return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}
......
......@@ -327,6 +327,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct flakey_c *fc = ti->private;
unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
pb->bio_submitted = false;
if (op_is_zone_mgmt(bio_op(bio)))
......
......@@ -197,6 +197,7 @@ static void list_get_page(struct dpages *dp,
static void list_next_page(struct dpages *dp)
{
struct page_list *pl = (struct page_list *) dp->context_ptr;
dp->context_ptr = pl->next;
dp->context_u = 0;
}
......
......@@ -92,7 +92,9 @@ static struct hash_cell *__get_name_cell(const char *str)
while (n) {
struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
-int c = strcmp(hc->name, str);
+int c;
+
+c = strcmp(hc->name, str);
if (!c) {
dm_get(hc->md);
return hc;
......@@ -109,7 +111,9 @@ static struct hash_cell *__get_uuid_cell(const char *str)
while (n) {
struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
-int c = strcmp(hc->uuid, str);
+int c;
+
+c = strcmp(hc->uuid, str);
if (!c) {
dm_get(hc->md);
return hc;
......@@ -149,7 +153,9 @@ static void __link_name(struct hash_cell *new_hc)
while (*n) {
struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
-int c = strcmp(hc->name, new_hc->name);
+int c;
+
+c = strcmp(hc->name, new_hc->name);
BUG_ON(!c);
parent = *n;
n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;
......@@ -172,7 +178,9 @@ static void __link_uuid(struct hash_cell *new_hc)
while (*n) {
struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
-int c = strcmp(hc->uuid, new_hc->uuid);
+int c;
+
+c = strcmp(hc->uuid, new_hc->uuid);
BUG_ON(!c);
parent = *n;
n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;
......@@ -621,6 +629,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
*/
for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
void *uuid_ptr;
hc = container_of(n, struct hash_cell, name_node);
if (!filter_device(hc, param->name, param->uuid))
continue;
......@@ -848,6 +857,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
int srcu_idx;
table = dm_get_inactive_table(md, &srcu_idx);
if (table) {
if (!(dm_table_get_mode(table) & FMODE_WRITE))
......
......@@ -151,6 +151,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
t->total_period >>= shift;
t->io_period >>= shift;
}
......@@ -678,6 +679,7 @@ static void do_work(struct work_struct *work)
static void dispatch_job(struct kcopyd_job *job)
{
struct dm_kcopyd_client *kc = job->kc;
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->callback_jobs, job);
......
......@@ -655,12 +655,14 @@ static int disk_resume(struct dm_dirty_log *log)
static uint32_t core_get_region_size(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
return lc->region_size;
}
static int core_resume(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
lc->sync_search = 0;
return 0;
}
......@@ -668,12 +670,14 @@ static int core_resume(struct dm_dirty_log *log)
static int core_is_clean(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
return log_test_bit(lc->clean_bits, region);
}
static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
{
struct log_c *lc = (struct log_c *) log->context;
return log_test_bit(lc->sync_bits, region);
}
......@@ -726,12 +730,14 @@ static int disk_flush(struct dm_dirty_log *log)
static void core_mark_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
log_clear_bit(lc, lc->clean_bits, region);
}
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
if (likely(!lc->flush_failed))
log_set_bit(lc, lc->clean_bits, region);
}
......
......@@ -137,6 +137,7 @@ static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
if (r) {
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
r = test_bit(MPATHF_bit, &m->flags);
spin_unlock_irqrestore(&m->lock, flags);
......@@ -711,6 +712,7 @@ static void process_queued_bios(struct work_struct *work)
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
r = __multipath_map_bio(m, bio, mpio);
switch (r) {
......@@ -2122,6 +2124,7 @@ static int multipath_busy(struct dm_target *ti)
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths)) {
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
spin_unlock_irqrestore(&m->lock, flags);
......
......@@ -128,6 +128,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
tio->duration_jiffies = jiffies - tio->duration_jiffies;
dm_stats_account_io(&md->stats, rq_data_dir(orig),
blk_rq_pos(orig), tio->n_sectors, true,
......@@ -435,6 +436,7 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
tio->duration_jiffies = jiffies;
tio->n_sectors = blk_rq_sectors(orig);
dm_stats_account_io(&md->stats, rq_data_dir(orig),
......
......@@ -276,6 +276,7 @@ static void skip_metadata(struct pstore *ps)
{
uint32_t stride = ps->exceptions_per_area + 1;
chunk_t next_free = ps->next_free;
if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
ps->next_free++;
}
......@@ -521,6 +522,7 @@ static int read_exceptions(struct pstore *ps,
if (DM_PREFETCH_CHUNKS) {
do {
chunk_t pf_chunk = area_location(ps, prefetch_area);
if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
break;
dm_bufio_prefetch(client, pf_chunk, 1);
......@@ -879,6 +881,7 @@ static int persistent_ctr(struct dm_exception_store *store, char *options)
if (options) {
char overflow = toupper(options[0]);
if (overflow == 'O')
store->userspace_supports_overflow = true;
else {
......
......@@ -245,12 +245,14 @@ struct dm_snap_tracked_chunk {
static void init_tracked_chunk(struct bio *bio)
{
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
INIT_HLIST_NODE(&c->node);
}
static bool is_bio_tracked(struct bio *bio)
{
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
return !hlist_unhashed(&c->node);
}
......@@ -399,6 +401,7 @@ static struct origin *__lookup_origin(struct block_device *origin)
static void __insert_origin(struct origin *o)
{
struct list_head *sl = &_origins[origin_hash(o->bdev)];
list_add_tail(&o->hash_list, sl);
}
......@@ -418,6 +421,7 @@ static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
static void __insert_dm_origin(struct dm_origin *o)
{
struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
list_add_tail(&o->hash_list, sl);
}
......@@ -868,6 +872,7 @@ static int calc_max_buckets(void)
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
mem /= sizeof(struct hlist_bl_head);
return mem;
......@@ -1552,6 +1557,7 @@ static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
* throttling is unlikely to negatively impact performance.
*/
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue(&s->in_progress_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&s->in_progress_wait.lock);
......@@ -2567,6 +2573,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
if (o) {
if (limit) {
struct dm_snapshot *s;
list_for_each_entry(s, &o->snapshots, list)
if (unlikely(!wait_for_in_progress(s, true)))
goto again;
......
......@@ -336,6 +336,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (s->n_histogram_entries) {
unsigned long long *hi;
hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
if (!hi) {
r = -ENOMEM;
......@@ -357,6 +358,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
s->stat_percpu[cpu] = p;
if (s->n_histogram_entries) {
unsigned long long *hi;
hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
if (!hi) {
r = -ENOMEM;
......@@ -495,6 +497,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
DMEMIT(" precise_timestamps");
if (s->n_histogram_entries) {
unsigned int i;
DMEMIT(" histogram:");
for (i = 0; i < s->n_histogram_entries; i++) {
if (i)
......@@ -567,6 +570,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
*/
#if BITS_PER_LONG == 32
unsigned long flags;
local_irq_save(flags);
#else
preempt_disable();
......@@ -578,6 +582,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
atomic_inc(&shared->in_flight[idx]);
} else {
unsigned long long duration;
dm_stat_round(s, shared, p);
atomic_dec(&shared->in_flight[idx]);
p->sectors[idx] += len;
......@@ -592,8 +597,10 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
}
if (s->n_histogram_entries) {
unsigned int lo = 0, hi = s->n_histogram_entries + 1;
while (lo + 1 < hi) {
unsigned int mid = (lo + hi) / 2;
if (s->histogram_boundaries[mid - 1] > duration) {
hi = mid;
} else {
......@@ -741,6 +748,7 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned int i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
......@@ -775,6 +783,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
local_irq_enable();
if (s->n_histogram_entries) {
unsigned int i;
for (i = 0; i < s->n_histogram_entries + 1; i++) {
local_irq_disable();
p = &s->stat_percpu[smp_processor_id()][x];
......@@ -890,6 +899,7 @@ static int dm_stats_print(struct dm_stats *stats, int id,
dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
if (s->n_histogram_entries) {
unsigned int i;
for (i = 0; i < s->n_histogram_entries + 1; i++) {
DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
}
......@@ -962,6 +972,7 @@ static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
unsigned long long hi;
int s;
char ch;
s = sscanf(h, "%llu%c", &hi, &ch);
if (!s || (s == 2 && ch != ','))
return -EINVAL;
......@@ -987,10 +998,8 @@ static int message_stats_create(struct mapped_device *md,
unsigned int divisor;
const char *program_id, *aux_data;
unsigned int stat_flags = 0;
unsigned int n_histogram_entries = 0;
unsigned long long *histogram_boundaries = NULL;
struct dm_arg_set as, as_backup;
const char *a;
unsigned int feature_args;
......
......@@ -143,6 +143,7 @@ int dm_sysfs_init(struct mapped_device *md)
void dm_sysfs_exit(struct mapped_device *md)
{
struct kobject *kobj = dm_kobject(md);
kobject_put(kobj);
wait_for_completion(dm_get_completion_from_kobject(kobj));
}
......@@ -403,6 +403,7 @@ static void subtree_dec(void *context, const void *value, unsigned int count)
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
memcpy(&v1_le, value1_le, sizeof(v1_le));
memcpy(&v2_le, value2_le, sizeof(v2_le));
......
......@@ -1038,6 +1038,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
if (m->cell)
cell_defer_no_holder(tc, m->cell);
mempool_free(m, &tc->pool->mapping_pool);
......@@ -2412,6 +2413,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
wake_worker(pool);
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
......@@ -2474,6 +2476,7 @@ static struct noflush_work *to_noflush(struct work_struct *ws)
static void do_noflush_start(struct work_struct *ws)
{
struct noflush_work *w = to_noflush(ws);
w->tc->requeue_mode = true;
requeue_io(w->tc);
pool_work_complete(&w->pw);
......@@ -2482,6 +2485,7 @@ static void do_noflush_start(struct work_struct *ws)
static void do_noflush_stop(struct work_struct *ws)
{
struct noflush_work *w = to_noflush(ws);
w->tc->requeue_mode = false;
pool_work_complete(&w->pw);
}
......@@ -3241,6 +3245,7 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
* delete after you've grown the device).
*/
dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
return min((dm_block_t)1024ULL /* 4M */, quarter);
}
......
......@@ -114,6 +114,7 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
do {
int r;
size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
flush_kernel_vmap_range((void *)data, this_step);
sg_init_table(&sg, 1);
sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
......@@ -683,8 +684,10 @@ static void verity_prefetch_io(struct work_struct *work)
for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
sector_t hash_block_end;
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);
......@@ -1367,6 +1370,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
hash_position = v->hash_start;
for (i = v->levels - 1; i >= 0; i--) {
sector_t s;
v->hash_level_block[i] = hash_position;
s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
>> ((i + 1) * v->hash_per_block_bits);
......
......@@ -83,16 +83,13 @@ struct wc_entry {
struct rb_node rb_node;
struct list_head lru;
unsigned short wc_list_contiguous;
-bool write_in_progress
#if BITS_PER_LONG == 64
-: 1
-#endif
-;
-unsigned long index
-#if BITS_PER_LONG == 64
-: 47
+bool write_in_progress : 1;
+unsigned long index : 47;
+#else
+bool write_in_progress;
+unsigned long index;
#endif
-;
unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
uint64_t original_sector;
......@@ -300,6 +297,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
}
if (da != p) {
long i;
wc->memory_map = NULL;
pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
......@@ -309,6 +307,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i,
p - i, DAX_ACCESS, NULL, &pfn);
if (daa <= 0) {
......@@ -507,6 +506,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
while (1) {
unsigned int j;
i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
if (unlikely(i == bitmap_bits))
break;
......@@ -637,6 +637,7 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
while (1) {
struct wc_entry *e2;
if (flags & WFE_LOWEST_SEQ)
node = rb_prev(&e->rb_node);
else
......@@ -679,6 +680,7 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
{
if (WC_MODE_SORT_FREELIST(wc)) {
struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
if (unlikely(!*node))
wc->current_free = e;
while (*node) {
......@@ -718,6 +720,7 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, s
if (WC_MODE_SORT_FREELIST(wc)) {
struct rb_node *next;
if (unlikely(!wc->current_free))
return NULL;
e = wc->current_free;
......@@ -864,6 +867,7 @@ static void writecache_flush_work(struct work_struct *work)
static void writecache_autocommit_timer(struct timer_list *t)
{
struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
if (!writecache_has_error(wc))
queue_work(wc->writeback_wq, &wc->flush_work);
}
......@@ -963,6 +967,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
e->index = b;
e->write_in_progress = false;
cond_resched();
......@@ -1006,6 +1011,7 @@ static void writecache_resume(struct dm_target *ti)
r = writecache_read_metadata(wc, wc->metadata_sectors);
if (r) {
size_t sb_entries_offset;
writecache_error(wc, r, "unable to read metadata: %d", r);
sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
memset((char *)wc->memory_map + sb_entries_offset, -1,
......@@ -1035,6 +1041,7 @@ static void writecache_resume(struct dm_target *ti)
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
struct wc_memory_entry wme;
if (writecache_has_error(wc)) {
e->original_sector = -1;
e->seq_count = -1;
......@@ -1056,6 +1063,7 @@ static void writecache_resume(struct dm_target *ti)
#endif
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
if (!writecache_entry_is_committed(wc, e)) {
if (read_seq_count(wc, e) != -1) {
erase_this:
......@@ -1245,6 +1253,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
do {
struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
buf = bvec_kmap_local(&bv);
size = bv.bv_len;
if (unlikely(size > remaining_size))
......@@ -1252,6 +1261,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
if (rw == READ) {
int r;
r = copy_mc_to_kernel(buf, data, size);
flush_dcache_page(bio_page(bio));
if (unlikely(r)) {
......@@ -1379,6 +1389,7 @@ static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
while (bio_size < bio->bi_iter.bi_size) {
if (!search_used) {
struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
if (!f)
break;
write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
......@@ -1388,6 +1399,7 @@ static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
} else {
struct wc_entry *f;
struct rb_node *next = rb_next(&e->rb_node);
if (!next)
break;
f = container_of(next, struct wc_entry, rb_node);
......@@ -1428,6 +1440,7 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
do {
bool found_entry = false;
bool search_used = false;
if (writecache_has_error(wc)) {
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
return WC_MAP_ERROR;
......@@ -1606,6 +1619,7 @@ static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t
if (bio->bi_private == (void *)1) {
int dir = bio_data_dir(bio);
if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
wake_up(&wc->bio_in_progress_wait[dir]);
......@@ -1944,6 +1958,7 @@ static void writecache_writeback(struct work_struct *work)
if (likely(wc->pause != 0)) {
while (1) {
unsigned long idle;
if (unlikely(wc->cleaner) || unlikely(wc->writeback_all) ||
unlikely(dm_suspended(wc->ti)))
break;
......@@ -2389,6 +2404,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
string = dm_shift_arg(&as), opt_params--;
if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
unsigned long long start_sector;
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
goto invalid_optional;
......@@ -2425,6 +2441,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
wc->autocommit_blocks_set = true;
} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
unsigned int autocommit_msecs;
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
goto invalid_optional;
......@@ -2435,6 +2452,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
wc->autocommit_time_set = true;
} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
unsigned int max_age_msecs;
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
goto invalid_optional;
......@@ -2462,6 +2480,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
wc->metadata_only = true;
} else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
unsigned int pause_msecs;
if (WC_MODE_PMEM(wc))
goto invalid_optional;
string = dm_shift_arg(&as), opt_params--;
......
......@@ -112,6 +112,7 @@ static void on_entries(struct dm_array_info *info, struct array_block *ab,
void (*fn)(void *, const void *, unsigned int))
{
unsigned int nr_entries = le32_to_cpu(ab->nr_entries);
fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
}
......@@ -438,6 +439,7 @@ static int drop_blocks(struct resize *resize, unsigned int begin_index,
while (begin_index != end_index) {
uint64_t key = begin_index++;
r = dm_btree_remove(&resize->info->btree_info, resize->root,
&key, &resize->root);
if (r)
......@@ -622,6 +624,7 @@ static void __block_dec(void *context, const void *value)
static void block_dec(void *context, const void *value, unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++, value += sizeof(__le64))
__block_dec(context, value);
}
......@@ -695,6 +698,7 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
__dm_written_to_disk(value)
{
int r = array_resize(info, root, old_size, new_size, value, new_root);
__dm_unbless_for_disk(value);
return r;
}
......
......@@ -74,6 +74,7 @@ int dm_bitset_new(struct dm_disk_bitset *info, dm_block_t *root,
uint32_t size, bit_value_fn fn, void *context)
{
struct packer_context p;
p.fn = fn;
p.nr_bits = size;
p.context = context;
......
......@@ -92,6 +92,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
unsigned int h = __find_holder(lock, task);
lock->holders[h] = NULL;
put_task_struct(task);
}
......@@ -355,6 +356,7 @@ struct buffer_aux {
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
aux->validator = NULL;
bl_init(&aux->lock);
}
......@@ -362,6 +364,7 @@ static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
if (aux->validator) {
aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
dm_bufio_get_block_size(dm_bufio_get_client(buf)));
......@@ -436,6 +439,7 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
{
if (unlikely(!aux->validator)) {
int r;
if (!v)
return 0;
r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
......@@ -591,8 +595,7 @@ EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
void dm_bm_unlock(struct dm_block *b)
{
-struct buffer_aux *aux;
-aux = dm_bufio_get_aux_data(to_buffer(b));
+struct buffer_aux *aux = dm_bufio_get_aux_data(to_buffer(b));
if (aux->write_locked) {
dm_bufio_mark_buffer_dirty(to_buffer(b));
......
......@@ -119,6 +119,7 @@ static inline void *value_base(struct btree_node *n)
static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
uint32_t value_size = le32_to_cpu(n->header.value_size);
return value_base(n) + (value_size * index);
}
......
......@@ -87,6 +87,7 @@ static int node_copy(struct btree_node *left, struct btree_node *right, int shif
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
if (value_size != le32_to_cpu(right->header.value_size)) {
DMERR("mismatched value size");
return -EILSEQ;
......@@ -130,6 +131,7 @@ static void delete_at(struct btree_node *n, unsigned int index)
unsigned int nr_entries = le32_to_cpu(n->header.nr_entries);
unsigned int nr_to_copy = nr_entries - (index + 1);
uint32_t value_size = le32_to_cpu(n->header.value_size);
BUG_ON(index >= nr_entries);
if (nr_to_copy) {
......@@ -265,6 +267,7 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
* Rebalance.
*/
unsigned int target_left = (nr_left + nr_right) / 2;
ret = shift(left, right, nr_left - target_left);
if (ret)
return ret;
......@@ -557,6 +560,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
*/
if (shadow_has_parent(s)) {
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
......@@ -650,6 +654,7 @@ static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
*/
if (shadow_has_parent(s)) {
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
......
......@@ -510,6 +510,7 @@ static void copy_entries(struct btree_node *dest, unsigned int dest_offset,
unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memcpy(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
......@@ -523,6 +524,7 @@ static void move_entries(struct btree_node *dest, unsigned int dest_offset,
unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memmove(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
......@@ -559,10 +561,12 @@ static void redistribute2(struct btree_node *left, struct btree_node *right)
if (nr_left < target_left) {
unsigned int delta = target_left - nr_left;
copy_entries(left, nr_left, right, 0, delta);
shift_down(right, delta);
} else if (nr_left > target_left) {
unsigned int delta = nr_left - target_left;
if (nr_right)
shift_up(right, delta);
copy_entries(right, 0, left, target_left, delta);
......@@ -593,18 +597,21 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
if (nr_left < target_left) {
unsigned int left_short = target_left - nr_left;
copy_entries(left, nr_left, right, 0, left_short);
copy_entries(center, 0, right, left_short, target_center);
shift_down(right, nr_right - target_right);
} else if (nr_left < (target_left + target_center)) {
unsigned int left_to_center = nr_left - target_left;
copy_entries(center, 0, left, target_left, left_to_center);
copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
shift_down(right, nr_right - target_right);
} else {
unsigned int right_short = target_right - nr_right;
shift_up(right, right_short);
copy_entries(right, 0, left, nr_left - right_short, right_short);
copy_entries(center, 0, left, target_left, nr_left - target_left);
......@@ -1004,6 +1011,7 @@ static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type
/* Should we move entries to the left sibling? */
if (parent_index > 0) {
dm_block_t left_b = value64(parent, parent_index - 1);
r = dm_tm_block_is_shared(s->info->tm, left_b, &left_shared);
if (r)
return r;
......@@ -1021,6 +1029,7 @@ static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type
/* Should we move entries to the right sibling? */
if (parent_index < (nr_parent - 1)) {
dm_block_t right_b = value64(parent, parent_index + 1);
r = dm_tm_block_is_shared(s->info->tm, right_b, &right_shared);
if (r)
return r;
......@@ -1588,6 +1597,7 @@ EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
int r = inc_or_backtrack(c);
if (!r) {
r = find_leaf(c);
if (r)
......
......@@ -609,6 +609,7 @@ static int sm_ll_inc_overflow(struct ll_disk *ll, dm_block_t b, struct inc_conte
static inline int shadow_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
int r, inc;
r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ic->ie_disk.blocknr),
&dm_sm_bitmap_validator, &ic->bitmap_block, &inc);
if (r < 0) {
......@@ -748,6 +749,7 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
*nr_allocations = 0;
while (b != e) {
int r = __sm_ll_inc(ll, b, e, nr_allocations, &b);
if (r)
return r;
}
......@@ -930,6 +932,7 @@ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
*nr_allocations = 0;
while (b != e) {
int r = __sm_ll_dec(ll, b, e, nr_allocations, &b);
if (r)
return r;
}
......@@ -1166,8 +1169,10 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
static int disk_ll_init_index(struct ll_disk *ll)
{
unsigned int i;
for (i = 0; i < IE_CACHE_SIZE; i++) {
struct ie_cache *iec = ll->ie_cache + i;
iec->valid = false;
iec->dirty = false;
}
......@@ -1191,6 +1196,7 @@ static int disk_ll_commit(struct ll_disk *ll)
for (i = 0; i < IE_CACHE_SIZE; i++) {
struct ie_cache *iec = ll->ie_cache + i;
if (iec->valid && iec->dirty)
r = ie_cache_writeback(ll, iec);
}
......
......@@ -49,6 +49,7 @@ static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
*count = smd->old_ll.nr_blocks;
return 0;
......@@ -57,6 +58,7 @@ static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
*count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction;
return 0;
......@@ -66,6 +68,7 @@ static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
return sm_ll_lookup(&smd->ll, b, result);
}
......
......@@ -114,6 +114,7 @@ static bool brb_empty(struct bop_ring_buffer *brb)
static unsigned int brb_next(struct bop_ring_buffer *brb, unsigned int old)
{
unsigned int r = old + 1;
return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
}
......@@ -182,6 +183,7 @@ struct sm_metadata {
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b, dm_block_t e)
{
int r = brb_push(&smm->uncommitted, type, b, e);
if (r) {
DMERR("too many recursive allocations");
return -ENOMEM;
......@@ -487,6 +489,7 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
int r = sm_metadata_new_block_(sm, b);
if (r) {
DMERR_LIMIT("unable to allocate new metadata block");
return r;
......
......@@ -37,6 +37,7 @@ static unsigned int prefetch_hash(dm_block_t b)
static void prefetch_wipe(struct prefetch_set *p)
{
unsigned int i;
for (i = 0; i < PREFETCH_SIZE; i++)
p->blocks[i] = PREFETCH_SENTINEL;
}
......