Commit f0b2769a authored by Linus Torvalds


Merge tag 'for-6.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Fix the DM cache target to free background tracker work items,
   otherwise a slab BUG occurs when kmem_cache_destroy() is called.

 - Improve two of DM's shrinker names to reflect their use.

 - Fix the DM flakey target to not corrupt the zero page. Fix dm-flakey
   on 32-bit highmem systems by using bvec_kmap_local instead of
   page_address. Also, fix the logic used when imposing the
   "corrupt_bio_byte" feature.

 - Stop using WQ_UNBOUND for DM verity target's verify_wq because it
   causes significant Android latencies on ARM64 (and doesn't show real
   benefit on other architectures).

 - Add a negative check to catch the simple case of a DM table
   referencing itself. More complex scenarios that use intermediate
   devices to self-reference still need to be avoided/handled in
   userspace.

 - Fix DM core's resize to send only one uevent instead of two. This
   fixes a race with udev: if udev wins the race, it misses uevents
   (which caused premature unmount attempts by systemd).

 - Add cond_resched() to workqueue functions in DM core, dm-thin and
   dm-cache so that their loops aren't the cause of unintended CPU
   scheduling fairness issues (a minimal sketch of the pattern follows
   this list).

 - Fix all of DM's checkpatch errors and warnings (famous last words).
   Various other small cleanups.
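
As a hedged illustration of the cond_resched() pattern these commits
apply (a minimal sketch in kernel-style C; the example_* names are
hypothetical, only cond_resched() and the list helpers are real API):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

/* Hypothetical entry type; only the cond_resched() call matters. */
struct example_entry {
	struct list_head list;
};

static LIST_HEAD(example_list);

static void process_entry(struct example_entry *e)
{
	/* stand-in for the real per-entry work */
}

static void example_work_fn(struct work_struct *work)
{
	struct example_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &example_list, list) {
		process_entry(e);
		cond_resched();	/* yield the CPU between iterations */
	}
}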

* tag 'for-6.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (62 commits)
  dm: remove unnecessary (void*) conversion in event_callback()
  dm ioctl: remove unnecessary check when using dm_get_mdptr()
  dm ioctl: assert _hash_lock is held in __hash_remove
  dm cache: add cond_resched() to various workqueue loops
  dm thin: add cond_resched() to various workqueue loops
  dm: add cond_resched() to dm_wq_requeue_work()
  dm: add cond_resched() to dm_wq_work()
  dm sysfs: make kobj_type structure constant
  dm: update targets using system workqueues to use a local workqueue
  dm: remove flush_scheduled_work() during local_exit()
  dm clone: prefer kvmalloc_array()
  dm: declare variables static when sensible
  dm: fix suspect indent whitespace
  dm ioctl: prefer strscpy() instead of strlcpy()
  dm: avoid void function return statements
  dm integrity: change macros min/max() -> min_t/max_t where appropriate
  dm: fix use of sizeof() macro
  dm: avoid 'do {} while(0)' loop in single statement macros
  dm log: avoid multiple line dereference
  dm log: avoid trailing semicolon in macro
  ...
parents 23064dfe d695e441
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0-only
/*
* Creating audit records for mapped devices.
*
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
......@@ -285,14 +286,14 @@ EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
struct dm_deferred_entry {
struct dm_deferred_set *ds;
unsigned count;
unsigned int count;
struct list_head work_items;
};
struct dm_deferred_set {
spinlock_t lock;
unsigned current_entry;
unsigned sweeper;
unsigned int current_entry;
unsigned int sweeper;
struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
......@@ -338,7 +339,7 @@ struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
static unsigned ds_next(unsigned index)
static unsigned int ds_next(unsigned int index)
{
return (index + 1) % DEFERRED_SET_SIZE;
}
......@@ -373,7 +374,7 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
unsigned next_entry;
unsigned int next_entry;
spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
......@@ -432,7 +433,7 @@ static int __init dm_bio_prison_init(void)
return 0;
bad:
bad:
while (i--)
_exits[i]();
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011-2017 Red Hat, Inc.
*
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2017 Red Hat, Inc.
*
......@@ -148,7 +149,7 @@ static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
static bool __get(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell)
......@@ -171,7 +172,7 @@ static bool __get(struct dm_bio_prison_v2 *prison,
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
......@@ -194,7 +195,7 @@ static bool __put(struct dm_bio_prison_v2 *prison,
// FIXME: shared locks granted above the lock level could starve this
if (!cell->shared_count) {
if (cell->exclusive_lock){
if (cell->exclusive_lock) {
if (cell->quiesce_continuation) {
queue_work(prison->wq, cell->quiesce_continuation);
cell->quiesce_continuation = NULL;
......@@ -224,7 +225,7 @@ EXPORT_SYMBOL_GPL(dm_cell_put_v2);
static int __lock(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
......@@ -255,7 +256,7 @@ static int __lock(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
......@@ -291,7 +292,7 @@ EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
static int __promote(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
unsigned new_lock_level)
unsigned int new_lock_level)
{
if (!cell->exclusive_lock)
return -EINVAL;
......@@ -302,7 +303,7 @@ static int __promote(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
unsigned new_lock_level)
unsigned int new_lock_level)
{
int r;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011-2017 Red Hat, Inc.
*
......@@ -44,8 +45,8 @@ struct dm_cell_key_v2 {
struct dm_bio_prison_cell_v2 {
// FIXME: pack these
bool exclusive_lock;
unsigned exclusive_level;
unsigned shared_count;
unsigned int exclusive_level;
unsigned int shared_count;
struct work_struct *quiesce_continuation;
struct rb_node node;
......@@ -86,7 +87,7 @@ void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
*/
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
......@@ -114,7 +115,7 @@ bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
unsigned lock_level,
unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
......@@ -132,7 +133,7 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
unsigned new_lock_level);
unsigned int new_lock_level);
/*
* Adds any held bios to the bio list.
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
*
......
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0-only
#include "dm-core.h"
/*
......@@ -45,5 +45,4 @@ void dm_kobject_release(struct kobject *kobj)
{
complete(dm_get_completion_from_kobject(kobj));
}
EXPORT_SYMBOL(dm_kobject_release);
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Red Hat. All rights reserved.
*
......@@ -17,7 +18,7 @@ struct bt_work {
};
struct background_tracker {
unsigned max_work;
unsigned int max_work;
atomic_t pending_promotes;
atomic_t pending_writebacks;
atomic_t pending_demotes;
......@@ -29,7 +30,7 @@ struct background_tracker {
struct kmem_cache *work_cache;
};
struct background_tracker *btracker_create(unsigned max_work)
struct background_tracker *btracker_create(unsigned int max_work)
{
struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
......@@ -60,6 +61,14 @@ EXPORT_SYMBOL_GPL(btracker_create);
void btracker_destroy(struct background_tracker *b)
{
struct bt_work *w, *tmp;
BUG_ON(!list_empty(&b->issued));
list_for_each_entry_safe (w, tmp, &b->queued, list) {
list_del(&w->list);
kmem_cache_free(b->work_cache, w);
}
kmem_cache_destroy(b->work_cache);
kfree(b);
}
......@@ -147,13 +156,13 @@ static void update_stats(struct background_tracker *b, struct policy_work *w, in
}
}
unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
unsigned btracker_nr_demotions_queued(struct background_tracker *b)
unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_demotes);
}
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 Red Hat. All rights reserved.
*
......@@ -12,19 +13,44 @@
/*----------------------------------------------------------------*/
/*
* The cache policy decides what background work should be performed,
* such as promotions, demotions and writebacks. The core cache target
* is in charge of performing the work, and does so when it sees fit.
*
* The background_tracker acts as a go-between, keeping track of future
* work that the policy has decided upon and handing (issuing) it to
* the core target when requested.
*
* There is no locking in this, so calls will probably need to be
* protected with a spinlock.
*/
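
For illustration, a minimal sketch of caller-side locking (assuming
the caller wraps tracker calls in its own spinlock; example_cache and
queue_lock are hypothetical names, btracker_queue is the real call
declared below):

struct example_cache {
	spinlock_t queue_lock;
	struct background_tracker *btracker;
};

static int example_queue_work(struct example_cache *cache,
			      struct policy_work *work)
{
	int r;

	/* The tracker does no locking itself; serialize calls here. */
	spin_lock_irq(&cache->queue_lock);
	r = btracker_queue(cache->btracker, work, NULL);
	spin_unlock_irq(&cache->queue_lock);

	return r;
}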
struct background_work;
struct background_tracker;
/*
* FIXME: discuss lack of locking in all methods.
* Create a new tracker, it will not be able to queue more than
* 'max_work' entries.
*/
struct background_tracker *btracker_create(unsigned int max_work);
/*
* Destroy the tracker. No work that has been issued but not yet
* completed should exist when this is called. It is fine to have
* queued but unissued work.
*/
struct background_tracker *btracker_create(unsigned max_work);
void btracker_destroy(struct background_tracker *b);
unsigned btracker_nr_writebacks_queued(struct background_tracker *b);
unsigned btracker_nr_demotions_queued(struct background_tracker *b);
unsigned int btracker_nr_writebacks_queued(struct background_tracker *b);
unsigned int btracker_nr_demotions_queued(struct background_tracker *b);
/*
* Queue some work within the tracker. 'work' should point to the work
* to queue, this will be copied (ownership doesn't pass). If pwork
* is not NULL then it will be set to point to the tracker's internal
* copy of the work.
*
* returns -EINVAL iff the work is already queued. -ENOMEM if the work
* couldn't be queued for another reason.
*/
......@@ -33,11 +59,20 @@ int btracker_queue(struct background_tracker *b,
struct policy_work **pwork);
/*
* Hands out the next piece of work to be performed.
* Returns -ENODATA if there's no work.
*/
int btracker_issue(struct background_tracker *b, struct policy_work **work);
void btracker_complete(struct background_tracker *b,
struct policy_work *op);
/*
* Informs the tracker that the work has been completed and it may forget
* about it.
*/
void btracker_complete(struct background_tracker *b, struct policy_work *op);
/*
* Predicate to see if an origin block is already scheduled for promotion.
*/
bool btracker_promotion_already_present(struct background_tracker *b,
dm_oblock_t oblock);
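
Putting the documented calls together, a hedged sketch of the
queue -> issue -> complete lifecycle (the btracker_* calls are the
real API from this header; the surrounding function and the elided
work step are illustrative):

static void example_lifecycle(struct background_tracker *b,
			      struct policy_work *decided)
{
	struct policy_work *issued;

	/* The policy queues work; the tracker copies it. */
	if (btracker_queue(b, decided, NULL))
		return;	/* -EINVAL or -ENOMEM per the comment above */

	/* The core target later asks for the next piece of work. */
	if (btracker_issue(b, &issued))
		return;	/* -ENODATA: nothing queued */

	/* ... perform the promotion/demotion/writeback here ... */

	/* Let the tracker forget about the completed work. */
	btracker_complete(b, issued);
}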
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
......@@ -104,7 +105,7 @@ struct dm_cache_metadata {
refcount_t ref_count;
struct list_head list;
unsigned version;
unsigned int version;
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
......@@ -129,7 +130,7 @@ struct dm_cache_metadata {
bool clean_when_opened:1;
char policy_name[CACHE_POLICY_NAME_SIZE];
unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
struct dm_cache_statistics stats;
......@@ -162,10 +163,11 @@ struct dm_cache_metadata {
struct dm_bitset_cursor dirty_cursor;
};
/*-------------------------------------------------------------------
/*
*-----------------------------------------------------------------
* superblock validator
*-----------------------------------------------------------------*/
*-----------------------------------------------------------------
*/
#define SUPERBLOCK_CSUM_XOR 9031977
static void sb_prepare_for_write(struct dm_block_validator *v,
......@@ -201,15 +203,15 @@ static int sb_check(struct dm_block_validator *v,
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
DMERR("sb_check failed: blocknr %llu: wanted %llu",
le64_to_cpu(disk_super->blocknr),
DMERR("%s failed: blocknr %llu: wanted %llu",
__func__, le64_to_cpu(disk_super->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
DMERR("sb_check failed: magic %llu: wanted %llu",
le64_to_cpu(disk_super->magic),
DMERR("%s failed: magic %llu: wanted %llu",
__func__, le64_to_cpu(disk_super->magic),
(unsigned long long)CACHE_SUPERBLOCK_MAGIC);
return -EILSEQ;
}
......@@ -218,8 +220,8 @@ static int sb_check(struct dm_block_validator *v,
sb_block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk_super->csum) {
DMERR("sb_check failed: csum %u: wanted %u",
le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
DMERR("%s failed: csum %u: wanted %u",
__func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
return -EILSEQ;
}
......@@ -260,10 +262,10 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
unsigned i;
unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
......@@ -533,6 +535,7 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
bool may_format_device)
{
int r;
cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
......@@ -566,6 +569,7 @@ static void update_flags(struct cache_disk_superblock *disk_super,
flags_mutator mutator)
{
uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
disk_super->flags = cpu_to_le32(sb_flags);
}
......@@ -727,18 +731,20 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
*/
#define FLAGS_MASK ((1 << 16) - 1)
static __le64 pack_value(dm_oblock_t block, unsigned flags)
static __le64 pack_value(dm_oblock_t block, unsigned int flags)
{
uint64_t value = from_oblock(block);
value <<= 16;
value = value | (flags & FLAGS_MASK);
return cpu_to_le64(value);
}
static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
{
uint64_t value = le64_to_cpu(value_le);
uint64_t b = value >> 16;
*block = to_oblock(b);
*flags = value & FLAGS_MASK;
}
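
As a quick standalone check of the packing scheme (a userspace C
sketch with plain uint64_t in place of the kernel's __le64 byte-order
helpers; the value 1 stands in for a flag bit such as M_VALID):

#include <assert.h>
#include <stdint.h>

#define FLAGS_MASK ((1 << 16) - 1)

int main(void)
{
	uint64_t block = 5;	/* origin block number */
	uint64_t flags = 1;	/* e.g. a valid bit */
	uint64_t packed = (block << 16) | (flags & FLAGS_MASK);

	/* unpacking recovers both fields */
	assert(packed >> 16 == block);
	assert((packed & FLAGS_MASK) == flags);
	return 0;
}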
......@@ -749,7 +755,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version)
unsigned int metadata_version)
{
int r;
struct dm_cache_metadata *cmd;
......@@ -810,7 +816,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version)
unsigned int metadata_version)
{
struct dm_cache_metadata *cmd, *cmd2;
......@@ -855,7 +861,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version)
unsigned int metadata_version)
{
struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
policy_hint_size, metadata_version);
......@@ -890,7 +896,7 @@ static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t
int r;
__le64 value;
dm_oblock_t ob;
unsigned flags;
unsigned int flags;
r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
if (r)
......@@ -1009,13 +1015,13 @@ static bool cmd_write_lock(struct dm_cache_metadata *cmd)
do { \
if (!cmd_write_lock((cmd))) \
return -EINVAL; \
} while(0)
} while (0)
#define WRITE_LOCK_VOID(cmd) \
do { \
if (!cmd_write_lock((cmd))) \
return; \
} while(0)
} while (0)
#define WRITE_UNLOCK(cmd) \
up_write(&(cmd)->root_lock)
......@@ -1034,13 +1040,13 @@ static bool cmd_read_lock(struct dm_cache_metadata *cmd)
do { \
if (!cmd_read_lock((cmd))) \
return -EINVAL; \
} while(0)
} while (0)
#define READ_LOCK_VOID(cmd) \
do { \
if (!cmd_read_lock((cmd))) \
return; \
} while(0)
} while (0)
#define READ_UNLOCK(cmd) \
up_read(&(cmd)->root_lock)
......@@ -1252,6 +1258,7 @@ static int __insert(struct dm_cache_metadata *cmd,
{
int r;
__le64 value = pack_value(oblock, M_VALID);
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
......@@ -1288,7 +1295,7 @@ static bool policy_unchanged(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy)
{
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned *policy_version = dm_cache_policy_get_version(policy);
const unsigned int *policy_version = dm_cache_policy_get_version(policy);
size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
/*
......@@ -1339,7 +1346,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
unsigned flags;
unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
......@@ -1381,7 +1388,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
unsigned flags;
unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
......@@ -1513,7 +1520,7 @@ static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
{
__le64 value;
dm_oblock_t oblock;
unsigned flags;
unsigned int flags;
memcpy(&value, leaf, sizeof(value));
unpack_value(value, &oblock, &flags);
......@@ -1547,7 +1554,7 @@ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
{
int r;
unsigned flags;
unsigned int flags;
dm_oblock_t oblock;
__le64 value;
......@@ -1574,10 +1581,11 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
}
static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r;
unsigned i;
unsigned int i;
for (i = 0; i < nr_bits; i++) {
r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
if (r)
......@@ -1594,7 +1602,7 @@ static int is_dirty_callback(uint32_t index, bool *value, void *context)
return 0;
}
static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r = 0;
......@@ -1613,7 +1621,7 @@ static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits,
}
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
unsigned nr_bits,
unsigned int nr_bits,
unsigned long *bits)
{
int r;
......@@ -1712,7 +1720,7 @@ static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
int r;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned *policy_version = dm_cache_policy_get_version(policy);
const unsigned int *policy_version = dm_cache_policy_get_version(policy);
if (!policy_name[0] ||
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
......@@ -60,7 +61,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
unsigned metadata_version);
unsigned int metadata_version);
void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
......@@ -96,7 +97,7 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
void *context);
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
unsigned nr_bits, unsigned long *bits);
unsigned int nr_bits, unsigned long *bits);
struct dm_cache_statistics {
uint32_t read_hits;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
......@@ -85,9 +86,10 @@ static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
}
static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
unsigned maxlen, ssize_t *sz_ptr)
unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
if (p->emit_config_values)
return p->emit_config_values(p, result, maxlen, sz_ptr);
......@@ -112,20 +114,22 @@ static inline void policy_allow_migrations(struct dm_cache_policy *p, bool allow
/*
* Some utility functions commonly used by policies and the core target.
*/
static inline size_t bitset_size_in_bytes(unsigned nr_entries)
static inline size_t bitset_size_in_bytes(unsigned int nr_entries)
{
return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}
static inline unsigned long *alloc_bitset(unsigned nr_entries)
static inline unsigned long *alloc_bitset(unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
return vzalloc(s);
}
static inline void clear_bitset(void *bitset, unsigned nr_entries)
static inline void clear_bitset(void *bitset, unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
memset(bitset, 0, s);
}
......@@ -154,7 +158,7 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
*/
const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p);
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
......@@ -154,7 +155,7 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
......@@ -128,7 +129,7 @@ struct dm_cache_policy {
* Configuration.
*/
int (*emit_config_values)(struct dm_cache_policy *p, char *result,
unsigned maxlen, ssize_t *sz_ptr);
unsigned int maxlen, ssize_t *sz_ptr);
int (*set_config_value)(struct dm_cache_policy *p,
const char *key, const char *value);
......@@ -157,7 +158,7 @@ struct dm_cache_policy_type {
* what gets passed on the target line to select your policy.
*/
char name[CACHE_POLICY_NAME_SIZE];
unsigned version[CACHE_POLICY_VERSION_SIZE];
unsigned int version[CACHE_POLICY_VERSION_SIZE];
/*
* For use by an alias dm_cache_policy_type to point to the
......
......@@ -580,7 +580,7 @@ static int hash_table_init(struct clone *clone)
sz = 1 << HASH_TABLE_BITS;
clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
clone->ht = kvmalloc_array(sz, sizeof(struct hash_table_bucket), GFP_KERNEL);
if (!clone->ht)
return -ENOMEM;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Internal header file _only_ for device mapper core
*
......@@ -119,7 +120,7 @@ struct mapped_device {
struct dm_stats stats;
/* the number of internal suspends */
unsigned internal_suspend_count;
unsigned int internal_suspend_count;
int swap_bios;
struct semaphore swap_bios_semaphore;
......@@ -216,7 +217,7 @@ struct dm_table {
struct list_head devices;
/* events get handed up using this callback */
void (*event_fn)(void *);
void (*event_fn)(void *data);
void *event_context;
struct dm_md_mempools *mempools;
......@@ -326,9 +327,9 @@ static inline struct completion *dm_get_completion_from_kobject(struct kobject *
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
return !maxlen || strlen(result) + 1 >= maxlen;
}
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005-2007 Red Hat GmbH
*
......@@ -20,8 +21,8 @@
struct delay_class {
struct dm_dev *dev;
sector_t start;
unsigned delay;
unsigned ops;
unsigned int delay;
unsigned int ops;
};
struct delay_c {
......@@ -305,7 +306,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
static void delay_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
......
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018 Red Hat, Inc.
*
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Red Hat GmbH
*
......@@ -390,7 +391,7 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
}
static void ebs_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct ebs_c *ec = ti->private;
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
......@@ -142,7 +143,7 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister);
static int set_chunk_size(struct dm_exception_store *store,
const char *chunk_size_arg, char **error)
{
unsigned chunk_size;
unsigned int chunk_size;
if (kstrtouint(chunk_size_arg, 10, &chunk_size)) {
*error = "Invalid chunk size";
......@@ -158,7 +159,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
unsigned chunk_size,
unsigned int chunk_size,
char **error)
{
/* Check chunk_size is a power of 2 */
......@@ -190,7 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
unsigned *args_used,
unsigned int *args_used,
struct dm_exception_store **store)
{
int r = 0;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2008 Red Hat, Inc. All rights reserved.
......@@ -43,19 +44,19 @@ struct dm_exception_store_type {
const char *name;
struct module *module;
int (*ctr) (struct dm_exception_store *store, char *options);
int (*ctr)(struct dm_exception_store *store, char *options);
/*
* Destroys this object when you've finished with it.
*/
void (*dtr) (struct dm_exception_store *store);
void (*dtr)(struct dm_exception_store *store);
/*
* The target shouldn't read the COW device until this is
* called. As exceptions are read from the COW, they are
* reported back via the callback.
*/
int (*read_metadata) (struct dm_exception_store *store,
int (*read_metadata)(struct dm_exception_store *store,
int (*callback)(void *callback_context,
chunk_t old, chunk_t new),
void *callback_context);
......@@ -63,15 +64,15 @@ struct dm_exception_store_type {
/*
* Find somewhere to store the next exception.
*/
int (*prepare_exception) (struct dm_exception_store *store,
int (*prepare_exception)(struct dm_exception_store *store,
struct dm_exception *e);
/*
* Update the metadata with this exception.
*/
void (*commit_exception) (struct dm_exception_store *store,
void (*commit_exception)(struct dm_exception_store *store,
struct dm_exception *e, int valid,
void (*callback) (void *, int success),
void (*callback)(void *, int success),
void *callback_context);
/*
......@@ -82,28 +83,28 @@ struct dm_exception_store_type {
* still-to-be-merged chunk and returns the number of
* consecutive previous ones.
*/
int (*prepare_merge) (struct dm_exception_store *store,
int (*prepare_merge)(struct dm_exception_store *store,
chunk_t *last_old_chunk, chunk_t *last_new_chunk);
/*
* Clear the last n exceptions.
* nr_merged must be <= the value returned by prepare_merge.
*/
int (*commit_merge) (struct dm_exception_store *store, int nr_merged);
int (*commit_merge)(struct dm_exception_store *store, int nr_merged);
/*
* The snapshot is invalid, note this in the metadata.
*/
void (*drop_snapshot) (struct dm_exception_store *store);
void (*drop_snapshot)(struct dm_exception_store *store);
unsigned (*status) (struct dm_exception_store *store,
unsigned int (*status)(struct dm_exception_store *store,
status_type_t status, char *result,
unsigned maxlen);
unsigned int maxlen);
/*
* Return how full the snapshot is.
*/
void (*usage) (struct dm_exception_store *store,
void (*usage)(struct dm_exception_store *store,
sector_t *total_sectors, sector_t *sectors_allocated,
sector_t *metadata_sectors);
......@@ -118,9 +119,9 @@ struct dm_exception_store {
struct dm_snapshot *snap;
/* Size of data blocks saved - must be a power of 2 */
unsigned chunk_size;
unsigned chunk_mask;
unsigned chunk_shift;
unsigned int chunk_size;
unsigned int chunk_mask;
unsigned int chunk_shift;
void *context;
......@@ -144,7 +145,7 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}
static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
static inline unsigned int dm_consecutive_chunk_count(struct dm_exception *e)
{
return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}
......@@ -181,12 +182,12 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
unsigned chunk_size,
unsigned int chunk_size,
char **error);
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
unsigned *args_used,
unsigned int *args_used,
struct dm_exception_store **store);
void dm_exception_store_destroy(struct dm_exception_store *store);
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software (UK) Limited.
* Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
......@@ -26,12 +27,12 @@ struct flakey_c {
struct dm_dev *dev;
unsigned long start_time;
sector_t start;
unsigned up_interval;
unsigned down_interval;
unsigned int up_interval;
unsigned int down_interval;
unsigned long flags;
unsigned corrupt_bio_byte;
unsigned corrupt_bio_rw;
unsigned corrupt_bio_value;
unsigned int corrupt_bio_byte;
unsigned int corrupt_bio_rw;
unsigned int corrupt_bio_value;
blk_opf_t corrupt_bio_flags;
};
......@@ -48,7 +49,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
int r;
unsigned argc;
unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
......@@ -148,7 +149,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
sizeof(unsigned int));
r = dm_read_arg(_args + 3, as,
(__force unsigned *)&fc->corrupt_bio_flags,
(__force unsigned int *)&fc->corrupt_bio_flags,
&ti->error);
if (r)
return r;
......@@ -303,9 +304,13 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
*/
bio_for_each_segment(bvec, bio, iter) {
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
char *segment = (page_address(bio_iter_page(bio, iter))
+ bio_iter_offset(bio, iter));
char *segment;
struct page *page = bio_iter_page(bio, iter);
if (unlikely(page == ZERO_PAGE(0)))
break;
segment = bvec_kmap_local(&bvec);
segment[corrupt_bio_byte] = fc->corrupt_bio_value;
kunmap_local(segment);
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
......@@ -320,8 +325,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
unsigned elapsed;
unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
pb->bio_submitted = false;
if (op_is_zone_mgmt(bio_op(bio)))
......@@ -352,8 +358,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
if (test_bit(DROP_WRITES, &fc->flags)) {
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
}
else if (test_bit(ERROR_WRITES, &fc->flags)) {
} else if (test_bit(ERROR_WRITES, &fc->flags)) {
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
}
......@@ -361,9 +366,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
/*
* Corrupt matching writes.
*/
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
if (fc->corrupt_bio_byte) {
if (fc->corrupt_bio_rw == WRITE) {
if (all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
}
goto map_bio;
}
......@@ -389,13 +396,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
if (fc->corrupt_bio_byte) {
if ((fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
/*
* Corrupt successful matching READs while in down state.
*/
corrupt_bio_data(bio, fc);
}
} else if (!test_bit(DROP_WRITES, &fc->flags) &&
!test_bit(ERROR_WRITES, &fc->flags)) {
/*
......@@ -410,11 +418,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
}
static void flakey_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
unsigned sz = 0;
unsigned int sz = 0;
struct flakey_c *fc = ti->private;
unsigned drop_writes, error_writes;
unsigned int drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:
......
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Microsoft Corporation
*
* Author: Tushar Sugandhi <tusharsu@linux.microsoft.com>
*
* File: dm-ima.c
* Enables IMA measurements for DM targets
*/
......
/* SPDX-License-Identifier: GPL-2.0
*
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Microsoft Corporation
*
* Author: Tushar Sugandhi <tusharsu@linux.microsoft.com>
*
* File: dm-ima.h
* Header file for device mapper IMA measurements.
*/
......
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0-only
/*
* dm-init.c
* Copyright (C) 2017 The Chromium OS Authors <chromium-os-dev@chromium.org>
*
* This file is released under the GPLv2.
......@@ -296,7 +295,7 @@ static int __init dm_init_init(void)
if (waitfor[i]) {
DMINFO("waiting for device %s ...", waitfor[i]);
while (!dm_get_dev_t(waitfor[i]))
msleep(5);
fsleep(5000);
}
}
......
/* SPDX-License-Identifier: GPL-2.0 */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022 Red Hat, Inc.
*/
......@@ -57,7 +57,7 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
......@@ -68,7 +68,6 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
static inline void dm_bio_integrity_rewind(struct bio *bio,
unsigned int bytes_done)
{
return;
}
#endif
......@@ -104,7 +103,6 @@ static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
return;
}
#endif
......@@ -131,7 +129,7 @@ static inline void dm_bio_rewind_iter(const struct bio *bio,
* rewinding from end of bio and restoring its original position.
* Caller is also responsible for restoring bio's size.
*/
static void dm_bio_rewind(struct bio *bio, unsigned bytes)
static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
{
if (bio_integrity(bio))
dm_bio_integrity_rewind(bio, bytes);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Red Hat, Inc. All rights reserved.
*
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2006 Red Hat GmbH
......@@ -38,7 +39,7 @@ struct io {
void *context;
void *vma_invalidate_address;
unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
} __aligned(DM_IO_MAX_REGIONS);
static struct kmem_cache *_dm_io_cache;
......@@ -48,7 +49,7 @@ static struct kmem_cache *_dm_io_cache;
struct dm_io_client *dm_io_client_create(void)
{
struct dm_io_client *client;
unsigned min_ios = dm_get_reserved_bio_based_ios();
unsigned int min_ios = dm_get_reserved_bio_based_ios();
int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
......@@ -65,7 +66,7 @@ struct dm_io_client *dm_io_client_create(void)
return client;
bad:
bad:
mempool_exit(&client->pool);
kfree(client);
return ERR_PTR(ret);
......@@ -80,15 +81,17 @@ void dm_io_client_destroy(struct dm_io_client *client)
}
EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
/*
*-------------------------------------------------------------------
* We need to keep track of which region a bio is doing io for.
* To avoid a memory allocation to store just 5 or 6 bits, we
* ensure the 'struct io' pointer is aligned so enough low bits are
* always zero and then combine it with the region number directly in
* bi_private.
*---------------------------------------------------------------*/
*-------------------------------------------------------------------
*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
unsigned region)
unsigned int region)
{
if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
DMCRIT("Unaligned struct io pointer %p", io);
......@@ -99,7 +102,7 @@ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
unsigned *region)
unsigned int *region)
{
unsigned long val = (unsigned long)bio->bi_private;
......@@ -107,10 +110,12 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
*region = val & (DM_IO_MAX_REGIONS - 1);
}
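
A standalone sketch of the pointer-tagging trick described above
(plain C11; REGION_ALIGN is a stand-in for DM_IO_MAX_REGIONS, whose
actual value is not shown in this hunk):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define REGION_ALIGN 64	/* stand-in for DM_IO_MAX_REGIONS */

int main(void)
{
	/* An allocation aligned to REGION_ALIGN has its low bits zero. */
	void *io = aligned_alloc(REGION_ALIGN, REGION_ALIGN);
	uintptr_t region = 5;	/* small region number */

	/* store: combine pointer and region in one word */
	uintptr_t tagged = (uintptr_t)io | region;

	/* retrieve: mask the two fields back apart */
	assert((void *)(tagged & ~((uintptr_t)REGION_ALIGN - 1)) == io);
	assert((tagged & (REGION_ALIGN - 1)) == region);

	free(io);
	return 0;
}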
/*-----------------------------------------------------------------
/*
*--------------------------------------------------------------
* We need an io object to keep track of the number of bios that
* have been dispatched for a particular io.
*---------------------------------------------------------------*/
*--------------------------------------------------------------
*/
static void complete_io(struct io *io)
{
unsigned long error_bits = io->error_bits;
......@@ -137,7 +142,7 @@ static void dec_count(struct io *io, unsigned int region, blk_status_t error)
static void endio(struct bio *bio)
{
struct io *io;
unsigned region;
unsigned int region;
blk_status_t error;
if (bio->bi_status && bio_data_dir(bio) == READ)
......@@ -154,17 +159,19 @@ static void endio(struct bio *bio)
dec_count(io, region, error);
}
/*-----------------------------------------------------------------
/*
*--------------------------------------------------------------
* These little objects provide an abstraction for getting a new
* destination page for io.
*---------------------------------------------------------------*/
*--------------------------------------------------------------
*/
struct dpages {
void (*get_page)(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset);
struct page **p, unsigned long *len, unsigned int *offset);
void (*next_page)(struct dpages *dp);
union {
unsigned context_u;
unsigned int context_u;
struct bvec_iter context_bi;
};
void *context_ptr;
......@@ -177,9 +184,9 @@ struct dpages {
* Functions for getting the pages from a list.
*/
static void list_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
struct page **p, unsigned long *len, unsigned int *offset)
{
unsigned o = dp->context_u;
unsigned int o = dp->context_u;
struct page_list *pl = (struct page_list *) dp->context_ptr;
*p = pl->page;
......@@ -190,11 +197,12 @@ static void list_get_page(struct dpages *dp,
static void list_next_page(struct dpages *dp)
{
struct page_list *pl = (struct page_list *) dp->context_ptr;
dp->context_ptr = pl->next;
dp->context_u = 0;
}
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
dp->get_page = list_get_page;
dp->next_page = list_next_page;
......@@ -206,7 +214,7 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
* Functions for getting the pages from a bvec.
*/
static void bio_get_page(struct dpages *dp, struct page **p,
unsigned long *len, unsigned *offset)
unsigned long *len, unsigned int *offset)
{
struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
dp->context_bi);
......@@ -244,7 +252,7 @@ static void bio_dp_init(struct dpages *dp, struct bio *bio)
* Functions for getting the pages from a VMA.
*/
static void vm_get_page(struct dpages *dp,
struct page **p, unsigned long *len, unsigned *offset)
struct page **p, unsigned long *len, unsigned int *offset)
{
*p = vmalloc_to_page(dp->context_ptr);
*offset = dp->context_u;
......@@ -269,7 +277,7 @@ static void vm_dp_init(struct dpages *dp, void *data)
* Functions for getting the pages from kernel memory.
*/
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
unsigned *offset)
unsigned int *offset)
{
*p = virt_to_page(dp->context_ptr);
*offset = dp->context_u;
......@@ -290,18 +298,20 @@ static void km_dp_init(struct dpages *dp, void *data)
dp->context_ptr = data;
}
/*-----------------------------------------------------------------
/*
*---------------------------------------------------------------
* IO routines that accept a list of pages.
*---------------------------------------------------------------*/
static void do_region(const blk_opf_t opf, unsigned region,
*---------------------------------------------------------------
*/
static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
struct io *io)
{
struct bio *bio;
struct page *page;
unsigned long len;
unsigned offset;
unsigned num_bvecs;
unsigned int offset;
unsigned int num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
sector_t num_sectors;
......@@ -350,7 +360,8 @@ static void do_region(const blk_opf_t opf, unsigned region,
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
} else while (remaining) {
} else {
while (remaining) {
/*
* Try and add as many pages as possible.
*/
......@@ -363,6 +374,7 @@ static void do_region(const blk_opf_t opf, unsigned region,
remaining -= to_sector(len);
dp->next_page(dp);
}
}
atomic_inc(&io->count);
submit_bio(bio);
......@@ -508,7 +520,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
return 0;
}
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
{
int r;
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2002 Sistina Software (UK) Limited.
* Copyright (C) 2006 Red Hat GmbH
......@@ -34,14 +35,14 @@
#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB 1024
static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
static unsigned int kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
module_param(kcopyd_subjob_size_kb, uint, 0644);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
static unsigned dm_get_kcopyd_subjob_size(void)
static unsigned int dm_get_kcopyd_subjob_size(void)
{
unsigned sub_job_size_kb;
unsigned int sub_job_size_kb;
sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
DEFAULT_SUB_JOB_SIZE_KB,
......@@ -50,15 +51,17 @@ static unsigned dm_get_kcopyd_subjob_size(void)
return sub_job_size_kb << 1;
}
/*-----------------------------------------------------------------
/*
*----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
*---------------------------------------------------------------*/
*---------------------------------------------------------------
*/
struct dm_kcopyd_client {
struct page_list *pages;
unsigned nr_reserved_pages;
unsigned nr_free_pages;
unsigned sub_job_size;
unsigned int nr_reserved_pages;
unsigned int nr_free_pages;
unsigned int sub_job_size;
struct dm_io_client *io_client;
......@@ -109,7 +112,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
* The reason for this is unknown but possibly due to jiffies rounding errors
* or read/write cache inside the disk.
*/
#define SLEEP_MSEC 100
#define SLEEP_USEC 100000
/*
* Maximum number of sleep events. There is a theoretical livelock if more
......@@ -119,7 +122,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
static void io_job_start(struct dm_kcopyd_throttle *t)
{
unsigned throttle, now, difference;
unsigned int throttle, now, difference;
int slept = 0, skew;
if (unlikely(!t))
......@@ -148,6 +151,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
t->total_period >>= shift;
t->io_period >>= shift;
}
......@@ -157,7 +161,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
slept++;
spin_unlock_irq(&throttle_spinlock);
msleep(SLEEP_MSEC);
fsleep(SLEEP_USEC);
goto try_again;
}
......@@ -182,7 +186,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
goto skip_limit;
if (!t->num_io_jobs) {
unsigned now, difference;
unsigned int now, difference;
now = jiffies;
difference = now - t->last_jiffies;
......@@ -303,9 +307,9 @@ static void drop_pages(struct page_list *pl)
/*
* Allocate and reserve nr_pages for the use of a specific client.
*/
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
{
unsigned i;
unsigned int i;
struct page_list *pl = NULL, *next;
for (i = 0; i < nr_pages; i++) {
......@@ -333,15 +337,17 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
/*-----------------------------------------------------------------
/*
*---------------------------------------------------------------
* kcopyd_jobs need to be allocated by the *clients* of kcopyd,
* for this reason we use a mempool to prevent the client from
* ever having to do io (which could cause a deadlock).
*---------------------------------------------------------------*/
*---------------------------------------------------------------
*/
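
As a hedged aside on the mempool rationale above (a minimal sketch;
example_job, example_job_pool and EXAMPLE_MIN_JOBS are hypothetical
names, while the mempool and slab calls are real kernel API):

#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct example_job {
	struct list_head list;
};

#define EXAMPLE_MIN_JOBS 8

static struct kmem_cache *example_job_cache;
static mempool_t example_job_pool;

static int example_pool_init(void)
{
	example_job_cache = KMEM_CACHE(example_job, 0);
	if (!example_job_cache)
		return -ENOMEM;

	/*
	 * Reserve EXAMPLE_MIN_JOBS preallocated jobs so an allocation
	 * can wait on the pool instead of triggering new I/O.
	 */
	return mempool_init_slab_pool(&example_job_pool, EXAMPLE_MIN_JOBS,
				      example_job_cache);
}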
struct kcopyd_job {
struct dm_kcopyd_client *kc;
struct list_head list;
unsigned flags;
unsigned int flags;
/*
* Error state of the job.
......@@ -582,7 +588,7 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
......@@ -603,7 +609,7 @@ static int run_pages_job(struct kcopyd_job *job)
* of successful jobs.
*/
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
int (*fn) (struct kcopyd_job *))
int (*fn)(struct kcopyd_job *))
{
struct kcopyd_job *job;
int r, count = 0;
......@@ -673,6 +679,7 @@ static void do_work(struct work_struct *work)
static void dispatch_job(struct kcopyd_job *job)
{
struct dm_kcopyd_client *kc = job->kc;
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->callback_jobs, job);
......@@ -819,7 +826,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->pages = NULL;
job->op = REQ_OP_READ;
} else {
memset(&job->source, 0, sizeof job->source);
memset(&job->source, 0, sizeof(job->source));
job->source.count = job->dests[0].count;
job->pages = &zero_page_list;
......@@ -849,8 +856,8 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
EXPORT_SYMBOL(dm_kcopyd_copy);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
unsigned num_dests, struct dm_io_region *dests,
unsigned flags, dm_kcopyd_notify_fn fn, void *context)
unsigned int num_dests, struct dm_io_region *dests,
unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
......@@ -900,13 +907,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
}
#endif /* 0 */
/*-----------------------------------------------------------------
/*
*---------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
*---------------------------------------------------------------
*/
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r;
unsigned reserve_pages;
unsigned int reserve_pages;
struct dm_kcopyd_client *kc;
kc = kzalloc(sizeof(*kc), GFP_KERNEL);
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
*
......@@ -64,7 +65,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = lc;
return 0;
bad:
bad:
kfree(lc);
return ret;
}
......@@ -95,7 +96,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
}
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned int status_flags, char *result, unsigned int maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
size_t sz = 0;
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
......@@ -123,7 +124,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
}
static int build_constructor_string(struct dm_target *ti,
unsigned argc, char **argv,
unsigned int argc, char **argv,
char **ctr_str)
{
int i, str_size;
......@@ -188,7 +189,7 @@ static void do_flush(struct work_struct *work)
* to the userspace ctr function.
*/
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned argc, char **argv)
unsigned int argc, char **argv)
{
int r = 0;
int str_size;
......@@ -345,8 +346,6 @@ static void userspace_dtr(struct dm_dirty_log *log)
kfree(lc->usr_argv_str);
kfree(lc);
return;
}
static int userspace_presuspend(struct dm_dirty_log *log)
......@@ -660,8 +659,6 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
fe->region = region;
list_add(&fe->list, &lc->mark_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
return;
}
/*
......@@ -697,8 +694,6 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
fe->region = region;
list_add(&fe->list, &lc->clear_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
return;
}
/*
......@@ -755,7 +750,6 @@ static void userspace_set_region_sync(struct dm_dirty_log *log,
* It would be nice to be able to report failures.
* However, it is easy enough to detect and resolve.
*/
return;
}
/*
......@@ -792,7 +786,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)
* Returns: amount of space consumed
*/
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
char *result, unsigned maxlen)
char *result, unsigned int maxlen)
{
int r = 0;
char *table_args;
......@@ -926,7 +920,6 @@ static void __exit userspace_dirty_log_exit(void)
kmem_cache_destroy(_flush_entry_cache);
DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
return;
}
module_init(userspace_dirty_log_init);
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
......@@ -108,9 +109,8 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
if (pkg->error != -EAGAIN)
*(pkg->data_size) = 0;
} else if (tfr->data_size > *(pkg->data_size)) {
DMERR("Insufficient space to receive package [%u] "
"(%u vs %zu)", tfr->request_type,
tfr->data_size, *(pkg->data_size));
DMERR("Insufficient space to receive package [%u] (%u vs %zu)",
tfr->request_type, tfr->data_size, *(pkg->data_size));
*(pkg->data_size) = 0;
pkg->error = -ENOSPC;
......@@ -142,7 +142,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
fill_pkg(msg, NULL);
else if (msg->len < sizeof(*tfr))
DMERR("Incomplete message received (expected %u, got %u): [%u]",
(unsigned)sizeof(*tfr), msg->len, msg->seq);
(unsigned int)sizeof(*tfr), msg->len, msg->seq);
else
fill_pkg(NULL, tfr);
spin_unlock(&receiving_list_lock);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
......