Commit b1f8ccda authored by Linus Torvalds

Merge tag 'for-5.18/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Significant refactoring and fixing of how DM core does bio-based IO
   accounting with focus on fixing wildly inaccurate IO stats for
   dm-crypt (and other DM targets that defer bio submission in their own
   workqueues). End result is proper IO accounting, made possible by
   targets being updated to use the new dm_submit_bio_remap() interface.

 - Add hipri bio polling support (REQ_POLLED) to bio-based DM.

 - Reduce dm_io and dm_target_io structs so that a single dm_io (which
   contains dm_target_io and first clone bio) weighs in at 256 bytes.
   For reference the bio struct is 128 bytes.

 - Various other small cleanups, fixes or improvements in DM core and
   targets.

 - Update MAINTAINERS with my kernel.org email address to allow
   distinction between my "upstream" and "Red" Hats.

* tag 'for-5.18/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (46 commits)
  dm: consolidate spinlocks in dm_io struct
  dm: reduce size of dm_io and dm_target_io structs
  dm: switch dm_target_io booleans over to proper flags
  dm: switch dm_io booleans over to proper flags
  dm: update email address in MAINTAINERS
  dm: return void from __send_empty_flush
  dm: factor out dm_io_complete
  dm cache: use dm_submit_bio_remap
  dm: simplify dm_sumbit_bio_remap interface
  dm thin: use dm_submit_bio_remap
  dm: add WARN_ON_ONCE to dm_submit_bio_remap
  dm: support bio polling
  block: add ->poll_bio to block_device_operations
  dm mpath: use DMINFO instead of printk with KERN_INFO
  dm: stop using bdevname
  dm-zoned: remove the ->name field in struct dmz_dev
  dm: remove unnecessary local variables in __bind
  dm: requeue IO if mapping table not yet available
  dm io: remove stale comment block for dm_io()
  dm thin metadata: remove unused dm_thin_remove_block and __remove
  ...
parents 2dacc1e5 4d7bca13
......@@ -5605,7 +5605,7 @@ F: include/linux/devm-helpers.h
DEVICE-MAPPER (LVM)
M: Alasdair Kergon <agk@redhat.com>
M: Mike Snitzer <snitzer@redhat.com>
M: Mike Snitzer <snitzer@kernel.org>
M: dm-devel@redhat.com
L: dm-devel@redhat.com
S: Maintained
......
......@@ -688,7 +688,7 @@ static void __submit_bio(struct bio *bio)
*
* bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
* bio_list_on_stack[1] contains bios that were submitted before the current
* ->submit_bio_bio, but that haven't been processed yet.
* ->submit_bio, but that haven't been processed yet.
*/
static void __submit_bio_noacct(struct bio *bio)
{
......@@ -955,7 +955,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
int ret;
int ret = 0;
if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
......@@ -965,10 +965,14 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
return 0;
if (WARN_ON_ONCE(!queue_is_mq(q)))
ret = 0; /* not yet implemented, should not happen */
else
if (queue_is_mq(q)) {
ret = blk_mq_poll(q, cookie, iob, flags);
} else {
struct gendisk *disk = q->disk;
if (disk && disk->fops->poll_bio)
ret = disk->fops->poll_bio(bio, iob, flags);
}
blk_queue_exit(q);
return ret;
}
......
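With the hunk above, bio_poll() falls back to the disk's ->poll_bio hook when the queue is not blk-mq, which is what makes polled (REQ_POLLED) IO usable on bio-based DM. A rough sketch of the submitter-side calling pattern; the done flag and poll_end_io() are invented for illustration, real callers (direct-IO paths) keep equivalent state in their own completion structures:

	static void poll_end_io(struct bio *bio)
	{
		/* signal the polling loop below */
		WRITE_ONCE(*(bool *)bio->bi_private, true);
	}

	static void submit_and_poll(struct bio *bio)
	{
		bool done = false;

		bio->bi_opf |= REQ_POLLED;
		bio->bi_private = &done;
		bio->bi_end_io = poll_end_io;
		submit_bio(bio);

		/* spin on completion instead of sleeping for an interrupt */
		while (!READ_ONCE(done))
			bio_poll(bio, NULL, 0);
	}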
......@@ -412,6 +412,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
struct device *ddev = disk_to_dev(disk);
int ret;
/* Only makes sense for bio-based to set ->poll_bio */
if (queue_is_mq(disk->queue) && disk->fops->poll_bio)
return -EINVAL;
/*
* The disk queue should now be all set with enough information about
* the device for the elevator code to pick an adequate default
......
......@@ -1026,7 +1026,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
* This scheme reminds me of a graph of entropy vs probability of a
* binary variable.
*/
static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
static const unsigned int table[] = {
1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
};
unsigned hits = mq->cache_stats.hits;
unsigned misses = mq->cache_stats.misses;
......
......@@ -803,7 +803,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
static void accounted_request(struct cache *cache, struct bio *bio)
{
accounted_begin(cache, bio);
submit_bio_noacct(bio);
dm_submit_bio_remap(bio, NULL);
}
static void issue_op(struct bio *bio, void *context)
......@@ -1708,7 +1708,7 @@ static bool process_bio(struct cache *cache, struct bio *bio)
bool commit_needed;
if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
submit_bio_noacct(bio);
dm_submit_bio_remap(bio, NULL);
return commit_needed;
}
......@@ -1774,7 +1774,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
if (cache->features.discard_passdown) {
remap_to_origin(cache, bio);
submit_bio_noacct(bio);
dm_submit_bio_remap(bio, NULL);
} else
bio_endio(bio);
......@@ -2015,7 +2015,6 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
{
int r;
sector_t metadata_dev_size;
char b[BDEVNAME_SIZE];
if (!at_least_one_arg(as, error))
return -EINVAL;
......@@ -2029,8 +2028,8 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
metadata_dev_size = get_dev_size(ca->metadata_dev);
if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
ca->metadata_dev->bdev, THIN_METADATA_MAX_SECTORS);
return 0;
}
......@@ -2357,6 +2356,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->ti = ca->ti;
ti->private = cache;
ti->accounts_remapped_io = true;
ti->num_flush_bios = 2;
ti->flush_supported = true;
......@@ -3345,7 +3345,6 @@ static void disable_passdown_if_not_supported(struct cache *cache)
struct block_device *origin_bdev = cache->origin_dev->bdev;
struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
const char *reason = NULL;
char buf[BDEVNAME_SIZE];
if (!cache->features.discard_passdown)
return;
......@@ -3357,8 +3356,8 @@ static void disable_passdown_if_not_supported(struct cache *cache)
reason = "max discard sectors smaller than a block";
if (reason) {
DMWARN("Origin device (%s) %s: Disabling discard passdown.",
bdevname(origin_bdev, buf), reason);
DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
origin_bdev, reason);
cache->features.discard_passdown = false;
}
}
......
......@@ -1682,7 +1682,6 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char *
{
int r;
sector_t metadata_dev_size;
char b[BDEVNAME_SIZE];
r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
&clone->metadata_dev);
......@@ -1693,8 +1692,8 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char *
metadata_dev_size = get_dev_size(clone->metadata_dev);
if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);
DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
clone->metadata_dev->bdev, DM_CLONE_METADATA_MAX_SECTORS);
return 0;
}
......@@ -2033,7 +2032,6 @@ static void disable_passdown_if_not_supported(struct clone *clone)
struct block_device *dest_dev = clone->dest_dev->bdev;
struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
const char *reason = NULL;
char buf[BDEVNAME_SIZE];
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
return;
......@@ -2044,8 +2042,8 @@ static void disable_passdown_if_not_supported(struct clone *clone)
reason = "max discard sectors smaller than a region";
if (reason) {
DMWARN("Destination device (%s) %s: Disabling discard passdown.",
bdevname(dest_dev, buf), reason);
DMWARN("Destination device (%pd) %s: Disabling discard passdown.",
dest_dev, reason);
clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
}
}
......
......@@ -64,11 +64,21 @@ struct mapped_device {
struct gendisk *disk;
struct dax_device *dax_dev;
wait_queue_head_t wait;
unsigned long __percpu *pending_io;
/* forced geometry settings */
struct hd_geometry geometry;
/*
* Processing queue (flush)
*/
struct workqueue_struct *wq;
/*
* A list of ios that arrived while we were suspended.
*/
struct work_struct work;
wait_queue_head_t wait;
spinlock_t deferred_lock;
struct bio_list deferred;
......@@ -83,36 +93,28 @@ struct mapped_device {
struct list_head uevent_list;
spinlock_t uevent_lock; /* Protect access to uevent_list */
/* for blk-mq request-based DM support */
bool init_tio_pdu:1;
struct blk_mq_tag_set *tag_set;
struct dm_stats stats;
/* the number of internal suspends */
unsigned internal_suspend_count;
int swap_bios;
struct semaphore swap_bios_semaphore;
struct mutex swap_bios_lock;
/*
* io objects are allocated from here.
*/
struct bio_set io_bs;
struct bio_set bs;
/*
* Processing queue (flush)
*/
struct workqueue_struct *wq;
/* forced geometry settings */
struct hd_geometry geometry;
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
int swap_bios;
struct semaphore swap_bios_semaphore;
struct mutex swap_bios_lock;
struct dm_stats stats;
/* for blk-mq request-based DM support */
struct blk_mq_tag_set *tag_set;
bool init_tio_pdu:1;
struct srcu_struct io_barrier;
#ifdef CONFIG_BLK_DEV_ZONED
......@@ -206,35 +208,76 @@ struct dm_table {
/*
* One of these is allocated per clone bio.
*/
#define DM_TIO_MAGIC 7282014
#define DM_TIO_MAGIC 28714
struct dm_target_io {
unsigned int magic;
unsigned short magic;
unsigned short flags;
unsigned int target_bio_nr;
struct dm_io *io;
struct dm_target *ti;
unsigned int target_bio_nr;
unsigned int *len_ptr;
bool inside_dm_io;
sector_t old_sector;
struct bio clone;
};
/*
* dm_target_io flags
*/
enum {
DM_TIO_INSIDE_DM_IO,
DM_TIO_IS_DUPLICATE_BIO
};
static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
return (tio->flags & (1U << bit)) != 0;
}
static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
tio->flags |= (1U << bit);
}
/*
* One of these is allocated per original bio.
* It contains the first clone used for that original.
*/
#define DM_IO_MAGIC 5191977
#define DM_IO_MAGIC 19577
struct dm_io {
unsigned int magic;
struct mapped_device *md;
blk_status_t status;
unsigned short magic;
unsigned short flags;
atomic_t io_count;
struct mapped_device *md;
struct bio *orig_bio;
blk_status_t status;
spinlock_t lock;
unsigned long start_time;
spinlock_t endio_lock;
void *data;
struct hlist_node node;
struct task_struct *map_task;
struct dm_stats_aux stats_aux;
/* last member of dm_target_io is 'struct bio' */
struct dm_target_io tio;
};
/*
* dm_io flags
*/
enum {
DM_IO_START_ACCT,
DM_IO_ACCOUNTED
};
static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
return (io->flags & (1U << bit)) != 0;
}
static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
io->flags |= (1U << bit);
}
static inline void dm_io_inc_pending(struct dm_io *io)
{
atomic_inc(&io->io_count);
......
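The old open-coded booleans (inside_dm_io, endio_lock, etc.) become bit flags tested and set through the helpers above, and the dm_io spinlocks are consolidated into the single 'lock' field. A hedged sketch of the intended usage, not a verbatim copy of dm.c:

	static void free_tio(struct dm_target_io *tio)
	{
		/* the first clone's tio is embedded in the dm_io and freed with it */
		if (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO))
			return;
		bio_put(&tio->clone);
	}

	static void start_io_acct_once(struct dm_io *io)
	{
		unsigned long flags;

		spin_lock_irqsave(&io->lock, flags);
		if (!dm_io_flagged(io, DM_IO_ACCOUNTED)) {
			dm_io_set_flag(io, DM_IO_ACCOUNTED);
			/* ... start block-layer accounting for io->orig_bio ... */
		}
		spin_unlock_irqrestore(&io->lock, flags);
	}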
......@@ -1827,6 +1827,8 @@ static void crypt_endio(struct bio *clone)
crypt_dec_pending(io);
}
#define CRYPT_MAP_READ_GFP GFP_NOWAIT
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->cc;
......@@ -1854,7 +1856,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
return 1;
}
submit_bio_noacct(clone);
dm_submit_bio_remap(io->base_bio, clone);
return 0;
}
......@@ -1880,7 +1882,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
submit_bio_noacct(clone);
dm_submit_bio_remap(io->base_bio, clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
......@@ -1959,7 +1961,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
submit_bio_noacct(clone);
dm_submit_bio_remap(io->base_bio, clone);
return;
}
......@@ -2578,7 +2580,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
static int get_key_size(char **key_string)
{
return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
}
#endif /* CONFIG_KEYS */
......@@ -3361,6 +3363,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->limit_swap_bios = true;
ti->accounts_remapped_io = true;
dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
return 0;
......@@ -3429,7 +3432,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
io->ctx.r.req = (struct skcipher_request *)(io + 1);
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
kcryptd_queue_read(io);
} else
kcryptd_queue_crypt(io);
......@@ -3624,7 +3627,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
.version = {1, 23, 0},
.version = {1, 24, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
......
......@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
submit_bio_noacct(bio);
dm_submit_bio_remap(bio, NULL);
bio = n;
}
}
......@@ -232,6 +232,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->accounts_remapped_io = true;
ti->per_io_data_size = sizeof(struct dm_delay_info);
return 0;
......@@ -355,7 +356,7 @@ static int delay_iterate_devices(struct dm_target *ti,
static struct target_type delay_target = {
.name = "delay",
.version = {1, 2, 1},
.version = {1, 3, 0},
.features = DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
.ctr = delay_ctr,
......
......@@ -455,7 +455,7 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
"%sname=%s,uuid=%s;device_resume=no_data;",
DM_IMA_VERSION_STR, dev_name, dev_uuid);
l += strlen(device_table_data);
l = strlen(device_table_data);
}
......@@ -568,7 +568,7 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
"%sname=%s,uuid=%s;device_remove=no_data;",
DM_IMA_VERSION_STR, dev_name, dev_uuid);
l += strlen(device_table_data);
l = strlen(device_table_data);
}
memcpy(device_table_data + l, remove_all_str, remove_all_len);
......@@ -654,7 +654,7 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
"%sname=%s,uuid=%s;table_clear=no_data;",
DM_IMA_VERSION_STR, dev_name, dev_uuid);
l += strlen(device_table_data);
l = strlen(device_table_data);
}
capacity_len = strlen(capacity_str);
......
......@@ -525,14 +525,6 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
return 0;
}
/*
* New collapsed (a)synchronous interface.
*
* If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
* the queue with blk_unplug() some time later or set REQ_SYNC in
* io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
* the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
*/
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
{
......
......@@ -18,6 +18,7 @@
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/ima.h>
......@@ -1788,6 +1789,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
return NULL;
cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
*ioctl_flags = _ioctls[cmd].flags;
return _ioctls[cmd].fn;
}
......
......@@ -899,10 +899,7 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
if (m->hw_handler_name) {
r = scsi_dh_attach(q, m->hw_handler_name);
if (r == -EBUSY) {
char b[BDEVNAME_SIZE];
printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
bdevname(bdev, b));
DMINFO("retaining handler on device %pg", bdev);
goto retain;
}
if (r < 0) {
......
......@@ -491,8 +491,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(!ti)) {
int srcu_idx;
struct dm_table *map = dm_get_live_table(md, &srcu_idx);
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
if (unlikely(!map)) {
dm_put_live_table(md, srcu_idx);
return BLK_STS_RESOURCE;
}
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
}
......
......@@ -195,6 +195,7 @@ void dm_stats_init(struct dm_stats *stats)
mutex_init(&stats->mutex);
INIT_LIST_HEAD(&stats->list);
stats->precise_timestamps = false;
stats->last = alloc_percpu(struct dm_stats_last_position);
for_each_possible_cpu(cpu) {
last = per_cpu_ptr(stats->last, cpu);
......@@ -231,6 +232,22 @@ void dm_stats_cleanup(struct dm_stats *stats)
mutex_destroy(&stats->mutex);
}
static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
{
struct list_head *l;
struct dm_stat *tmp_s;
bool precise_timestamps = false;
list_for_each(l, &stats->list) {
tmp_s = container_of(l, struct dm_stat, list_entry);
if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
precise_timestamps = true;
break;
}
}
stats->precise_timestamps = precise_timestamps;
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
sector_t step, unsigned stat_flags,
unsigned n_histogram_entries,
......@@ -376,6 +393,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
}
ret_id = s->id;
list_add_tail_rcu(&s->list_entry, l);
dm_stats_recalc_precise_timestamps(stats);
mutex_unlock(&stats->mutex);
resume_callback(md);
......@@ -418,6 +438,9 @@ static int dm_stats_delete(struct dm_stats *stats, int id)
}
list_del_rcu(&s->list_entry);
dm_stats_recalc_precise_timestamps(stats);
mutex_unlock(&stats->mutex);
/*
......@@ -621,13 +644,14 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
unsigned long duration_jiffies,
unsigned long start_time,
struct dm_stats_aux *stats_aux)
{
struct dm_stat *s;
sector_t end_sector;
struct dm_stats_last_position *last;
bool got_precise_time;
unsigned long duration_jiffies = 0;
if (unlikely(!bi_sectors))
return;
......@@ -647,16 +671,16 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
));
WRITE_ONCE(last->last_sector, end_sector);
WRITE_ONCE(last->last_rw, bi_rw);
}
} else
duration_jiffies = jiffies - start_time;
rcu_read_lock();
got_precise_time = false;
list_for_each_entry_rcu(s, &stats->list, list_entry) {
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
if (!end)
stats_aux->duration_ns = ktime_to_ns(ktime_get());
else
/* start (!end) duration_ns is set by DM core's alloc_io() */
if (end)
stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
got_precise_time = true;
}
......
......@@ -13,8 +13,7 @@ struct dm_stats {
struct mutex mutex;
struct list_head list; /* list of struct dm_stat */
struct dm_stats_last_position __percpu *last;
sector_t last_sector;
unsigned last_rw;
bool precise_timestamps;
};
struct dm_stats_aux {
......@@ -32,7 +31,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
sector_t bi_sector, unsigned bi_sectors, bool end,
unsigned long duration_jiffies,
unsigned long start_time,
struct dm_stats_aux *aux);
static inline bool dm_stats_used(struct dm_stats *st)
......@@ -40,4 +39,10 @@ static inline bool dm_stats_used(struct dm_stats *st)
return !list_empty(&st->list);
}
static inline void dm_stats_record_start(struct dm_stats *stats, struct dm_stats_aux *aux)
{
if (unlikely(stats->precise_timestamps))
aux->duration_ns = ktime_to_ns(ktime_get());
}
#endif
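The duration is no longer computed by the caller: dm_stats_account_io() now takes the IO's start time and derives duration_jiffies itself, while precise (ns) timestamps are primed by dm_stats_record_start() at submission. A rough sketch of the calling convention, with md/io/bio standing in for DM core's bookkeeping:

	/* at submission of the original bio */
	io->start_time = jiffies;
	dm_stats_record_start(&md->stats, &io->stats_aux);
	dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector,
			    bio_sectors(bio), false, io->start_time, &io->stats_aux);

	/* at completion: same call with end == true; duration is computed inside */
	dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector,
			    bio_sectors(bio), true, io->start_time, &io->stats_aux);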
......@@ -230,15 +230,14 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t dev_size = bdev_nr_sectors(bdev);
unsigned short logical_block_size_sectors =
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
if (!dev_size)
return 0;
if ((start >= dev_size) || (start + len > dev_size)) {
DMWARN("%s: %s too small for target: "
DMWARN("%s: %pg too small for target: "
"start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
dm_device_name(ti->table->md), bdev,
(unsigned long long)start,
(unsigned long long)len,
(unsigned long long)dev_size);
......@@ -253,10 +252,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
unsigned int zone_sectors = bdev_zone_sectors(bdev);
if (start & (zone_sectors - 1)) {
DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
zone_sectors, bdevname(bdev, b));
zone_sectors, bdev);
return 1;
}
......@@ -270,10 +269,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* the sector range.
*/
if (len & (zone_sectors - 1)) {
DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
zone_sectors, bdevname(bdev, b));
zone_sectors, bdev);
return 1;
}
}
......@@ -283,19 +282,19 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
if (start & (logical_block_size_sectors - 1)) {
DMWARN("%s: start=%llu not aligned to h/w "
"logical block size %u of %s",
"logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
limits->logical_block_size, bdevname(bdev, b));
limits->logical_block_size, bdev);
return 1;
}
if (len & (logical_block_size_sectors - 1)) {
DMWARN("%s: len=%llu not aligned to h/w "
"logical block size %u of %s",
"logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
limits->logical_block_size, bdevname(bdev, b));
limits->logical_block_size, bdev);
return 1;
}
......@@ -400,20 +399,19 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
char b[BDEVNAME_SIZE];
if (unlikely(!q)) {
DMWARN("%s: Cannot set limits for nonexistent device %s",
dm_device_name(ti->table->md), bdevname(bdev, b));
DMWARN("%s: Cannot set limits for nonexistent device %pg",
dm_device_name(ti->table->md), bdev);
return 0;
}
if (blk_stack_limits(limits, &q->limits,
get_start_sect(bdev) + start) < 0)
DMWARN("%s: adding target device %s caused an alignment inconsistency: "
DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
"physical_block_size=%u, logical_block_size=%u, "
"alignment_offset=%u, start=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
dm_device_name(ti->table->md), bdev,
q->limits.physical_block_size,
q->limits.logical_block_size,
q->limits.alignment_offset,
......@@ -1483,6 +1481,14 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
return &t->targets[(KEYS_PER_NODE * n) + k];
}
static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}
/*
* type->iterate_devices() should be called when the sanity check needs to
* iterate and check all underlying data devices. iterate_devices() will
......@@ -1533,6 +1539,11 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
static int dm_table_supports_poll(struct dm_table *t)
{
return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
}
/*
* Check whether a table has no data devices attached using each
* target's iterate_devices method.
......@@ -2069,6 +2080,20 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_update_crypto_profile(q, t);
disk_update_readahead(t->md->disk);
/*
* Check for request-based device is left to
* dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
*
* For bio-based device, only set QUEUE_FLAG_POLL when all
* underlying devices supporting polling.
*/
if (__table_type_bio_based(t->type)) {
if (dm_table_supports_poll(t))
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
else
blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
}
return 0;
}
......
......@@ -1665,22 +1665,6 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
return r;
}
static int __remove(struct dm_thin_device *td, dm_block_t block)
{
int r;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[2] = { td->id, block };
r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
if (r)
return r;
td->mapped_blocks--;
td->changed = true;
return 0;
}
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
int r;
......@@ -1740,18 +1724,6 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
}
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
int r = -EINVAL;
pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove(td, block);
pmd_write_unlock(td->pmd);
return r;
}
int dm_thin_remove_range(struct dm_thin_device *td,
dm_block_t begin, dm_block_t end)
{
......
......@@ -166,7 +166,6 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);
int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t data_block);
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
int dm_thin_remove_range(struct dm_thin_device *td,
dm_block_t begin, dm_block_t end);
......
......@@ -161,7 +161,7 @@ static void throttle_work_start(struct throttle *t)
static void throttle_work_update(struct throttle *t)
{
if (!t->throttle_applied && jiffies > t->threshold) {
if (!t->throttle_applied && time_is_before_jiffies(t->threshold)) {
down_write(&t->lock);
t->throttle_applied = true;
}
......@@ -755,7 +755,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
struct pool *pool = tc->pool;
if (!bio_triggers_commit(tc, bio)) {
submit_bio_noacct(bio);
dm_submit_bio_remap(bio, NULL);
return;
}
......@@ -2383,7 +2383,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio->bi_opf & REQ_PREFLUSH)
bio_endio(bio);
else
submit_bio_noacct(bio);
dm_submit_bio_remap(bio, NULL);
}
}
......@@ -2824,7 +2824,6 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
struct block_device *data_bdev = pt->data_dev->bdev;
struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
const char *reason = NULL;
char buf[BDEVNAME_SIZE];
if (!pt->adjusted_pf.discard_passdown)
return;
......@@ -2836,7 +2835,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
reason = "max discard sectors smaller than a block";
if (reason) {
DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
DMWARN("Data device (%pg) %s: Disabling discard passdown.", data_bdev, reason);
pt->adjusted_pf.discard_passdown = false;
}
}
......@@ -3201,11 +3200,10 @@ static sector_t get_dev_size(struct block_device *bdev)
static void warn_if_metadata_device_too_big(struct block_device *bdev)
{
sector_t metadata_dev_size = get_dev_size(bdev);
char buffer[BDEVNAME_SIZE];
if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
bdev, THIN_METADATA_MAX_SECTORS);
}
static sector_t get_metadata_dev_size(struct block_device *bdev)
......@@ -4233,6 +4231,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_flush_bios = 1;
ti->flush_supported = true;
ti->accounts_remapped_io = true;
ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
......
......@@ -1101,8 +1101,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
*/
static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
set, sb->dev->name, sb->block);
dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
set, sb->dev->bdev, sb->block);
return dmz_rdwr_block(sb->dev, REQ_OP_READ,
sb->block, sb->mblk->page);
......
......@@ -730,7 +730,6 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
}
dev->bdev = bdev;
dev->dev_idx = idx;
(void)bdevname(dev->bdev, dev->name);
dev->capacity = bdev_nr_sectors(bdev);
if (ti->begin) {
......
......@@ -56,7 +56,6 @@ struct dmz_dev {
struct dmz_metadata *metadata;
struct dmz_reclaim *reclaim;
char name[BDEVNAME_SIZE];
uuid_t uuid;
sector_t capacity;
......@@ -176,16 +175,16 @@ enum {
* Message functions.
*/
#define dmz_dev_info(dev, format, args...) \
DMINFO("(%s): " format, (dev)->name, ## args)
DMINFO("(%pg): " format, (dev)->bdev, ## args)
#define dmz_dev_err(dev, format, args...) \
DMERR("(%s): " format, (dev)->name, ## args)
DMERR("(%pg): " format, (dev)->bdev, ## args)
#define dmz_dev_warn(dev, format, args...) \
DMWARN("(%s): " format, (dev)->name, ## args)
DMWARN("(%pg): " format, (dev)->bdev, ## args)
#define dmz_dev_debug(dev, format, args...) \
DMDEBUG("(%s): " format, (dev)->name, ## args)
DMDEBUG("(%pg): " format, (dev)->bdev, ## args)
/*
* Functions defined in dm-zoned-metadata.c
......
......@@ -1457,6 +1457,8 @@ enum blk_unique_id {
struct block_device_operations {
void (*submit_bio)(struct bio *bio);
int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
unsigned int flags);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
......
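Only bio-based drivers may set the new hook; device_add_disk() (see the genhd.c hunk above) rejects ->poll_bio on blk-mq disks. A minimal, hypothetical wiring, with the mydrv_* names invented for illustration:

	static void mydrv_submit_bio(struct bio *bio)
	{
		/* remap and resubmit the bio, or queue it for deferred handling */
	}

	static int mydrv_poll_bio(struct bio *bio, struct io_comp_batch *iob,
				  unsigned int flags)
	{
		/* poll the underlying device(s); return the number of completions found */
		return 0;
	}

	static const struct block_device_operations mydrv_ops = {
		.owner		= THIS_MODULE,
		.submit_bio	= mydrv_submit_bio,
		.poll_bio	= mydrv_poll_bio,
	};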
......@@ -358,10 +358,16 @@ struct dm_target {
bool limit_swap_bios:1;
/*
* Set if this target implements a a zoned device and needs emulation of
* Set if this target implements a zoned device and needs emulation of
* zone append operations using regular writes.
*/
bool emulate_zone_append:1;
/*
* Set if the target will submit IO using dm_submit_bio_remap()
* after returning DM_MAPIO_SUBMITTED from its map function.
*/
bool accounts_remapped_io:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
......@@ -465,6 +471,7 @@ int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED
......
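Targets that defer bio submission to their own workqueues (dm-crypt, dm-thin, dm-cache, dm-delay in this series) opt in by setting accounts_remapped_io in their constructor and submitting through dm_submit_bio_remap(), so DM core starts IO accounting when the bio is actually issued rather than when ->map returns. A condensed sketch modeled on those conversions; the my_tgt_* names are illustrative:

	static int my_tgt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* ... */
		ti->accounts_remapped_io = true;	/* we call dm_submit_bio_remap() ourselves */
		return 0;
	}

	static int my_tgt_map(struct dm_target *ti, struct bio *bio)
	{
		/* defer: queue 'bio' to the target's workqueue instead of remapping here */
		return DM_MAPIO_SUBMITTED;
	}

	/* later, from the workqueue */
	static void my_tgt_issue(struct bio *orig, struct bio *clone)
	{
		/*
		 * 'orig' is the bio that was passed to ->map; pass the target's own
		 * clone as the second argument, or NULL to submit 'orig' itself
		 * (as dm-thin and dm-cache do).
		 */
		dm_submit_bio_remap(orig, clone);
	}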
......@@ -286,9 +286,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 45
#define DM_VERSION_MINOR 46
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2021-03-22)"
#define DM_VERSION_EXTRA "-ioctl (2022-02-22)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
......