Commit eaa160ed authored by Mike Snitzer

dm table: fix NVMe bio-based dm_table_determine_type() validation

The 'verify_rq_based:' code in dm_table_determine_type() was checking
all devices in the DM table rather than only checking the data devices.
Fix this by using the immutable target's iterate_devices method.
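
For context, a hedged sketch (not part of this commit) of the iterate_devices
pattern the fix relies on. The names example_target and example_iterate_devices
are hypothetical; iterate_devices_callout_fn and the .iterate_devices hook are
the real device-mapper interfaces. The key property is that the target decides
which devices the callout visits, so a callback like device_is_rq_based() in
the diff below only sees the target's data devices (the thin-pool target, for
instance, reports only its data device, not its metadata device):

#include <linux/device-mapper.h>

/* Hypothetical per-target state; not from this commit. */
struct example_target {
        struct dm_dev *data_dev;
};

/*
 * Sketch of a target's iterate_devices hook: invoke the callout only
 * for the device(s) that actually carry data, so table-wide checks
 * skip ancillary devices such as metadata.
 */
static int example_iterate_devices(struct dm_target *ti,
                                   iterate_devices_callout_fn fn, void *data)
{
        struct example_target *et = ti->private;

        return fn(ti, et->data_dev, 0, ti->len, data);
}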

Also, tweak the block of dm_table_determine_type() code that decides
whether to upgrade from DM_TYPE_BIO_BASED to DM_TYPE_NVME_BIO_BASED so
that it makes sure the immutable_target doesn't require splitting IOs
(i.e., it has no max_io_len).

These changes have been verified to allow a "thin-pool" target whose
data device is an NVMe device to be upgraded to DM_TYPE_NVME_BIO_BASED.
Using the thin-pool in NVMe bio-based mode was verified to pass all the
device-mapper-test-suite's "thin-provisioning" tests.
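
For illustration only, such a stack might be created along these lines; the
sizes and device paths are placeholders, not taken from this commit. Only the
data device (here an NVMe namespace) must satisfy the NVMe bio-based checks,
since the pool target's iterate_devices does not visit the metadata device:

# Hypothetical thin-pool whose data device is NVMe:
#   0 <len> thin-pool <metadata_dev> <data_dev> <block_size> <low_water_mark> <#features>
dmsetup create pool --table \
  "0 209715200 thin-pool /dev/mapper/pool-meta /dev/nvme0n1 128 32768 0"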

Also verified that request-based DM multipath (with queue_mode "rq" and
"mq") works as expected using the 'mptest' harness.

Fixes: 22c11858 ("dm: introduce DM_TYPE_NVME_BIO_BASED")
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent c12c9a3c
drivers/md/dm-table.c
@@ -912,13 +912,31 @@ static bool dm_table_supports_dax(struct dm_table *t)
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
 
+struct verify_rq_based_data {
+        unsigned sq_count;
+        unsigned mq_count;
+};
+
+static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
+                              sector_t start, sector_t len, void *data)
+{
+        struct request_queue *q = bdev_get_queue(dev->bdev);
+        struct verify_rq_based_data *v = data;
+
+        if (q->mq_ops)
+                v->mq_count++;
+        else
+                v->sq_count++;
+
+        return queue_is_rq_based(q);
+}
+
 static int dm_table_determine_type(struct dm_table *t)
 {
         unsigned i;
         unsigned bio_based = 0, request_based = 0, hybrid = 0;
-        unsigned sq_count = 0, mq_count = 0;
+        struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
         struct dm_target *tgt;
-        struct dm_dev_internal *dd;
         struct list_head *devices = dm_table_get_devices(t);
         enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 
@@ -972,11 +990,15 @@ static int dm_table_determine_type(struct dm_table *t)
                 if (dm_table_supports_dax(t) ||
                     (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
                         t->type = DM_TYPE_DAX_BIO_BASED;
-                } else if ((dm_table_get_immutable_target(t) &&
-                            dm_table_does_not_support_partial_completion(t)) ||
-                           (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED)) {
-                        t->type = DM_TYPE_NVME_BIO_BASED;
-                        goto verify_rq_based;
+                } else {
+                        /* Check if upgrading to NVMe bio-based is valid or required */
+                        tgt = dm_table_get_immutable_target(t);
+                        if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
+                                t->type = DM_TYPE_NVME_BIO_BASED;
+                                goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
+                        } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
+                                t->type = DM_TYPE_NVME_BIO_BASED;
+                        }
                 }
                 return 0;
         }
@@ -1025,25 +1047,16 @@ static int dm_table_determine_type(struct dm_table *t)
         }
 
         /* Non-request-stackable devices can't be used for request-based dm */
-        list_for_each_entry(dd, devices, list) {
-                struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
-
-                if (!queue_is_rq_based(q)) {
-                        DMERR("table load rejected: including"
-                              " non-request-stackable devices");
-                        return -EINVAL;
-                }
-
-                if (q->mq_ops)
-                        mq_count++;
-                else
-                        sq_count++;
+        if (!tgt->type->iterate_devices ||
+            !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+                DMERR("table load rejected: including non-request-stackable devices");
+                return -EINVAL;
         }
-        if (sq_count && mq_count) {
+        if (v.sq_count && v.mq_count) {
                 DMERR("table load rejected: not all devices are blk-mq request-stackable");
                 return -EINVAL;
         }
-        t->all_blk_mq = mq_count > 0;
+        t->all_blk_mq = v.mq_count > 0;
 
         if (!t->all_blk_mq &&
             (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {