Commit 2d669ceb authored by Shin'ichiro Kawasaki, committed by Mike Snitzer

dm table: Fix zoned model check and zone sectors check

Commit 24f6b603 ("dm table: fix zoned iterate_devices based device
capability checks") triggered a dm table load failure when a dm-zoned
device is set up on zoned block devices together with a regular block
device for cache.

The commit inverted the logic of two callback functions used with
iterate_devices: device_is_zoned_model() and device_matches_zone_sectors().
With the logic of device_is_zoned_model() inverted, all destination devices
of all targets in a dm table are required to have the expected zoned model.
This is fine for dm-linear, dm-flakey and dm-crypt on zoned block devices,
since each of those targets has only one destination device. However, it
results in failure for dm-zoned with a regular cache device, since that
target has both a regular block device and zoned block devices.

As for device_matches_zone_sectors(), the commit inverted the logic to
require that all zoned block devices in each target have the specified
zone_sectors. This check also fails for a regular block device, which does
not have zones.
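
For illustration only, here is a minimal user-space sketch (not the kernel
code; enum zoned_model, struct fake_dev and all_devs_have_model() are
made-up stand-ins) of why a check that requires every destination device to
report the expected zoned model rejects a dm-zoned table that mixes a
regular cache device with host-managed zoned devices:

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-ins for the kernel's zoned model and dm_dev notions. */
enum zoned_model { MODEL_NONE, MODEL_HM };

struct fake_dev {
        enum zoned_model model;
        unsigned int zone_sectors;      /* 0 for regular devices */
};

/*
 * Models the inverted check: the table loads only if every destination
 * device of the target reports the expected zoned model.
 */
static bool all_devs_have_model(const struct fake_dev *devs, int n,
                                enum zoned_model expected)
{
        for (int i = 0; i < n; i++)
                if (devs[i].model != expected)
                        return false;
        return true;
}

int main(void)
{
        /* dm-zoned with a regular cache device: one regular + one HM device. */
        const struct fake_dev dmz_devs[] = {
                { .model = MODEL_NONE, .zone_sectors = 0 },      /* cache */
                { .model = MODEL_HM, .zone_sectors = 524288 },   /* zoned */
        };

        /* The strict "all devices must be host-managed" check rejects it. */
        printf("table load %s\n",
               all_devs_have_model(dmz_devs, 2, MODEL_HM) ? "succeeds" : "fails");
        return 0;
}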

To avoid these check failures, fix both the zone model check and the zone
sectors check. For the zone model check, introduce the new feature flag
DM_TARGET_MIXED_ZONED_MODEL and set it on the dm-zoned target. When a
target has this flag, allow it to have destination devices with any zoned
model. For the zone sectors check, skip the check if the destination device
is not a zoned block device. Also add comments and improve an error message
to clarify the expectations of the two checks.
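
As a rough sketch of the intended behaviour after the fix (again a
user-space model, not the actual dm code; struct fake_target and its
boolean fields merely mirror the DM_TARGET_ZONED_HM and
DM_TARGET_MIXED_ZONED_MODEL feature flags), the two checks can be pictured
like this:

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-ins; only the flag names correspond to real dm features. */
enum zoned_model { MODEL_NONE, MODEL_HM };

struct fake_dev {
        enum zoned_model model;
        unsigned int zone_sectors;      /* 0 for regular devices */
};

struct fake_target {
        bool supports_zoned_hm;         /* ~ DM_TARGET_ZONED_HM */
        bool supports_mixed_model;      /* ~ DM_TARGET_MIXED_ZONED_MODEL */
        const struct fake_dev *devs;
        int nr_devs;
};

/* Zoned model check: only DM_TARGET_ZONED_HM targets need a uniform model. */
static bool target_model_ok(const struct fake_target *t, enum zoned_model model)
{
        if (t->supports_zoned_hm) {
                for (int i = 0; i < t->nr_devs; i++)
                        if (t->devs[i].model != model)
                                return false;
                return true;
        }
        /* A mixed-model target such as dm-zoned accepts any combination. */
        if (t->supports_mixed_model)
                return true;
        /* Any other target cannot be part of a host-managed zoned table. */
        return model != MODEL_HM;
}

/* Zone sectors check: regular (non-zoned) devices are simply skipped. */
static bool target_zone_sectors_ok(const struct fake_target *t,
                                   unsigned int zone_sectors)
{
        for (int i = 0; i < t->nr_devs; i++) {
                if (t->devs[i].model == MODEL_NONE)
                        continue;       /* no zones, nothing to compare */
                if (t->devs[i].zone_sectors != zone_sectors)
                        return false;
        }
        return true;
}

int main(void)
{
        const struct fake_dev dmz_devs[] = {
                { .model = MODEL_NONE, .zone_sectors = 0 },      /* cache */
                { .model = MODEL_HM, .zone_sectors = 524288 },   /* zoned */
        };
        const struct fake_target dmz = {
                .supports_mixed_model = true,   /* as dm-zoned after the fix */
                .devs = dmz_devs,
                .nr_devs = 2,
        };

        /* The mixed-device table now passes both checks. */
        printf("model check: %s, zone sectors check: %s\n",
               target_model_ok(&dmz, MODEL_HM) ? "pass" : "fail",
               target_zone_sectors_ok(&dmz, 524288) ? "pass" : "fail");
        return 0;
}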

Fixes: 24f6b603 ("dm table: fix zoned iterate_devices based device capability checks")
Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 160f99db
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1594,6 +1594,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
 	return blk_queue_zoned_model(q) != *zoned_model;
 }
 
+/*
+ * Check the device zoned model based on the target feature flag. If the target
+ * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
+ * also accepted but all devices must have the same zoned model. If the target
+ * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
+ * zoned model with all zoned devices having the same zone size.
+ */
 static bool dm_table_supports_zoned_model(struct dm_table *t,
 					  enum blk_zoned_model zoned_model)
 {
@@ -1603,14 +1610,16 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 		ti = dm_table_get_target(t, i);
 
-		if (zoned_model == BLK_ZONED_HM &&
-		    !dm_target_supports_zoned_hm(ti->type))
-			return false;
-
-		if (!ti->type->iterate_devices ||
-		    ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
-			return false;
+		if (dm_target_supports_zoned_hm(ti->type)) {
+			if (!ti->type->iterate_devices ||
+			    ti->type->iterate_devices(ti, device_not_zoned_model,
+						      &zoned_model))
+				return false;
+		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
+			if (zoned_model == BLK_ZONED_HM)
+				return false;
+		}
 	}
 
 	return true;
 }
@@ -1621,9 +1630,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	unsigned int *zone_sectors = data;
 
+	if (!blk_queue_is_zoned(q))
+		return 0;
+
 	return blk_queue_zone_sectors(q) != *zone_sectors;
 }
 
+/*
+ * Check consistency of zoned model and zone sectors across all targets. For
+ * zone sectors, if the destination device is a zoned block device, it shall
+ * have the specified zone_sectors.
+ */
 static int validate_hardware_zoned_model(struct dm_table *table,
 					 enum blk_zoned_model zoned_model,
 					 unsigned int zone_sectors)
@@ -1642,7 +1659,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
 		return -EINVAL;
 
 	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
-		DMERR("%s: zone sectors is not consistent across all devices",
+		DMERR("%s: zone sectors is not consistent across all zoned devices",
 		      dm_device_name(table->md));
 		return -EINVAL;
 	}
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
 static struct target_type dmz_type = {
 	.name		 = "zoned",
 	.version	 = {2, 0, 0},
-	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
+	.features	 = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
 	.module		 = THIS_MODULE,
 	.ctr		 = dmz_ctr,
 	.dtr		 = dmz_dtr,
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -253,7 +253,11 @@ struct target_type {
 #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
 
 /*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ *   block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ *   devices with different zoned models.
  */
 #ifdef CONFIG_BLK_DEV_ZONED
 #define DM_TARGET_ZONED_HM	0x00000040
@@ -275,6 +279,15 @@ struct target_type {
 #define DM_TARGET_PASSES_CRYPTO		0x00000100
 #define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
 
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
+
 struct dm_target {
 	struct dm_table *table;
 	struct target_type *type;