Commit 36a0456f authored by Alasdair G Kergon

dm table: add immutable feature

Introduce DM_TARGET_IMMUTABLE to indicate that the target type cannot be mixed
with any other target type, and once loaded into a device, it cannot be
replaced with a table containing a different type.

The thin provisioning pool device will use this.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent cc6cbe14
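As a rough illustration (not part of this commit), a target driver would opt in to the new behaviour simply by setting the flag in its target_type. The name example_immutable_target below is hypothetical and the usual .ctr/.dtr/.map callbacks are omitted; a real target, such as the thin provisioning pool mentioned above, supplies its own:

    #include <linux/module.h>
    #include <linux/device-mapper.h>

    /*
     * Hypothetical sketch: a target type that declares itself immutable.
     * Registration (dm_register_target()) and the constructor/destructor/
     * map callbacks are left out for brevity.
     */
    static struct target_type example_immutable_target = {
            .name     = "example-immutable",
            .version  = {1, 0, 0},
            .module   = THIS_MODULE,
            .features = DM_TARGET_IMMUTABLE,  /* flag introduced by this commit */
    };

With that flag set, dm_table_add_target() refuses to mix the target with any other target type in the same table, and table_load() rejects any later table whose immutable target type differs from the one already bound to the device.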
@@ -1215,6 +1215,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
         struct hash_cell *hc;
         struct dm_table *t;
         struct mapped_device *md;
+        struct target_type *immutable_target_type;

         md = find_device(param);
         if (!md)
@@ -1230,6 +1231,16 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
                 goto out;
         }

+        immutable_target_type = dm_get_immutable_target_type(md);
+        if (immutable_target_type &&
+            (immutable_target_type != dm_table_get_immutable_target_type(t))) {
+                DMWARN("can't replace immutable target type %s",
+                       immutable_target_type->name);
+                dm_table_destroy(t);
+                r = -EINVAL;
+                goto out;
+        }
+
         /* Protect md->type and md->queue against concurrent table loads. */
         dm_lock_md_type(md);
         if (dm_get_md_type(md) == DM_TYPE_NONE)
@@ -54,6 +54,7 @@ struct dm_table {
         sector_t *highs;
         struct dm_target *targets;

+        struct target_type *immutable_target_type;
         unsigned integrity_supported:1;
         unsigned singleton:1;
@@ -780,6 +781,21 @@ int dm_table_add_target(struct dm_table *t, const char *type,
                 return -EINVAL;
         }

+        if (t->immutable_target_type) {
+                if (t->immutable_target_type != tgt->type) {
+                        DMERR("%s: immutable target type %s cannot be mixed with other target types",
+                              dm_device_name(t->md), t->immutable_target_type->name);
+                        return -EINVAL;
+                }
+        } else if (dm_target_is_immutable(tgt->type)) {
+                if (t->num_targets) {
+                        DMERR("%s: immutable target type %s cannot be mixed with other target types",
+                              dm_device_name(t->md), tgt->type->name);
+                        return -EINVAL;
+                }
+                t->immutable_target_type = tgt->type;
+        }
+
         tgt->table = t;
         tgt->begin = start;
         tgt->len = len;
@@ -937,6 +953,11 @@ unsigned dm_table_get_type(struct dm_table *t)
         return t->type;
 }

+struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
+{
+        return t->immutable_target_type;
+}
+
 bool dm_table_request_based(struct dm_table *t)
 {
         return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
@@ -140,6 +140,8 @@ struct mapped_device {
         /* Protect queue and type against concurrent access. */
         struct mutex type_lock;

+        struct target_type *immutable_target_type;
+
         struct gendisk *disk;
         char name[16];
@@ -2096,6 +2098,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
         write_lock_irqsave(&md->map_lock, flags);
         old_map = md->map;
         md->map = t;
+        md->immutable_target_type = dm_table_get_immutable_target_type(t);
+
         dm_table_set_restrictions(t, q, limits);
         if (merge_is_optional)
                 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
@@ -2166,6 +2170,11 @@ unsigned dm_get_md_type(struct mapped_device *md)
         return md->type;
 }

+struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
+{
+        return md->immutable_target_type;
+}
+
 /*
  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
  */
@@ -60,6 +60,7 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
+struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 bool dm_table_supports_discards(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
@@ -72,6 +73,7 @@ void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
 void dm_set_md_type(struct mapped_device *md, unsigned type);
 unsigned dm_get_md_type(struct mapped_device *md);
+struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 int dm_setup_md_queue(struct mapped_device *md);
@@ -173,6 +173,13 @@ struct target_type {
 #define dm_target_always_writeable(type) \
         ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

+/*
+ * Any device that contains a table with an instance of this target may never
+ * have tables containing any different target type.
+ */
+#define DM_TARGET_IMMUTABLE 0x00000004
+#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
+
 struct dm_target {
         struct dm_table *table;
         struct target_type *type;
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)

 #define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 21
+#define DM_VERSION_MINOR 22
 #define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2011-07-06)"
+#define DM_VERSION_EXTRA "-ioctl (2011-10-19)"

 /* Status bits */
 #define DM_READONLY_FLAG (1 << 0) /* In/Out */