Commit 512875bd authored by Jun'ichi Nomura's avatar Jun'ichi Nomura Committed by Alasdair G Kergon

dm: table detect io beyond device

This patch fixes a panic on shrinking a DM device if there is
outstanding I/O to the part of the device that is being removed.
(Normally this doesn't happen - a filesystem would be resized first,
for example.)

The bug is that __clone_and_map() assumes dm_table_find_target()
always returns a valid pointer.  It may fail if a bio arrives from the
block layer but its target sector is no longer included in the DM
btree.

This patch appends an empty entry to table->targets[] which will
be returned by a lookup beyond the end of the device.

After calling dm_table_find_target(), both __clone_and_map() and
target_message() check for this condition using dm_target_is_valid().

Sample test script to trigger oops:
parent fbdcf18d
@@ -1250,21 +1250,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	if (!table)
 		goto out_argv;
 
-	if (tmsg->sector >= dm_table_get_size(table)) {
+	ti = dm_table_find_target(table, tmsg->sector);
+	if (!dm_target_is_valid(ti)) {
 		DMWARN("Target message sector outside device.");
 		r = -EINVAL;
-		goto out_table;
-	}
-
-	ti = dm_table_find_target(table, tmsg->sector);
-	if (ti->type->message)
+	} else if (ti->type->message)
 		r = ti->type->message(ti, argc, argv);
 	else {
 		DMWARN("Target type does not support messages");
 		r = -EINVAL;
 	}
 
- out_table:
 	dm_table_put(table);
  out_argv:
 	kfree(argv);
...
@@ -189,8 +189,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 	/*
 	 * Allocate both the target array and offset array at once.
+	 * Append an empty entry to catch sectors beyond the end of
+	 * the device.
 	 */
-	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
 					  sizeof(sector_t));
 	if (!n_highs)
 		return -ENOMEM;
@@ -867,6 +869,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
...
@@ -672,13 +672,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	return clone;
 }
 
-static void __clone_and_map(struct clone_info *ci)
+static int __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
-	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
-	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
+	struct dm_target *ti;
+	sector_t len = 0, max;
 	struct dm_target_io *tio;
 
+	ti = dm_table_find_target(ci->map, ci->sector);
+	if (!dm_target_is_valid(ti))
+		return -EIO;
+
+	max = max_io_len(ci->md, ci->sector, ti);
+
 	/*
 	 * Allocate a target io object.
 	 */
@@ -736,6 +742,9 @@ static int __clone_and_map(struct clone_info *ci)
 		do {
 			if (offset) {
 				ti = dm_table_find_target(ci->map, ci->sector);
+				if (!dm_target_is_valid(ti))
+					return -EIO;
+
 				max = max_io_len(ci->md, ci->sector, ti);
 
 				tio = alloc_tio(ci->md);
@@ -759,6 +768,8 @@ static int __clone_and_map(struct clone_info *ci)
 		ci->idx++;
 	}
+
+	return 0;
 }
 
 /*
@@ -767,6 +778,7 @@ static int __clone_and_map(struct clone_info *ci)
 static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
 	struct clone_info ci;
+	int error = 0;
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
@@ -784,11 +796,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.idx = bio->bi_idx;
 
 	start_io_acct(ci.io);
-	while (ci.sector_count)
-		__clone_and_map(&ci);
+	while (ci.sector_count && !error)
+		error = __clone_and_map(&ci);
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, 0);
+	dec_pending(ci.io, error);
 	dm_table_put(ci.map);
 
 	return 0;
...
@@ -112,6 +112,11 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
 
+/*
+ * To check the return value from dm_table_find_target().
+ */
+#define dm_target_is_valid(t) ((t)->table)
+
 /*-----------------------------------------------------------------
  * A registry of target types.
  *---------------------------------------------------------------*/
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment