Commit a05b7ea0 authored by NeilBrown

md: avoid crash when stopping md array races with closing other open fds.

md will refuse to stop an array if any other fd (or mounted fs) is
using it.
When any fs is unmounted or when the last open fd is closed, all
pending IO will be flushed (e.g. the sync_blockdev call in __blkdev_put),
so there will be no pending IO to worry about when the array is
stopped.
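To make that last-close behaviour concrete, here is a tiny stand-alone toy model; nothing in it is kernel code, and the struct and function names are invented for illustration only:

#include <stdio.h>

/* Toy model only: a pretend block device where, as described above,
 * pending IO is flushed on the *last* close and on no other close. */
struct toy_bdev {
	int openers;	/* stand-in for the kernel's open count */
	int dirty;	/* pretend there is unflushed IO */
};

static void toy_close(struct toy_bdev *b)
{
	if (--b->openers == 0 && b->dirty) {
		b->dirty = 0;
		printf("last close: pending IO flushed\n");
	} else {
		printf("not the last close: nothing flushed\n");
	}
}

int main(void)
{
	struct toy_bdev md0 = { .openers = 2, .dirty = 1 };

	toy_close(&md0);	/* e.g. a writer closes while mdadm holds an fd */
	toy_close(&md0);	/* the real last close flushes */
	return 0;
}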

However, in order to send the STOP_ARRAY ioctl to stop the array, one
must first get an open fd on the block device.
If some fd is being used to write to the block device and it is closed
after mdadm opens the block device, but before mdadm issues the
STOP_ARRAY ioctl, then there will be no last-close on the md device so
__blkdev_put will not call sync_blockdev.

If this happens, then IO can still be in-flight while md tears down
the array and bad things can happen (use-after-free and subsequent
havoc).

So in the case where do_md_stop is being called from an open file
descriptor, call sync_blockdev after taking the mutex to ensure there
will be no new openers.

This is needed when setting a read-write device to read-only too.
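For illustration, this is roughly the userspace sequence on the mdadm side of the race; the device path and error handling are hypothetical, and STOP_ARRAY is the ioctl from <linux/raid/md_u.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>	/* STOP_ARRAY */

int main(void)
{
	/* Hypothetical array device; mdadm does something similar. */
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/md0");
		return 1;
	}
	/* If another fd writing to /dev/md0 is closed between the open()
	 * above and this ioctl(), that close is not the last close, so
	 * __blkdev_put does not sync -- the sync_blockdev added to
	 * do_md_stop/md_set_readonly below covers exactly that window. */
	if (ioctl(fd, STOP_ARRAY, NULL) < 0)
		perror("STOP_ARRAY");
	close(fd);
	return 0;
}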

Cc: stable@vger.kernel.org
Reported-by: majianpeng <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
parent 25f7fd47
@@ -3927,8 +3927,8 @@ array_state_show(struct mddev *mddev, char *page)
 	return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
 static int do_md_run(struct mddev * mddev);
 static int restart_array(struct mddev *mddev);
@@ -3944,14 +3944,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 		/* stopping an active array */
 		if (atomic_read(&mddev->openers) > 0)
 			return -EBUSY;
-		err = do_md_stop(mddev, 0, 0);
+		err = do_md_stop(mddev, 0, NULL);
 		break;
 	case inactive:
 		/* stopping an active array */
 		if (mddev->pers) {
 			if (atomic_read(&mddev->openers) > 0)
 				return -EBUSY;
-			err = do_md_stop(mddev, 2, 0);
+			err = do_md_stop(mddev, 2, NULL);
 		} else
 			err = 0; /* already inactive */
 		break;
@@ -3959,7 +3959,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 		break; /* not supported yet */
 	case readonly:
 		if (mddev->pers)
-			err = md_set_readonly(mddev, 0);
+			err = md_set_readonly(mddev, NULL);
 		else {
 			mddev->ro = 1;
 			set_disk_ro(mddev->gendisk, 1);
@@ -3969,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 	case read_auto:
 		if (mddev->pers) {
 			if (mddev->ro == 0)
-				err = md_set_readonly(mddev, 0);
+				err = md_set_readonly(mddev, NULL);
 			else if (mddev->ro == 1)
 				err = restart_array(mddev);
 			if (err == 0) {
@@ -5352,15 +5352,17 @@ void md_stop(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop);
 
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
 	int err = 0;
 	mutex_lock(&mddev->open_mutex);
-	if (atomic_read(&mddev->openers) > is_open) {
+	if (atomic_read(&mddev->openers) > !!bdev) {
 		printk("md: %s still in use.\n",mdname(mddev));
 		err = -EBUSY;
 		goto out;
 	}
+	if (bdev)
+		sync_blockdev(bdev);
 	if (mddev->pers) {
 		__md_stop_writes(mddev);
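A note on the openers test above: with the ioctl path now passing its own block device, the threshold becomes !!bdev instead of the old is_open flag. A small stand-alone demo of that arithmetic (the helper below is invented for illustration and is not kernel code):

#include <stdio.h>

/* Invented helper: mirrors the "openers > !!bdev" check.  When called
 * from the ioctl path (have_bdev != 0) the caller's own open reference
 * is tolerated; from sysfs (have_bdev == 0) any opener means busy. */
static int would_be_busy(int openers, int have_bdev)
{
	return openers > !!have_bdev;
}

int main(void)
{
	printf("sysfs path, 1 opener : busy=%d\n", would_be_busy(1, 0)); /* 1 */
	printf("ioctl path, 1 opener : busy=%d\n", would_be_busy(1, 1)); /* 0 */
	printf("ioctl path, 2 openers: busy=%d\n", would_be_busy(2, 1)); /* 1 */
	return 0;
}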
@@ -5382,18 +5384,26 @@ static int md_set_readonly(struct mddev *mddev, int is_open)
  *   0 - completely stop and dis-assemble array
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+		      struct block_device *bdev)
 {
 	struct gendisk *disk = mddev->gendisk;
 	struct md_rdev *rdev;
 
 	mutex_lock(&mddev->open_mutex);
-	if (atomic_read(&mddev->openers) > is_open ||
+	if (atomic_read(&mddev->openers) > !!bdev ||
 	    mddev->sysfs_active) {
 		printk("md: %s still in use.\n",mdname(mddev));
 		mutex_unlock(&mddev->open_mutex);
 		return -EBUSY;
 	}
+	if (bdev)
+		/* It is possible IO was issued on some other
+		 * open file which was closed before we took ->open_mutex.
+		 * As that was not the last close __blkdev_put will not
+		 * have called sync_blockdev, so we must.
+		 */
+		sync_blockdev(bdev);
 
 	if (mddev->pers) {
 		if (mddev->ro)
@@ -5467,7 +5477,7 @@ static void autorun_array(struct mddev *mddev)
 	err = do_md_run(mddev);
 	if (err) {
 		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-		do_md_stop(mddev, 0, 0);
+		do_md_stop(mddev, 0, NULL);
 	}
 }
@@ -6482,11 +6492,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 		goto done_unlock;
 
 	case STOP_ARRAY:
-		err = do_md_stop(mddev, 0, 1);
+		err = do_md_stop(mddev, 0, bdev);
 		goto done_unlock;
 
 	case STOP_ARRAY_RO:
-		err = md_set_readonly(mddev, 1);
+		err = md_set_readonly(mddev, bdev);
 		goto done_unlock;
 
 	case BLKROSET: