Commit 26c78a95 authored by Andrew Morton, committed by Linus Torvalds
parent d0511fb6

[PATCH] raid locking fix.

From: Neil Brown <neilb@cse.unsw.edu.au>

Fix bug #2661

Raid currently calls ->unplug_fn under spin_lock_irqsave(), but unplug_fns
can sleep.
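
For illustration, this is the shape of the problem, condensed from the old raid1 unplug_slaves() in the diff below: the possibly-sleeping ->unplug_fn callback is invoked with device_lock held and interrupts disabled.

	static void unplug_slaves(mddev_t *mddev)	/* old raid1 version, condensed */
	{
		conf_t *conf = mddev_to_conf(mddev);
		unsigned long flags;
		int i;

		spin_lock_irqsave(&conf->device_lock, flags);
		for (i=0; i<mddev->raid_disks; i++) {
			mdk_rdev_t *rdev = conf->mirrors[i].rdev;
			if (rdev && !rdev->faulty) {
				request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

				if (r_queue->unplug_fn)
					r_queue->unplug_fn(r_queue);	/* may sleep, but IRQs are off */
			}
		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}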


After a morning of scratching my head and trying to come up with something that
does less locking, the following is the best I can come up with.  I'm not
proud of it, but it should work.
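
Concretely, each device is pinned via its nr_pending count so device_lock can be dropped across the possibly-sleeping call. Condensed from the hunks below, the body of the per-device branch in each unplug_slaves() becomes:

			atomic_inc(&rdev->nr_pending);	/* pin rdev while unlocked */
			spin_unlock_irqrestore(&conf->device_lock, flags);

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);	/* may sleep; lock is dropped */

			spin_lock_irqsave(&conf->device_lock, flags);
			atomic_dec(&rdev->nr_pending);	/* unpin under the lock */

The raised nr_pending keeps the rdev from being removed while the lock is not held.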

If I move "nr_pending" out or rdev into the per-personality structures
(e.g.  mirror_info), and if I had "atomic_inc_if_nonzero" I could do with
without locking so much, but random atomic* functions don't seem trivial
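
For reference, such a primitive would be a small compare-and-swap retry loop. The sketch below is hypothetical and not part of this patch; it assumes an atomic_cmpxchg() helper (a similar atomic_inc_not_zero() appeared in the kernel later):

	/* Hypothetical, for illustration only: increment v only if it is
	 * currently non-zero.  Returns 1 if the increment happened, 0 if
	 * v was zero and no reference could be taken. */
	static inline int atomic_inc_if_nonzero(atomic_t *v)
	{
		int old;

		/* Retry until we either observe zero or successfully bump the count. */
		do {
			old = atomic_read(v);
			if (old == 0)
				return 0;	/* caller must not take a reference */
		} while (atomic_cmpxchg(v, old, old + 1) != old);

		return 1;	/* reference taken */
	}

With that, unplug_slaves() could simply try to pin each rdev before calling its unplug_fn, without the lock-drop dance below.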
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -159,16 +159,25 @@ static void unplug_slaves(mddev_t *mddev)
 {
 	multipath_conf_t *conf = mddev_to_conf(mddev);
 	int i;
+	unsigned long flags;
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
 		if (rdev && !rdev->faulty) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
 
+			atomic_inc(&rdev->nr_pending);
+			spin_unlock_irqrestore(&conf->device_lock, flags);
+
 			if (r_queue->unplug_fn)
 				r_queue->unplug_fn(r_queue);
+
+			spin_lock_irqsave(&conf->device_lock, flags);
+			atomic_dec(&rdev->nr_pending);
 		}
 	}
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 }
 
 static void multipath_unplug(request_queue_t *q)
 {
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -459,11 +459,17 @@ static void unplug_slaves(mddev_t *mddev)
 	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
-		if (rdev && !rdev->faulty) {
+		if (rdev && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
 
+			atomic_inc(&rdev->nr_pending);
+			spin_unlock_irqrestore(&conf->device_lock, flags);
+
 			if (r_queue->unplug_fn)
 				r_queue->unplug_fn(r_queue);
+
+			spin_lock_irqsave(&conf->device_lock, flags);
+			atomic_dec(&rdev->nr_pending);
 		}
 	}
 	spin_unlock_irqrestore(&conf->device_lock, flags);
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1301,18 +1301,25 @@ static void unplug_slaves(mddev_t *mddev)
 {
 	raid5_conf_t *conf = mddev_to_conf(mddev);
 	int i;
+	unsigned long flags;
 
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = conf->disks[i].rdev;
-		if (rdev && !rdev->faulty) {
-			struct block_device *bdev = rdev->bdev;
-			if (bdev) {
-				request_queue_t *r_queue = bdev_get_queue(bdev);
-				if (r_queue && r_queue->unplug_fn)
-					r_queue->unplug_fn(r_queue);
-			}
+		if (rdev && atomic_read(&rdev->nr_pending)) {
+			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+
+			atomic_inc(&rdev->nr_pending);
+			spin_unlock_irqrestore(&conf->device_lock, flags);
+
+			if (r_queue && r_queue->unplug_fn)
+				r_queue->unplug_fn(r_queue);
+
+			spin_lock_irqsave(&conf->device_lock, flags);
+			atomic_dec(&rdev->nr_pending);
 		}
 	}
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 }
 
 static void raid5_unplug_device(request_queue_t *q)
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1461,21 +1461,27 @@ static inline void raid6_activate_delayed(raid6_conf_t *conf)
 static void unplug_slaves(mddev_t *mddev)
 {
-	/* note: this is always called with device_lock held */
 	raid6_conf_t *conf = mddev_to_conf(mddev);
 	int i;
+	unsigned long flags;
 
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = conf->disks[i].rdev;
-		if (rdev && !rdev->faulty) {
-			struct block_device *bdev = rdev->bdev;
-			if (bdev) {
-				request_queue_t *r_queue = bdev_get_queue(bdev);
-				if (r_queue && r_queue->unplug_fn)
-					r_queue->unplug_fn(r_queue);
-			}
+		if (rdev && atomic_read(&rdev->nr_pending)) {
+			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+
+			atomic_inc(&rdev->nr_pending);
+			spin_unlock_irqrestore(&conf->device_lock, flags);
+
+			if (r_queue && r_queue->unplug_fn)
+				r_queue->unplug_fn(r_queue);
+
+			spin_lock_irqsave(&conf->device_lock, flags);
+			atomic_dec(&rdev->nr_pending);
 		}
 	}
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 }
 
 static void raid6_unplug_device(request_queue_t *q)
 {