Commit 6e010a71 authored by Neil Brown's avatar Neil Brown Committed by Linus Torvalds

[PATCH] md: make retry_list non-global in raid1 and multipath

Both raid1 and multipath have a "retry_list" which is global, so all raid1
arrays (for example) use the same list.  This is rather ugly, and it is simple
enough to make it per-array, so this patch does that.

It also changes the multipath code to use list.h lists instead of
roll-your-own.
Signed-off-by: Neil Brown <neilb@cse.unsw.edu.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 92952632
...@@ -36,8 +36,6 @@ ...@@ -36,8 +36,6 @@
static mdk_personality_t multipath_personality; static mdk_personality_t multipath_personality;
static spinlock_t retry_list_lock = SPIN_LOCK_UNLOCKED;
struct multipath_bh *multipath_retry_list = NULL, **multipath_retry_tail;
static void *mp_pool_alloc(int gfp_flags, void *data) static void *mp_pool_alloc(int gfp_flags, void *data)
...@@ -82,14 +80,11 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) ...@@ -82,14 +80,11 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{ {
unsigned long flags; unsigned long flags;
mddev_t *mddev = mp_bh->mddev; mddev_t *mddev = mp_bh->mddev;
multipath_conf_t *conf = mddev_to_conf(mddev);
spin_lock_irqsave(&retry_list_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
if (multipath_retry_list == NULL) list_add(&mp_bh->retry_list, &conf->retry_list);
multipath_retry_tail = &multipath_retry_list; spin_unlock_irqrestore(&conf->device_lock, flags);
*multipath_retry_tail = mp_bh;
multipath_retry_tail = &mp_bh->next_mp;
mp_bh->next_mp = NULL;
spin_unlock_irqrestore(&retry_list_lock, flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
} }
...@@ -382,18 +377,18 @@ static void multipathd (mddev_t *mddev) ...@@ -382,18 +377,18 @@ static void multipathd (mddev_t *mddev)
struct bio *bio; struct bio *bio;
unsigned long flags; unsigned long flags;
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev_to_conf(mddev);
struct list_head *head = &conf->retry_list;
md_check_recovery(mddev); md_check_recovery(mddev);
for (;;) { for (;;) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
spin_lock_irqsave(&retry_list_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
mp_bh = multipath_retry_list; if (list_empty(head))
if (!mp_bh)
break; break;
multipath_retry_list = mp_bh->next_mp; mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
spin_unlock_irqrestore(&retry_list_lock, flags); list_del(head->prev);
spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = mp_bh->mddev;
bio = &mp_bh->bio; bio = &mp_bh->bio;
bio->bi_sector = mp_bh->master_bio->bi_sector; bio->bi_sector = mp_bh->master_bio->bi_sector;
...@@ -416,7 +411,7 @@ static void multipathd (mddev_t *mddev) ...@@ -416,7 +411,7 @@ static void multipathd (mddev_t *mddev)
generic_make_request(bio); generic_make_request(bio);
} }
} }
spin_unlock_irqrestore(&retry_list_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
} }
static int multipath_run (mddev_t *mddev) static int multipath_run (mddev_t *mddev)
...@@ -489,6 +484,7 @@ static int multipath_run (mddev_t *mddev) ...@@ -489,6 +484,7 @@ static int multipath_run (mddev_t *mddev)
mddev->sb_dirty = 1; mddev->sb_dirty = 1;
conf->mddev = mddev; conf->mddev = mddev;
conf->device_lock = SPIN_LOCK_UNLOCKED; conf->device_lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&conf->retry_list);
if (!conf->working_disks) { if (!conf->working_disks) {
printk(KERN_ERR "multipath: no operational IO paths for %s\n", printk(KERN_ERR "multipath: no operational IO paths for %s\n",
......
...@@ -30,8 +30,6 @@ ...@@ -30,8 +30,6 @@
#define NR_RAID1_BIOS 256 #define NR_RAID1_BIOS 256
static mdk_personality_t raid1_personality; static mdk_personality_t raid1_personality;
static spinlock_t retry_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(retry_list_head);
static void unplug_slaves(mddev_t *mddev); static void unplug_slaves(mddev_t *mddev);
...@@ -188,10 +186,11 @@ static void reschedule_retry(r1bio_t *r1_bio) ...@@ -188,10 +186,11 @@ static void reschedule_retry(r1bio_t *r1_bio)
{ {
unsigned long flags; unsigned long flags;
mddev_t *mddev = r1_bio->mddev; mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev_to_conf(mddev);
spin_lock_irqsave(&retry_list_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r1_bio->retry_list, &retry_list_head); list_add(&r1_bio->retry_list, &conf->retry_list);
spin_unlock_irqrestore(&retry_list_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
} }
...@@ -904,11 +903,11 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) ...@@ -904,11 +903,11 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
static void raid1d(mddev_t *mddev) static void raid1d(mddev_t *mddev)
{ {
struct list_head *head = &retry_list_head;
r1bio_t *r1_bio; r1bio_t *r1_bio;
struct bio *bio; struct bio *bio;
unsigned long flags; unsigned long flags;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev_to_conf(mddev);
struct list_head *head = &conf->retry_list;
int unplug=0; int unplug=0;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
...@@ -917,12 +916,12 @@ static void raid1d(mddev_t *mddev) ...@@ -917,12 +916,12 @@ static void raid1d(mddev_t *mddev)
for (;;) { for (;;) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
spin_lock_irqsave(&retry_list_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) if (list_empty(head))
break; break;
r1_bio = list_entry(head->prev, r1bio_t, retry_list); r1_bio = list_entry(head->prev, r1bio_t, retry_list);
list_del(head->prev); list_del(head->prev);
spin_unlock_irqrestore(&retry_list_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r1_bio->mddev; mddev = r1_bio->mddev;
conf = mddev_to_conf(mddev); conf = mddev_to_conf(mddev);
...@@ -956,7 +955,7 @@ static void raid1d(mddev_t *mddev) ...@@ -956,7 +955,7 @@ static void raid1d(mddev_t *mddev)
} }
} }
} }
spin_unlock_irqrestore(&retry_list_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
if (unplug) if (unplug)
unplug_slaves(mddev); unplug_slaves(mddev);
} }
...@@ -1205,6 +1204,7 @@ static int run(mddev_t *mddev) ...@@ -1205,6 +1204,7 @@ static int run(mddev_t *mddev)
conf->raid_disks = mddev->raid_disks; conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev; conf->mddev = mddev;
conf->device_lock = SPIN_LOCK_UNLOCKED; conf->device_lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&conf->retry_list);
if (conf->working_disks == 1) if (conf->working_disks == 1)
mddev->recovery_cp = MaxSector; mddev->recovery_cp = MaxSector;
......
...@@ -13,6 +13,7 @@ struct multipath_private_data { ...@@ -13,6 +13,7 @@ struct multipath_private_data {
int raid_disks; int raid_disks;
int working_disks; int working_disks;
spinlock_t device_lock; spinlock_t device_lock;
struct list_head retry_list;
mempool_t *pool; mempool_t *pool;
}; };
...@@ -36,6 +37,6 @@ struct multipath_bh { ...@@ -36,6 +37,6 @@ struct multipath_bh {
struct bio *master_bio; struct bio *master_bio;
struct bio bio; struct bio bio;
int path; int path;
struct multipath_bh *next_mp; /* next for retry */ struct list_head retry_list;
}; };
#endif #endif
...@@ -35,6 +35,7 @@ struct r1_private_data_s { ...@@ -35,6 +35,7 @@ struct r1_private_data_s {
sector_t next_seq_sect; sector_t next_seq_sect;
spinlock_t device_lock; spinlock_t device_lock;
struct list_head retry_list;
/* for use when syncing mirrors: */ /* for use when syncing mirrors: */
spinlock_t resync_lock; spinlock_t resync_lock;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment