Commit 475b0321 authored by NeilBrown

md/raid10: writes should get directed to replacement as well as original.

When writing, we need to submit two writes: one to the original,
and one to the replacement - if there is a replacement.

If the write to the replacement results in a write error, we just
fail the device.  We only try to record write errors against the
original.

This only handles writing new data.  Writing for resync/recovery
will come later.
Signed-off-by: NeilBrown <neilb@suse.de>
parent c8ab903e
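
In outline, the write path below queues one clone of the incoming bio for the original device of each copy, and a second clone when that copy has an active replacement. The following is a minimal userspace sketch of that pattern; every type and helper in it (struct mirror_slot, queue_write, and so on) is an illustrative stand-in, not the kernel's md API.

/* Minimal sketch of the dual-write submission pattern; all types
 * and helpers below are illustrative stand-ins, not the md API. */
#include <stdio.h>

struct disk {
	const char *name;
};

struct mirror_slot {
	struct disk *rdev;	/* original device (may be NULL)    */
	struct disk *repl;	/* replacement device (may be NULL) */
};

static void queue_write(struct disk *d, const char *data)
{
	printf("write '%s' -> %s\n", data, d->name);
}

/* One write per live device: the original gets the data, and the
 * replacement, if attached, gets an identical second write. */
static void submit_write(struct mirror_slot *slot, const char *data)
{
	if (slot->rdev)
		queue_write(slot->rdev, data);
	if (slot->repl)
		queue_write(slot->repl, data);
}

int main(void)
{
	struct disk sda = { "sda" };
	struct disk sdb = { "sdb" };
	struct mirror_slot slot = { &sda, &sdb };	/* sdb being rebuilt */

	submit_write(&slot, "new data");
	return 0;
}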
@@ -396,17 +396,29 @@ static void raid10_end_write_request(struct bio *bio, int error)
 	int dev;
 	int dec_rdev = 1;
 	struct r10conf *conf = r10_bio->mddev->private;
-	int slot;
+	int slot, repl;
+	struct md_rdev *rdev;
 
-	dev = find_bio_disk(conf, r10_bio, bio, &slot, NULL);
+	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+	if (repl)
+		rdev = conf->mirrors[dev].replacement;
+	else
+		rdev = conf->mirrors[dev].rdev;
 
 	/*
 	 * this branch is our 'one mirror IO has finished' event handler:
 	 */
 	if (!uptodate) {
-		set_bit(WriteErrorSeen,	&conf->mirrors[dev].rdev->flags);
-		set_bit(R10BIO_WriteError, &r10_bio->state);
-		dec_rdev = 0;
+		if (repl)
+			/* Never record new bad blocks to replacement,
+			 * just fail it.
+			 */
+			md_error(rdev->mddev, rdev);
+		else {
+			set_bit(WriteErrorSeen,	&rdev->flags);
+			set_bit(R10BIO_WriteError, &r10_bio->state);
+			dec_rdev = 0;
+		}
 	} else {
 		/*
 		 * Set R10BIO_Uptodate in our master bio, so that
@@ -423,11 +435,14 @@ static void raid10_end_write_request(struct bio *bio, int error)
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
 
 		/* Maybe we can clear some bad blocks. */
-		if (is_badblock(conf->mirrors[dev].rdev,
+		if (is_badblock(rdev,
 				r10_bio->devs[slot].addr,
 				r10_bio->sectors,
 				&first_bad, &bad_sectors)) {
 			bio_put(bio);
-			r10_bio->devs[slot].bio = IO_MADE_GOOD;
+			if (repl)
+				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+			else
+				r10_bio->devs[slot].bio = IO_MADE_GOOD;
 			dec_rdev = 0;
 			set_bit(R10BIO_MadeGood, &r10_bio->state);
@@ -444,7 +459,6 @@ static void raid10_end_write_request(struct bio *bio, int error)
 		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
 
-
 /*
  * RAID10 layout manager
  * As well as the chunksize and raid_disks count, there are two
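
The raid10_end_write_request() hunks above implement the error policy from the commit message: a failed write to the original is only flagged (WriteErrorSeen, R10BIO_WriteError) so the bad range can be recorded later, while a failed write to the replacement immediately fails that device via md_error(). A compact sketch of the asymmetry follows, again with illustrative stand-in names (record_bad_block, fail_device), not the kernel helpers.

/* Sketch of the asymmetric write-error policy, using illustrative
 * stand-ins (record_bad_block, fail_device) for the kernel helpers. */
#include <stdbool.h>
#include <stdio.h>

struct disk {
	const char *name;
	bool failed;
};

static void record_bad_block(struct disk *d)
{
	printf("noting bad-block candidate on %s\n", d->name);
}

static void fail_device(struct disk *d)
{
	d->failed = true;
	printf("%s marked Faulty\n", d->name);
}

static void on_write_done(struct disk *d, bool is_replacement, bool ok)
{
	if (ok)
		return;
	if (is_replacement)
		fail_device(d);		/* never record new bad blocks on
					 * a replacement, just fail it */
	else
		record_bad_block(d);	/* the original's bad range can be
					 * recorded and repaired later */
}

int main(void)
{
	struct disk orig = { "sda", false };
	struct disk repl = { "sdb(repl)", false };

	on_write_done(&orig, false, false);	/* -> bad-block candidate */
	on_write_done(&repl, true, false);	/* -> device failed */
	return 0;
}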
@@ -1073,12 +1087,23 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	for (i = 0;  i < conf->copies; i++) {
 		int d = r10_bio->devs[i].devnum;
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+		struct md_rdev *rrdev = rcu_dereference(
+			conf->mirrors[d].replacement);
 		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
 			atomic_inc(&rdev->nr_pending);
 			blocked_rdev = rdev;
 			break;
 		}
+		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+			atomic_inc(&rrdev->nr_pending);
+			blocked_rdev = rrdev;
+			break;
+		}
+		if (rrdev && test_bit(Faulty, &rrdev->flags))
+			rrdev = NULL;
+
 		r10_bio->devs[i].bio = NULL;
+		r10_bio->devs[i].repl_bio = NULL;
 		if (!rdev || test_bit(Faulty, &rdev->flags)) {
 			set_bit(R10BIO_Degraded, &r10_bio->state);
 			continue;
@@ -1127,6 +1152,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 		}
 		r10_bio->devs[i].bio = bio;
 		atomic_inc(&rdev->nr_pending);
+		if (rrdev) {
+			r10_bio->devs[i].repl_bio = bio;
+			atomic_inc(&rrdev->nr_pending);
+		}
 	}
 	rcu_read_unlock();
@@ -1135,11 +1164,17 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 		int j;
 		int d;
 
-		for (j = 0; j < i; j++)
+		for (j = 0; j < i; j++) {
 			if (r10_bio->devs[j].bio) {
 				d = r10_bio->devs[j].devnum;
 				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 			}
+			if (r10_bio->devs[j].repl_bio) {
+				d = r10_bio->devs[j].devnum;
+				rdev_dec_pending(
+					conf->mirrors[d].replacement, mddev);
+			}
+		}
 		allow_barrier(conf);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		wait_barrier(conf);
@@ -1186,6 +1221,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 		bio_list_add(&conf->pending_bio_list, mbio);
 		conf->pending_count++;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
+
+		if (!r10_bio->devs[i].repl_bio)
+			continue;
+
+		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+		md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+			    max_sectors);
+		r10_bio->devs[i].repl_bio = mbio;
+
+		mbio->bi_sector	= (r10_bio->devs[i].addr+
+				   conf->mirrors[d].replacement->data_offset);
+		mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
+		mbio->bi_end_io	= raid10_end_write_request;
+		mbio->bi_rw = WRITE | do_sync | do_fua;
+		mbio->bi_private = r10_bio;
+
+		atomic_inc(&r10_bio->remaining);
+		spin_lock_irqsave(&conf->device_lock, flags);
+		bio_list_add(&conf->pending_bio_list, mbio);
+		conf->pending_count++;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
 	/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -2253,6 +2309,15 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 				}
 				rdev_dec_pending(rdev, conf->mddev);
 			}
+			bio = r10_bio->devs[m].repl_bio;
+			rdev = conf->mirrors[dev].replacement;
+			if (bio == IO_MADE_GOOD) {
+				rdev_clear_badblocks(
+					rdev,
+					r10_bio->devs[m].addr,
+					r10_bio->sectors);
+				rdev_dec_pending(rdev, conf->mddev);
+			}
 		}
 		if (test_bit(R10BIO_WriteError,
 			     &r10_bio->state))
...
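
The handle_write_completed() hunk extends the existing IO_MADE_GOOD handling to the replacement: when a write that covers a previously recorded bad range succeeds, the completion handler only flags the slot, and this later pass, outside interrupt context, clears the bad-block record and drops the pending reference. A rough sketch of that two-phase flow follows, with hypothetical names; the sentinel value here is illustrative, not the driver's actual one.

/* Two-phase bad-block clearing, sketched with hypothetical helpers.
 * Phase 1 (the write completion) only flags a slot whose write
 * covered a known-bad range; phase 2 (a later pass, outside
 * interrupt context) turns the flag into a bad-block table update. */
#include <stdio.h>

#define IO_MADE_GOOD ((void *)1)	/* sentinel; value is illustrative */

struct disk {
	const char *name;
};

struct slot {
	struct disk *repl;
	void *repl_bio;		/* NULL, a real request, or IO_MADE_GOOD */
};

static void clear_badblocks(struct disk *d)
{
	printf("clearing bad-block record on %s\n", d->name);
}

/* Phase 2: called from thread context after all writes finished. */
static void finish_writes(struct slot *s)
{
	if (s->repl_bio == IO_MADE_GOOD)
		clear_badblocks(s->repl);
}

int main(void)
{
	struct disk sdb = { "sdb(repl)" };
	struct slot s = { &sdb, IO_MADE_GOOD };	/* phase 1 flagged it */

	finish_writes(&s);
	return 0;
}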