Commit e82ed3a4 authored by Christoph Hellwig, committed by Jens Axboe

md/raid6: refactor raid5_read_one_chunk

Refactor raid5_read_one_chunk so that all simple checks are done
before allocating the bio.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Song Liu <song@kernel.org>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6a596569
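
The pattern the refactor applies is worth spelling out: run every cheap validity check first, and only clone the bio once the read is known to go ahead, so no failure path has to unwind an allocation. A minimal standalone sketch of that shape (plain C; checks_pass, submit_aligned, and struct request are hypothetical names, not kernel APIs):

    #include <stdbool.h>
    #include <stdlib.h>

    struct request { int dev; };

    /* Hypothetical cheap validity check; stands in for in_chunk_boundary(),
     * the Faulty/In_sync tests, etc. */
    static bool checks_pass(int dev)
    {
            return dev >= 0;
    }

    /* Allocate only after every check has passed, so the failure paths
     * never need to free anything. */
    static struct request *submit_aligned(int dev)
    {
            struct request *rq;

            if (!checks_pass(dev))          /* the old code allocated before this */
                    return NULL;

            rq = malloc(sizeof(*rq));       /* mirrors bio_clone_fast() */
            if (!rq)
                    return NULL;
            rq->dev = dev;
            return rq;
    }

    int main(void)
    {
            struct request *rq = submit_aligned(1);
            free(rq);
            return 0;
    }

A side effect visible in the hunk below: once nothing is allocated before the checks, the early-exit paths no longer need bio_put(), and the NULL-return check after bio_clone_fast() disappears as well.
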
@@ -5393,90 +5393,72 @@ static void raid5_align_endio(struct bio *bi)

 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
 	struct r5conf *conf = mddev->private;
-	int dd_idx;
-	struct bio* align_bi;
+	struct bio *align_bio;
 	struct md_rdev *rdev;
-	sector_t end_sector;
+	sector_t sector, end_sector, first_bad;
+	int bad_sectors, dd_idx;

 	if (!in_chunk_boundary(mddev, raid_bio)) {
 		pr_debug("%s: non aligned\n", __func__);
 		return 0;
 	}
-	/*
-	 * use bio_clone_fast to make a copy of the bio
-	 */
-	align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
-	if (!align_bi)
-		return 0;
-	/*
-	 * set bi_end_io to a new function, and set bi_private to the
-	 * original bio.
-	 */
-	align_bi->bi_end_io = raid5_align_endio;
-	align_bi->bi_private = raid_bio;
-	/*
-	 * compute position
-	 */
-	align_bi->bi_iter.bi_sector =
-		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
-				     0, &dd_idx, NULL);

-	end_sector = bio_end_sector(align_bi);
+	sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
+				      &dd_idx, NULL);
+	end_sector = bio_end_sector(raid_bio);
+
 	rcu_read_lock();
+	if (r5c_big_stripe_cached(conf, sector))
+		goto out_rcu_unlock;
+
 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
 	    rdev->recovery_offset < end_sector) {
 		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
-		if (rdev &&
-		    (test_bit(Faulty, &rdev->flags) ||
+		if (!rdev)
+			goto out_rcu_unlock;
+		if (test_bit(Faulty, &rdev->flags) ||
 		    !(test_bit(In_sync, &rdev->flags) ||
-		      rdev->recovery_offset >= end_sector)))
-			rdev = NULL;
+		      rdev->recovery_offset >= end_sector))
+			goto out_rcu_unlock;
 	}

-	if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
-		rcu_read_unlock();
-		bio_put(align_bi);
-		return 0;
-	}
+	atomic_inc(&rdev->nr_pending);
+	rcu_read_unlock();

-	if (rdev) {
-		sector_t first_bad;
-		int bad_sectors;
+	align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
+	bio_set_dev(align_bio, rdev->bdev);
+	align_bio->bi_end_io = raid5_align_endio;
+	align_bio->bi_private = raid_bio;
+	align_bio->bi_iter.bi_sector = sector;

-		atomic_inc(&rdev->nr_pending);
-		rcu_read_unlock();
-		raid_bio->bi_next = (void*)rdev;
-		bio_set_dev(align_bi, rdev->bdev);
+	raid_bio->bi_next = (void *)rdev;

-		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
-				bio_sectors(align_bi),
-				&first_bad, &bad_sectors)) {
-			bio_put(align_bi);
-			rdev_dec_pending(rdev, mddev);
-			return 0;
-		}
+	if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+			&bad_sectors)) {
+		bio_put(align_bio);
+		rdev_dec_pending(rdev, mddev);
+		return 0;
+	}

-		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_iter.bi_sector += rdev->data_offset;
+	/* No reshape active, so we can trust rdev->data_offset */
+	align_bio->bi_iter.bi_sector += rdev->data_offset;

-		spin_lock_irq(&conf->device_lock);
-		wait_event_lock_irq(conf->wait_for_quiescent,
-				    conf->quiesce == 0,
-				    conf->device_lock);
-		atomic_inc(&conf->active_aligned_reads);
-		spin_unlock_irq(&conf->device_lock);
+	spin_lock_irq(&conf->device_lock);
+	wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+			    conf->device_lock);
+	atomic_inc(&conf->active_aligned_reads);
+	spin_unlock_irq(&conf->device_lock);

-		if (mddev->gendisk)
-			trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
-					      raid_bio->bi_iter.bi_sector);
-		submit_bio_noacct(align_bi);
-		return 1;
-	} else {
-		rcu_read_unlock();
-		bio_put(align_bi);
-		return 0;
-	}
+	if (mddev->gendisk)
+		trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
+				      raid_bio->bi_iter.bi_sector);
+	submit_bio_noacct(align_bio);
+	return 1;
+
+out_rcu_unlock:
+	rcu_read_unlock();
+	return 0;
 }

 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
...
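
The other structural change visible in the hunk is the single out_rcu_unlock exit: every check that fails while the RCU read lock is held jumps to one label instead of duplicating the unlock at each failure site. The same shape in a self-contained sketch (plain C; check_a, check_b, guarded_op, and the printf "lock"/"unlock" markers are illustrative stand-ins, not the RCU API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool check_a(int v) { return v > 0; }    /* hypothetical checks */
    static bool check_b(int v) { return v < 100; }

    static int guarded_op(int v)
    {
            printf("lock\n");               /* stands in for rcu_read_lock() */

            if (!check_a(v))
                    goto out_unlock;        /* all failures share one exit */
            if (!check_b(v))
                    goto out_unlock;

            printf("unlock\n");             /* success path unlocks, then proceeds */
            return 1;

    out_unlock:
            printf("unlock\n");             /* stands in for rcu_read_unlock() */
            return 0;
    }

    int main(void)
    {
            return !guarded_op(42);
    }

Keeping one unlock label means a later change to the locking discipline touches a single exit path rather than several, which is presumably why the rewrite adopts it alongside the allocation reordering.
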