Commit a316a259 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by David Sterba

btrfs: factor stripe submission logic out of btrfs_map_bio

Move all per-stripe handling into submit_stripe_bio and use a label to
cleanup instead of duplicating the logic.
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent d7b9416f
...@@ -6690,10 +6690,30 @@ static void btrfs_end_bio(struct bio *bio) ...@@ -6690,10 +6690,30 @@ static void btrfs_end_bio(struct bio *bio)
btrfs_end_bioc(bioc, true); btrfs_end_bioc(bioc, true);
} }
static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio, static void submit_stripe_bio(struct btrfs_io_context *bioc,
u64 physical, struct btrfs_device *dev) struct bio *orig_bio, int dev_nr, bool clone)
{ {
struct btrfs_fs_info *fs_info = bioc->fs_info; struct btrfs_fs_info *fs_info = bioc->fs_info;
struct btrfs_device *dev = bioc->stripes[dev_nr].dev;
u64 physical = bioc->stripes[dev_nr].physical;
struct bio *bio;
if (!dev || !dev->bdev ||
test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
(btrfs_op(orig_bio) == BTRFS_MAP_WRITE &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
atomic_inc(&bioc->error);
if (atomic_dec_and_test(&bioc->stripes_pending))
btrfs_end_bioc(bioc, false);
return;
}
if (clone) {
bio = btrfs_bio_clone(dev->bdev, orig_bio);
} else {
bio = orig_bio;
bio_set_dev(bio, dev->bdev);
}
bio->bi_private = bioc; bio->bi_private = bioc;
btrfs_bio(bio)->device = dev; btrfs_bio(bio)->device = dev;
...@@ -6728,32 +6748,25 @@ static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio, ...@@ -6728,32 +6748,25 @@ static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num) int mirror_num)
{ {
struct btrfs_device *dev;
struct bio *first_bio = bio;
u64 logical = bio->bi_iter.bi_sector << 9; u64 logical = bio->bi_iter.bi_sector << 9;
u64 length = 0; u64 length = bio->bi_iter.bi_size;
u64 map_length; u64 map_length = length;
int ret; int ret;
int dev_nr; int dev_nr;
int total_devs; int total_devs;
struct btrfs_io_context *bioc = NULL; struct btrfs_io_context *bioc = NULL;
length = bio->bi_iter.bi_size;
map_length = length;
btrfs_bio_counter_inc_blocked(fs_info); btrfs_bio_counter_inc_blocked(fs_info);
ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
&map_length, &bioc, mirror_num, 1); &map_length, &bioc, mirror_num, 1);
if (ret) { if (ret)
btrfs_bio_counter_dec(fs_info); goto out_dec;
return errno_to_blk_status(ret);
}
total_devs = bioc->num_stripes; total_devs = bioc->num_stripes;
bioc->orig_bio = first_bio; bioc->orig_bio = bio;
bioc->private = first_bio->bi_private; bioc->private = bio->bi_private;
bioc->end_io = first_bio->bi_end_io; bioc->end_io = bio->bi_end_io;
atomic_set(&bioc->stripes_pending, bioc->num_stripes); atomic_set(&bioc->stripes_pending, total_devs);
if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
...@@ -6765,9 +6778,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, ...@@ -6765,9 +6778,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
ret = raid56_parity_recover(bio, bioc, map_length, ret = raid56_parity_recover(bio, bioc, map_length,
mirror_num, 1); mirror_num, 1);
} }
goto out_dec;
btrfs_bio_counter_dec(fs_info);
return errno_to_blk_status(ret);
} }
if (map_length < length) { if (map_length < length) {
...@@ -6778,28 +6789,13 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, ...@@ -6778,28 +6789,13 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
} }
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bioc->stripes[dev_nr].dev; const bool should_clone = (dev_nr < total_devs - 1);
if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
&dev->dev_state) ||
(btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
atomic_inc(&bioc->error);
if (atomic_dec_and_test(&bioc->stripes_pending))
btrfs_end_bioc(bioc, false);
continue;
}
if (dev_nr < total_devs - 1) {
bio = btrfs_bio_clone(dev->bdev, first_bio);
} else {
bio = first_bio;
bio_set_dev(bio, dev->bdev);
}
submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev); submit_stripe_bio(bioc, bio, dev_nr, should_clone);
} }
out_dec:
btrfs_bio_counter_dec(fs_info); btrfs_bio_counter_dec(fs_info);
return BLK_STS_OK; return errno_to_blk_status(ret);
} }
static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args, static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment