Commit e430c428 authored by Qu Wenruo, committed by David Sterba

btrfs: scrub: cleanup the non-RAID56 branches in scrub_stripe()

Since we have moved all other profiles handling into their own
functions, now the main body of scrub_stripe() is just handling RAID56
profiles.

There is no need to address other profiles in the main loop of
scrub_stripe(), so we can remove those dead branches.

Since we're here, also slightly change the timing of initialization of
variables like @offset, @increment and @logical.

Especially for @logical, we don't really need to initialize it for
btrfs_extent_root()/btrfs_csum_root(), we can use bg->start for that
purpose.

Now those variables are only initialized for RAID56 branches.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 8557635e
......@@ -3501,14 +3501,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
u64 flags;
int ret;
int slot;
u64 nstripes;
struct extent_buffer *l;
u64 physical = map->stripes[stripe_index].physical;
u64 logical;
u64 logic_end;
const u64 physical_end = physical + dev_extent_len;
u64 generation;
int mirror_num;
struct btrfs_key key;
u64 increment;
u64 offset;
......@@ -3525,28 +3523,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
int extent_mirror_num;
int stop_loop = 0;
offset = 0;
nstripes = div64_u64(dev_extent_len, map->stripe_len);
mirror_num = 1;
increment = map->stripe_len;
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
offset = map->stripe_len * stripe_index;
increment = map->stripe_len * map->num_stripes;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
int factor = map->num_stripes / map->sub_stripes;
offset = map->stripe_len * (stripe_index / map->sub_stripes);
increment = map->stripe_len * factor;
mirror_num = stripe_index % map->sub_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
mirror_num = stripe_index % map->num_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
mirror_num = stripe_index % map->num_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
get_raid56_logic_offset(physical, stripe_index, map, &offset,
NULL);
increment = map->stripe_len * nr_data_stripes(map);
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
......@@ -3560,20 +3536,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
path->skip_locking = 1;
path->reada = READA_FORWARD;
logical = chunk_logical + offset;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
get_raid56_logic_offset(physical_end, stripe_index,
map, &logic_end, NULL);
logic_end += chunk_logical;
} else {
logic_end = logical + increment * nstripes;
}
wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0);
scrub_blocked_if_needed(fs_info);
root = btrfs_extent_root(fs_info, logical);
csum_root = btrfs_csum_root(fs_info, logical);
root = btrfs_extent_root(fs_info, bg->start);
csum_root = btrfs_csum_root(fs_info, bg->start);
/*
* collect all data csums for the stripe to avoid seeking during
......@@ -3610,17 +3578,29 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
bg->start, bg->length, scrub_dev,
map->stripes[stripe_index].physical,
stripe_index + 1);
offset = 0;
goto out;
}
if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
scrub_dev, stripe_index);
offset = map->stripe_len * (stripe_index / map->sub_stripes);
goto out;
}
/* Only RAID56 goes through the old code */
ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
ret = 0;
/* Calculate the logical end of the stripe */
get_raid56_logic_offset(physical_end, stripe_index,
map, &logic_end, NULL);
logic_end += chunk_logical;
/* Initialize @offset in case we need to go to out: label */
get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
increment = map->stripe_len * nr_data_stripes(map);
while (physical < physical_end) {
/*
* canceled?
......@@ -3646,22 +3626,20 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
scrub_blocked_if_needed(fs_info);
}
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ret = get_raid56_logic_offset(physical, stripe_index,
map, &logical,
&stripe_logical);
logical += chunk_logical;
if (ret) {
/* it is parity strip */
stripe_logical += chunk_logical;
stripe_end = stripe_logical + increment;
ret = scrub_raid56_parity(sctx, map, scrub_dev,
stripe_logical,
stripe_end);
if (ret)
goto out;
goto skip;
}
ret = get_raid56_logic_offset(physical, stripe_index,
map, &logical,
&stripe_logical);
logical += chunk_logical;
if (ret) {
/* it is parity strip */
stripe_logical += chunk_logical;
stripe_end = stripe_logical + increment;
ret = scrub_raid56_parity(sctx, map, scrub_dev,
stripe_logical,
stripe_end);
if (ret)
goto out;
goto skip;
}
if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
......@@ -3779,7 +3757,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
extent_physical = extent_logical - logical + physical;
extent_dev = scrub_dev;
extent_mirror_num = mirror_num;
/* For RAID56 data stripes, mirror_num is fixed to 1 */
extent_mirror_num = 1;
if (sctx->is_dev_replace)
scrub_remap_extent(fs_info, extent_logical,
extent_len, &extent_physical,
......@@ -3810,33 +3789,28 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
if (extent_logical + extent_len <
key.objectid + bytes) {
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
/*
* loop until we find next data stripe
* or we have finished all stripes.
*/
/*
* loop until we find next data stripe
* or we have finished all stripes.
*/
loop:
physical += map->stripe_len;
ret = get_raid56_logic_offset(physical,
stripe_index, map,
&logical, &stripe_logical);
logical += chunk_logical;
if (ret && physical < physical_end) {
stripe_logical += chunk_logical;
stripe_end = stripe_logical +
increment;
ret = scrub_raid56_parity(sctx,
map, scrub_dev,
stripe_logical,
stripe_end);
if (ret)
goto out;
goto loop;
}
} else {
physical += map->stripe_len;
logical += increment;
physical += map->stripe_len;
ret = get_raid56_logic_offset(physical,
stripe_index, map,
&logical, &stripe_logical);
logical += chunk_logical;
if (ret && physical < physical_end) {
stripe_logical += chunk_logical;
stripe_end = stripe_logical +
increment;
ret = scrub_raid56_parity(sctx,
map, scrub_dev,
stripe_logical,
stripe_end);
if (ret)
goto out;
goto loop;
}
if (logical < key.objectid + bytes) {
cond_resched();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment