Commit e430c428 authored by Qu Wenruo, committed by David Sterba

btrfs: scrub: cleanup the non-RAID56 branches in scrub_stripe()

Since we have moved all other profiles handling into their own
functions, now the main body of scrub_stripe() is just handling RAID56
profiles.

There is no need to address other profiles in the main loop of
scrub_stripe(), so we can remove those dead branches.

Since we're here, also slightly change the timing of initialization of
variables like @offset, @increment and @logical.

Especially for @logical, we don't really need to initialize it for
btrfs_extent_root()/btrfs_csum_root(), we can use bg->start for that
purpose.

Now those variables are only initialized for the RAID56 branches.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 8557635e
...@@ -3501,14 +3501,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3501,14 +3501,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
u64 flags; u64 flags;
int ret; int ret;
int slot; int slot;
u64 nstripes;
struct extent_buffer *l; struct extent_buffer *l;
u64 physical = map->stripes[stripe_index].physical; u64 physical = map->stripes[stripe_index].physical;
u64 logical; u64 logical;
u64 logic_end; u64 logic_end;
const u64 physical_end = physical + dev_extent_len; const u64 physical_end = physical + dev_extent_len;
u64 generation; u64 generation;
int mirror_num;
struct btrfs_key key; struct btrfs_key key;
u64 increment; u64 increment;
u64 offset; u64 offset;
...@@ -3525,28 +3523,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3525,28 +3523,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
int extent_mirror_num; int extent_mirror_num;
int stop_loop = 0; int stop_loop = 0;
offset = 0;
nstripes = div64_u64(dev_extent_len, map->stripe_len);
mirror_num = 1;
increment = map->stripe_len;
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
offset = map->stripe_len * stripe_index;
increment = map->stripe_len * map->num_stripes;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
int factor = map->num_stripes / map->sub_stripes;
offset = map->stripe_len * (stripe_index / map->sub_stripes);
increment = map->stripe_len * factor;
mirror_num = stripe_index % map->sub_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
mirror_num = stripe_index % map->num_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
mirror_num = stripe_index % map->num_stripes + 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
get_raid56_logic_offset(physical, stripe_index, map, &offset,
NULL);
increment = map->stripe_len * nr_data_stripes(map);
}
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path)
return -ENOMEM; return -ENOMEM;
...@@ -3560,20 +3536,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3560,20 +3536,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
path->skip_locking = 1; path->skip_locking = 1;
path->reada = READA_FORWARD; path->reada = READA_FORWARD;
logical = chunk_logical + offset;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
get_raid56_logic_offset(physical_end, stripe_index,
map, &logic_end, NULL);
logic_end += chunk_logical;
} else {
logic_end = logical + increment * nstripes;
}
wait_event(sctx->list_wait, wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0); atomic_read(&sctx->bios_in_flight) == 0);
scrub_blocked_if_needed(fs_info); scrub_blocked_if_needed(fs_info);
root = btrfs_extent_root(fs_info, logical); root = btrfs_extent_root(fs_info, bg->start);
csum_root = btrfs_csum_root(fs_info, logical); csum_root = btrfs_csum_root(fs_info, bg->start);
/* /*
* collect all data csums for the stripe to avoid seeking during * collect all data csums for the stripe to avoid seeking during
...@@ -3610,17 +3578,29 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3610,17 +3578,29 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
bg->start, bg->length, scrub_dev, bg->start, bg->length, scrub_dev,
map->stripes[stripe_index].physical, map->stripes[stripe_index].physical,
stripe_index + 1); stripe_index + 1);
offset = 0;
goto out; goto out;
} }
if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
ret = scrub_simple_stripe(sctx, root, csum_root, bg, map, ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
scrub_dev, stripe_index); scrub_dev, stripe_index);
offset = map->stripe_len * (stripe_index / map->sub_stripes);
goto out; goto out;
} }
/* Only RAID56 goes through the old code */ /* Only RAID56 goes through the old code */
ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK); ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
ret = 0; ret = 0;
/* Calculate the logical end of the stripe */
get_raid56_logic_offset(physical_end, stripe_index,
map, &logic_end, NULL);
logic_end += chunk_logical;
/* Initialize @offset in case we need to go to out: label */
get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
increment = map->stripe_len * nr_data_stripes(map);
while (physical < physical_end) { while (physical < physical_end) {
/* /*
* canceled? * canceled?
...@@ -3646,7 +3626,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3646,7 +3626,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
scrub_blocked_if_needed(fs_info); scrub_blocked_if_needed(fs_info);
} }
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ret = get_raid56_logic_offset(physical, stripe_index, ret = get_raid56_logic_offset(physical, stripe_index,
map, &logical, map, &logical,
&stripe_logical); &stripe_logical);
...@@ -3662,7 +3641,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3662,7 +3641,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
goto out; goto out;
goto skip; goto skip;
} }
}
if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY; key.type = BTRFS_METADATA_ITEM_KEY;
...@@ -3779,7 +3757,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3779,7 +3757,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
extent_physical = extent_logical - logical + physical; extent_physical = extent_logical - logical + physical;
extent_dev = scrub_dev; extent_dev = scrub_dev;
extent_mirror_num = mirror_num; /* For RAID56 data stripes, mirror_num is fixed to 1 */
extent_mirror_num = 1;
if (sctx->is_dev_replace) if (sctx->is_dev_replace)
scrub_remap_extent(fs_info, extent_logical, scrub_remap_extent(fs_info, extent_logical,
extent_len, &extent_physical, extent_len, &extent_physical,
...@@ -3810,7 +3789,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3810,7 +3789,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
if (extent_logical + extent_len < if (extent_logical + extent_len <
key.objectid + bytes) { key.objectid + bytes) {
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
/* /*
* loop until we find next data stripe * loop until we find next data stripe
* or we have finished all stripes. * or we have finished all stripes.
...@@ -3834,10 +3812,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3834,10 +3812,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
goto out; goto out;
goto loop; goto loop;
} }
} else {
physical += map->stripe_len;
logical += increment;
}
if (logical < key.objectid + bytes) { if (logical < key.objectid + bytes) {
cond_resched(); cond_resched();
goto again; goto again;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment