Commit 18d30ab9 authored by Qu Wenruo, committed by David Sterba

btrfs: scrub: use scrub_simple_mirror() to handle RAID56 data stripe scrub

Although RAID56 has a complex repair mechanism, which involves reading the
whole full stripe, inside one data stripe it is in fact no different
from SINGLE/RAID1.

The point here is that, for a data stripe, we just check the csum for each
extent we hit.  Only in the csum-mismatch case do our repair paths diverge.

So we can still reuse scrub_simple_mirror() for RAID56 data stripes,
which saves quite some code.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e430c428
...@@ -3494,33 +3494,20 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3494,33 +3494,20 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct btrfs_fs_info *fs_info = sctx->fs_info; struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root; struct btrfs_root *root;
struct btrfs_root *csum_root; struct btrfs_root *csum_root;
struct btrfs_extent_item *extent;
struct blk_plug plug; struct blk_plug plug;
const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK; const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
const u64 chunk_logical = bg->start; const u64 chunk_logical = bg->start;
u64 flags;
int ret; int ret;
int slot;
struct extent_buffer *l;
u64 physical = map->stripes[stripe_index].physical; u64 physical = map->stripes[stripe_index].physical;
const u64 physical_end = physical + dev_extent_len;
u64 logical; u64 logical;
u64 logic_end; u64 logic_end;
const u64 physical_end = physical + dev_extent_len; /* The logical increment after finishing one stripe */
u64 generation;
struct btrfs_key key;
u64 increment; u64 increment;
/* Offset inside the chunk */
u64 offset; u64 offset;
u64 extent_logical;
u64 extent_physical;
/*
* Unlike chunk length, extent length should never go beyond
* BTRFS_MAX_EXTENT_SIZE, thus u32 is enough here.
*/
u32 extent_len;
u64 stripe_logical; u64 stripe_logical;
u64 stripe_end; u64 stripe_end;
struct btrfs_device *extent_dev;
int extent_mirror_num;
int stop_loop = 0; int stop_loop = 0;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
...@@ -3601,34 +3588,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3601,34 +3588,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL); get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
increment = map->stripe_len * nr_data_stripes(map); increment = map->stripe_len * nr_data_stripes(map);
while (physical < physical_end) {
/*
* canceled?
*/
if (atomic_read(&fs_info->scrub_cancel_req) ||
atomic_read(&sctx->cancel_req)) {
ret = -ECANCELED;
goto out;
}
/* /*
* check to see if we have to pause * Due to the rotation, for RAID56 it's better to iterate each stripe
* using their physical offset.
*/ */
if (atomic_read(&fs_info->scrub_pause_req)) { while (physical < physical_end) {
/* push queued extents */ ret = get_raid56_logic_offset(physical, stripe_index, map,
sctx->flush_all_writes = true; &logical, &stripe_logical);
scrub_submit(sctx);
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
wait_event(sctx->list_wait,
atomic_read(&sctx->bios_in_flight) == 0);
sctx->flush_all_writes = false;
scrub_blocked_if_needed(fs_info);
}
ret = get_raid56_logic_offset(physical, stripe_index,
map, &logical,
&stripe_logical);
logical += chunk_logical; logical += chunk_logical;
if (ret) { if (ret) {
/* it is parity strip */ /* it is parity strip */
...@@ -3639,194 +3605,23 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ...@@ -3639,194 +3605,23 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
stripe_end); stripe_end);
if (ret) if (ret)
goto out; goto out;
goto skip;
}
if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
key.type = BTRFS_METADATA_ITEM_KEY;
else
key.type = BTRFS_EXTENT_ITEM_KEY;
key.objectid = logical;
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
if (ret > 0) {
ret = btrfs_previous_extent_item(root, path, 0);
if (ret < 0)
goto out;
if (ret > 0) {
/* there's no smaller item, so stick with the
* larger one */
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key,
path, 0, 0);
if (ret < 0)
goto out;
}
}
stop_loop = 0;
while (1) {
u64 bytes;
l = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(l)) {
ret = btrfs_next_leaf(root, path);
if (ret == 0)
continue;
if (ret < 0)
goto out;
stop_loop = 1;
break;
}
btrfs_item_key_to_cpu(l, &key, slot);
if (key.type != BTRFS_EXTENT_ITEM_KEY &&
key.type != BTRFS_METADATA_ITEM_KEY)
goto next; goto next;
if (key.type == BTRFS_METADATA_ITEM_KEY)
bytes = fs_info->nodesize;
else
bytes = key.offset;
if (key.objectid + bytes <= logical)
goto next;
if (key.objectid >= logical + map->stripe_len) {
/* out of this device extent */
if (key.objectid >= logic_end)
stop_loop = 1;
break;
}
/*
* If our block group was removed in the meanwhile, just
* stop scrubbing since there is no point in continuing.
* Continuing would prevent reusing its device extents
* for new block groups for a long time.
*/
spin_lock(&bg->lock);
if (bg->removed) {
spin_unlock(&bg->lock);
ret = 0;
goto out;
}
spin_unlock(&bg->lock);
extent = btrfs_item_ptr(l, slot,
struct btrfs_extent_item);
flags = btrfs_extent_flags(l, extent);
generation = btrfs_extent_generation(l, extent);
if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
(key.objectid < logical ||
key.objectid + bytes >
logical + map->stripe_len)) {
btrfs_err(fs_info,
"scrub: tree block %llu spanning stripes, ignored. logical=%llu",
key.objectid, logical);
spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++;
spin_unlock(&sctx->stat_lock);
goto next;
}
again:
extent_logical = key.objectid;
ASSERT(bytes <= U32_MAX);
extent_len = bytes;
/*
* trim extent to this stripe
*/
if (extent_logical < logical) {
extent_len -= logical - extent_logical;
extent_logical = logical;
}
if (extent_logical + extent_len >
logical + map->stripe_len) {
extent_len = logical + map->stripe_len -
extent_logical;
}
extent_physical = extent_logical - logical + physical;
extent_dev = scrub_dev;
/* For RAID56 data stripes, mirror_num is fixed to 1 */
extent_mirror_num = 1;
if (sctx->is_dev_replace)
scrub_remap_extent(fs_info, extent_logical,
extent_len, &extent_physical,
&extent_dev,
&extent_mirror_num);
if (flags & BTRFS_EXTENT_FLAG_DATA) {
ret = btrfs_lookup_csums_range(csum_root,
extent_logical,
extent_logical + extent_len - 1,
&sctx->csum_list, 1);
if (ret)
goto out;
} }
ret = scrub_extent(sctx, map, extent_logical, extent_len,
extent_physical, extent_dev, flags,
generation, extent_mirror_num,
extent_logical - logical + physical);
scrub_free_csums(sctx);
if (ret)
goto out;
if (sctx->is_dev_replace)
sync_replace_for_zoned(sctx);
if (extent_logical + extent_len <
key.objectid + bytes) {
/* /*
* loop until we find next data stripe * Now we're at a data stripe, scrub each extents in the range.
* or we have finished all stripes. *
* At this stage, if we ignore the repair part, inside each data
* stripe it is no different than SINGLE profile.
* We can reuse scrub_simple_mirror() here, as the repair part
* is still based on @mirror_num.
*/ */
loop: ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
physical += map->stripe_len; logical, map->stripe_len,
ret = get_raid56_logic_offset(physical, scrub_dev, physical, 1);
stripe_index, map, if (ret < 0)
&logical, &stripe_logical);
logical += chunk_logical;
if (ret && physical < physical_end) {
stripe_logical += chunk_logical;
stripe_end = stripe_logical +
increment;
ret = scrub_raid56_parity(sctx,
map, scrub_dev,
stripe_logical,
stripe_end);
if (ret)
goto out; goto out;
goto loop;
}
if (logical < key.objectid + bytes) {
cond_resched();
goto again;
}
if (physical >= physical_end) {
stop_loop = 1;
break;
}
}
next: next:
path->slots[0]++;
}
btrfs_release_path(path);
skip:
logical += increment; logical += increment;
physical += map->stripe_len; physical += map->stripe_len;
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment