Commit 97e38239 authored by Qu Wenruo, committed by David Sterba

btrfs: introduce a bitmap based csum range search function

Although we have an existing function, btrfs_lookup_csums_range(), to
find all data checksums for a range, it's based on a btrfs_ordered_sum
list.

For the incoming RAID56 data checksum verification at RMW time, we don't
want to waste time by allocating temporary memory.

So this patch introduces a new helper, btrfs_lookup_csums_bitmap().
It returns its result as a bitmap, which is a perfect fit for the later
RAID56 usage.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent cb649e81
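
To illustrate the intended usage, here is a minimal sketch of how a caller (for example the planned RAID56 RMW path) might drive the new helper. The wrapper lookup_stripe_csums() and its error handling are illustrative only and are not part of this patch; btrfs_lookup_csums_bitmap(), btrfs_csum_root() and the fs_info fields are the interfaces the sketch assumes, and it relies on the usual btrfs and kernel headers being available.

/*
 * Illustrative sketch (not part of this patch): look up all data csums
 * covering [start, start + len) and note which sectors have one.
 * Assumes len is aligned to fs_info->sectorsize.
 */
static int lookup_stripe_csums(struct btrfs_fs_info *fs_info, u64 start, u32 len)
{
        struct btrfs_root *csum_root = btrfs_csum_root(fs_info, start);
        const u32 nr_sectors = len >> fs_info->sectorsize_bits;
        unsigned long *csum_bitmap;
        u8 *csum_buf;
        unsigned int bit;
        int ret = -ENOMEM;

        /* One csum per sector in csum_buf, one bit per sector in the bitmap. */
        csum_buf = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS);
        csum_bitmap = bitmap_zalloc(nr_sectors, GFP_NOFS);
        if (!csum_buf || !csum_bitmap)
                goto out;

        ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
                                        csum_buf, csum_bitmap);
        if (ret < 0)
                goto out;

        for_each_set_bit(bit, csum_bitmap, nr_sectors) {
                /*
                 * The sector at start + (bit << fs_info->sectorsize_bits) has
                 * its csum at csum_buf + bit * fs_info->csum_size; a real
                 * caller would verify the sector against it here.
                 */
        }
out:
        kfree(csum_buf);
        bitmap_free(csum_bitmap);
        return ret;
}

The sizing mirrors the helper's contract described in its comment: csum_buf must hold one csum_size entry per sector of the search range, and csum_bitmap one bit per sector.
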
@@ -527,7 +527,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst
         return ret;
 }
 
-int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit,
                             bool nowait)
 {
@@ -661,6 +661,127 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
         return ret;
 }
 
+/*
+ * Do the same work as btrfs_lookup_csums_list(), the difference is in how
+ * we return the result.
+ *
+ * This version will set the corresponding bits in @csum_bitmap to represent
+ * that there is a csum found.
+ * Each bit represents a sector. Thus caller should ensure @csum_buf passed
+ * in is large enough to contain all csums.
+ */
+int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
+                              u8 *csum_buf, unsigned long *csum_bitmap)
+{
+        struct btrfs_fs_info *fs_info = root->fs_info;
+        struct btrfs_key key;
+        struct btrfs_path *path;
+        struct extent_buffer *leaf;
+        struct btrfs_csum_item *item;
+        const u64 orig_start = start;
+        int ret;
+
+        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
+               IS_ALIGNED(end + 1, fs_info->sectorsize));
+
+        path = btrfs_alloc_path();
+        if (!path)
+                return -ENOMEM;
+
+        key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+        key.type = BTRFS_EXTENT_CSUM_KEY;
+        key.offset = start;
+
+        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+        if (ret < 0)
+                goto fail;
+        if (ret > 0 && path->slots[0] > 0) {
+                leaf = path->nodes[0];
+                btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+
+                /*
+                 * There are two cases we can hit here for the previous csum
+                 * item:
+                 *
+                 *                 |<- search range ->|
+                 *         |<- csum item ->|
+                 *
+                 * Or
+                 *                                 |<- search range ->|
+                 *         |<- csum item ->|
+                 *
+                 * Check if the previous csum item covers the leading part of
+                 * the search range. If so we have to start from previous csum
+                 * item.
+                 */
+                if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
+                    key.type == BTRFS_EXTENT_CSUM_KEY) {
+                        if (bytes_to_csum_size(fs_info, start - key.offset) <
+                            btrfs_item_size(leaf, path->slots[0] - 1))
+                                path->slots[0]--;
+                }
+        }
+
+        while (start <= end) {
+                u64 csum_end;
+
+                leaf = path->nodes[0];
+                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+                        ret = btrfs_next_leaf(root, path);
+                        if (ret < 0)
+                                goto fail;
+                        if (ret > 0)
+                                break;
+                        leaf = path->nodes[0];
+                }
+
+                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+                    key.type != BTRFS_EXTENT_CSUM_KEY ||
+                    key.offset > end)
+                        break;
+
+                if (key.offset > start)
+                        start = key.offset;
+
+                csum_end = key.offset + csum_size_to_bytes(fs_info,
+                                btrfs_item_size(leaf, path->slots[0]));
+                if (csum_end <= start) {
+                        path->slots[0]++;
+                        continue;
+                }
+
+                csum_end = min(csum_end, end + 1);
+                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                      struct btrfs_csum_item);
+                while (start < csum_end) {
+                        unsigned long offset;
+                        size_t size;
+                        u8 *csum_dest = csum_buf + bytes_to_csum_size(fs_info,
+                                                start - orig_start);
+
+                        size = min_t(size_t, csum_end - start, end + 1 - start);
+                        offset = bytes_to_csum_size(fs_info, start - key.offset);
+
+                        read_extent_buffer(path->nodes[0], csum_dest,
+                                           ((unsigned long)item) + offset,
+                                           bytes_to_csum_size(fs_info, size));
+
+                        bitmap_set(csum_bitmap,
+                                   (start - orig_start) >> fs_info->sectorsize_bits,
+                                   size >> fs_info->sectorsize_bits);
+                        start += size;
+                }
+                path->slots[0]++;
+        }
+        ret = 0;
+fail:
+        btrfs_free_path(path);
+        return ret;
+}
+
 /*
  * Calculate checksums of the data contained inside a bio.
  *
...
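
As a concrete illustration of the leading-overlap check in the new helper above (the numbers are only an example): with 4 KiB sectors and 4-byte crc32c checksums, a search range starting at 1024 KiB and a previous csum item at key.offset = 960 KiB give bytes_to_csum_size(start - key.offset) = bytes_to_csum_size(64 KiB) = 16 * 4 = 64 bytes. If that item is 128 bytes long it describes 32 sectors, i.e. [960 KiB, 1088 KiB), so 64 < 128 holds and the search steps back to that item; if the item is only 48 bytes it ends at 1008 KiB, 64 < 48 is false, and the item is correctly skipped.
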
@@ -18,9 +18,11 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                            struct btrfs_ordered_sum *sums);
 blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
                                 u64 offset, bool one_ordered);
-int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit,
                             bool nowait);
+int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
+                              u8 *csum_buf, unsigned long *csum_bitmap);
 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
                                      const struct btrfs_path *path,
                                      struct btrfs_file_extent_item *fi,
...
@@ -1709,9 +1709,8 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
         int ret;
         LIST_HEAD(list);
 
-        ret = btrfs_lookup_csums_range(csum_root, bytenr,
-                                       bytenr + num_bytes - 1, &list, 0,
-                                       nowait);
+        ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
+                                      &list, 0, nowait);
         if (ret == 0 && list_empty(&list))
                 return 0;
...
@@ -4357,7 +4357,7 @@ int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
         disk_bytenr = file_pos + inode->index_cnt;
         csum_root = btrfs_csum_root(fs_info, disk_bytenr);
 
-        ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
+        ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
                                       disk_bytenr + len - 1, &list, 0, false);
         if (ret)
                 goto out;
...
@@ -3238,7 +3238,7 @@ static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
         extent_dev = bioc->stripes[0].dev;
         btrfs_put_bioc(bioc);
 
-        ret = btrfs_lookup_csums_range(csum_root, extent_start,
+        ret = btrfs_lookup_csums_list(csum_root, extent_start,
                                       extent_start + extent_size - 1,
                                       &sctx->csum_list, 1, false);
         if (ret) {
@@ -3464,7 +3464,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
                         cur_logical;
                 if (extent_flags & BTRFS_EXTENT_FLAG_DATA) {
-                        ret = btrfs_lookup_csums_range(csum_root, cur_logical,
+                        ret = btrfs_lookup_csums_list(csum_root, cur_logical,
                                                       cur_logical + scrub_len - 1,
                                                       &sctx->csum_list, 1, false);
                         if (ret)
...
@@ -826,7 +826,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                 btrfs_file_extent_num_bytes(eb, item);
                 }
 
-                ret = btrfs_lookup_csums_range(root->log_root,
+                ret = btrfs_lookup_csums_list(root->log_root,
                                               csum_start, csum_end - 1,
                                               &ordered_sums, 0, false);
                 if (ret)
@@ -4443,7 +4443,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                 csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr);
                 disk_bytenr += extent_offset;
-                ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
+                ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
                                               disk_bytenr + extent_num_bytes - 1,
                                               &ordered_sums, 0, false);
                 if (ret)
@@ -4638,8 +4638,7 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
         /* block start is already adjusted for the file extent offset. */
         csum_root = btrfs_csum_root(trans->fs_info, em->block_start);
-        ret = btrfs_lookup_csums_range(csum_root,
-                                       em->block_start + csum_offset,
-                                       em->block_start + csum_offset +
-                                       csum_len - 1, &ordered_sums, 0, false);
+        ret = btrfs_lookup_csums_list(csum_root, em->block_start + csum_offset,
+                                      em->block_start + csum_offset +
+                                      csum_len - 1, &ordered_sums, 0, false);
         if (ret)
...