Commit 9bc5fc04 authored by Gabriel Niebler, committed by David Sterba

btrfs: use btrfs_for_each_slot in mark_block_group_to_copy

This function can be simplified by refactoring to use the new iterator
macro.  No functional changes.
Signed-off-by: Marcos Paulo de Souza <mpdesouza@suse.com>
Signed-off-by: Gabriel Niebler <gniebler@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 36dfbbe2
@@ -474,6 +474,7 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
 	struct btrfs_dev_extent *dev_extent = NULL;
 	struct btrfs_block_group *cache;
 	struct btrfs_trans_handle *trans;
+	int iter_ret = 0;
 	int ret = 0;
 	u64 chunk_offset;
@@ -524,29 +525,8 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
 	key.type = BTRFS_DEV_EXTENT_KEY;
 	key.offset = 0;
 
-	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-	if (ret < 0)
-		goto free_path;
-	if (ret > 0) {
-		if (path->slots[0] >=
-		    btrfs_header_nritems(path->nodes[0])) {
-			ret = btrfs_next_leaf(root, path);
-			if (ret < 0)
-				goto free_path;
-			if (ret > 0) {
-				ret = 0;
-				goto free_path;
-			}
-		} else {
-			ret = 0;
-		}
-	}
-
-	while (1) {
+	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 		struct extent_buffer *leaf = path->nodes[0];
-		int slot = path->slots[0];
-
-		btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
 		if (found_key.objectid != src_dev->devid)
 			break;
@@ -557,30 +537,23 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
 		if (found_key.offset < key.offset)
 			break;
 
-		dev_extent = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+		dev_extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
 		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);
 
 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
 		if (!cache)
-			goto skip;
+			continue;
 
 		spin_lock(&cache->lock);
 		cache->to_copy = 1;
 		spin_unlock(&cache->lock);
 
 		btrfs_put_block_group(cache);
-skip:
-		ret = btrfs_next_item(root, path);
-		if (ret != 0) {
-			if (ret > 0)
-				ret = 0;
-			break;
-		}
 	}
+	if (iter_ret < 0)
+		ret = iter_ret;
 
-free_path:
 	btrfs_free_path(path);
 unlock:
 	mutex_unlock(&fs_info->chunk_mutex);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment