Commit 0c9b36e0 authored by Liu Bo, committed by David Sterba

Btrfs: try to avoid acquiring free space ctl's lock

We don't need to take the lock if the block group has not been cached.
Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 6f6b643e
@@ -7714,18 +7714,20 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 			last_ptr->fragmented = 1;
 			spin_unlock(&last_ptr->lock);
 		}
-		spin_lock(&block_group->free_space_ctl->tree_lock);
-		if (cached &&
-		    block_group->free_space_ctl->free_space <
-		    num_bytes + empty_cluster + empty_size) {
-			if (block_group->free_space_ctl->free_space >
-			    max_extent_size)
-				max_extent_size =
-					block_group->free_space_ctl->free_space;
-			spin_unlock(&block_group->free_space_ctl->tree_lock);
-			goto loop;
+		if (cached) {
+			struct btrfs_free_space_ctl *ctl =
+				block_group->free_space_ctl;
+
+			spin_lock(&ctl->tree_lock);
+			if (ctl->free_space <
+			    num_bytes + empty_cluster + empty_size) {
+				if (ctl->free_space > max_extent_size)
+					max_extent_size = ctl->free_space;
+				spin_unlock(&ctl->tree_lock);
+				goto loop;
+			}
+			spin_unlock(&ctl->tree_lock);
 		}
-		spin_unlock(&block_group->free_space_ctl->tree_lock);
 		offset = btrfs_find_space_for_alloc(block_group, search_start,
 						    num_bytes, empty_size,
...
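For readers who want to see the pattern outside the kernel tree, here is a minimal user-space sketch of the same idea: take the free-space lock only once we know the block group has been cached, and keep the ctl pointer in a local variable so the locked path is easier to read. The struct definitions, the pthread spinlock, and the helper name should_skip_block_group() are illustrative stand-ins for this sketch, not the kernel code or API.

/* Compile with: cc -pthread sketch.c */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's free-space control structure. */
struct free_space_ctl {
	pthread_spinlock_t tree_lock;
	unsigned long long free_space;
};

/* Simplified stand-in for a block group. */
struct block_group {
	bool cached;
	struct free_space_ctl *free_space_ctl;
};

/*
 * Returns true when the caller should give up on this block group
 * (the "goto loop" case in find_free_extent) because it clearly
 * lacks enough free space for the allocation.
 */
static bool should_skip_block_group(struct block_group *bg,
				    unsigned long long need,
				    unsigned long long *max_extent_size)
{
	/*
	 * The old code took tree_lock unconditionally; an uncached
	 * block group has nothing useful to read under the lock,
	 * so skip it entirely.
	 */
	if (!bg->cached)
		return false;

	struct free_space_ctl *ctl = bg->free_space_ctl;
	bool skip = false;

	pthread_spin_lock(&ctl->tree_lock);
	if (ctl->free_space < need) {
		/* Remember the largest free space seen so far. */
		if (ctl->free_space > *max_extent_size)
			*max_extent_size = ctl->free_space;
		skip = true;
	}
	pthread_spin_unlock(&ctl->tree_lock);

	return skip;
}

int main(void)
{
	struct free_space_ctl ctl;
	pthread_spin_init(&ctl.tree_lock, PTHREAD_PROCESS_PRIVATE);
	ctl.free_space = 4096;

	struct block_group bg = { .cached = true, .free_space_ctl = &ctl };
	unsigned long long max_extent_size = 0;

	if (should_skip_block_group(&bg, 8192, &max_extent_size))
		printf("skip group, max_extent_size=%llu\n", max_extent_size);

	pthread_spin_destroy(&ctl.tree_lock);
	return 0;
}

The design point matches the commit message: for a block group that has not been cached yet, the free-space figures are not meaningful, so acquiring tree_lock there only adds contention on the allocation path without changing the outcome.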