Commit 7e895409 authored by Naohiro Aota, committed by David Sterba

btrfs: factor out prepare_allocation() for extent allocation

Factor out prepare_allocation() from find_free_extent(). This function is
called before the allocation loop; an allocator-specific function like
prepare_allocation_clustered() should initialize its private information
there and can set a proper hint_byte to indicate where to start the
allocation from.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 45d8e033
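
For readers outside the btrfs tree, the shape of the refactoring is easy to see in miniature: a per-policy "prepare" hook is dispatched by a switch on ffe_ctl->policy, runs once before the search loop, and may seed hint_byte or fail early. Below is a minimal user-space sketch of that pattern, assuming nothing beyond the C standard library; every name in it (alloc_ctl, prepare_clustered, and so on) is invented for illustration, and only the shape mirrors the kernel code.

/*
 * Minimal user-space sketch of the dispatch pattern this commit
 * introduces: a per-policy "prepare" hook runs once before the
 * allocation loop and may seed hint_byte. All names here are
 * invented for illustration; this is not kernel code.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum alloc_policy {
	ALLOC_CLUSTERED,	/* stands in for BTRFS_EXTENT_ALLOC_CLUSTERED */
};

struct alloc_ctl {
	enum alloc_policy policy;
	unsigned long long num_bytes;
	unsigned long long hint_byte;	/* where the search loop starts */
	bool use_cluster;		/* clustered-policy private state */
};

/* Policy-specific setup: init private state, optionally set a hint. */
static int prepare_clustered(struct alloc_ctl *ctl,
			     unsigned long long max_extent_size,
			     unsigned long long last_window_start)
{
	/* Heavy fragmentation: fail fast instead of searching. */
	if (max_extent_size && ctl->num_bytes > max_extent_size)
		return -ENOSPC;
	if (max_extent_size)
		ctl->use_cluster = false;
	/* Resume near the last successful allocation window. */
	if (last_window_start)
		ctl->hint_byte = last_window_start;
	return 0;
}

/* Dispatcher, shaped like prepare_allocation() in the patch. */
static int prepare(struct alloc_ctl *ctl,
		   unsigned long long max_extent_size,
		   unsigned long long last_window_start)
{
	switch (ctl->policy) {
	case ALLOC_CLUSTERED:
		return prepare_clustered(ctl, max_extent_size,
					 last_window_start);
	default:
		abort();	/* stands in for the kernel's BUG() */
	}
}

int main(void)
{
	struct alloc_ctl ctl = {
		.policy = ALLOC_CLUSTERED,
		.num_bytes = 1 << 20,
		.use_cluster = true,
	};
	int ret = prepare(&ctl, 0, 8ULL << 20);

	printf("ret=%d hint_byte=%llu use_cluster=%d\n",
	       ret, ctl.hint_byte, ctl.use_cluster);
	return 0;
}

Funneling every policy through one switch, with BUG() on an unknown value, is what lets a later allocation policy plug in its own prepare hook without touching the body of find_free_extent().
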
@@ -3853,6 +3853,71 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
 	return -ENOSPC;
 }
 
+static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
+					struct find_free_extent_ctl *ffe_ctl,
+					struct btrfs_space_info *space_info,
+					struct btrfs_key *ins)
+{
+	/*
+	 * If our free space is heavily fragmented we may not be able to make
+	 * big contiguous allocations, so instead of doing the expensive search
+	 * for free space, simply return ENOSPC with our max_extent_size so we
+	 * can go ahead and search for a more manageable chunk.
+	 *
+	 * If our max_extent_size is large enough for our allocation simply
+	 * disable clustering since we will likely not be able to find enough
+	 * space to create a cluster and induce latency trying.
+	 */
+	if (space_info->max_extent_size) {
+		spin_lock(&space_info->lock);
+		if (space_info->max_extent_size &&
+		    ffe_ctl->num_bytes > space_info->max_extent_size) {
+			ins->offset = space_info->max_extent_size;
+			spin_unlock(&space_info->lock);
+			return -ENOSPC;
+		} else if (space_info->max_extent_size) {
+			ffe_ctl->use_cluster = false;
+		}
+		spin_unlock(&space_info->lock);
+	}
+
+	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
+					       &ffe_ctl->empty_cluster);
+	if (ffe_ctl->last_ptr) {
+		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
+
+		spin_lock(&last_ptr->lock);
+		if (last_ptr->block_group)
+			ffe_ctl->hint_byte = last_ptr->window_start;
+		if (last_ptr->fragmented) {
+			/*
+			 * We still set window_start so we can keep track of the
+			 * last place we found an allocation to try and save
+			 * some time.
+			 */
+			ffe_ctl->hint_byte = last_ptr->window_start;
+			ffe_ctl->use_cluster = false;
+		}
+		spin_unlock(&last_ptr->lock);
+	}
+
+	return 0;
+}
+
+static int prepare_allocation(struct btrfs_fs_info *fs_info,
+			      struct find_free_extent_ctl *ffe_ctl,
+			      struct btrfs_space_info *space_info,
+			      struct btrfs_key *ins)
+{
+	switch (ffe_ctl->policy) {
+	case BTRFS_EXTENT_ALLOC_CLUSTERED:
+		return prepare_allocation_clustered(fs_info, ffe_ctl,
+						    space_info, ins);
+	default:
+		BUG();
+	}
+}
+
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -3922,48 +3987,9 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 		return -ENOSPC;
 	}
 
-	/*
-	 * If our free space is heavily fragmented we may not be able to make
-	 * big contiguous allocations, so instead of doing the expensive search
-	 * for free space, simply return ENOSPC with our max_extent_size so we
-	 * can go ahead and search for a more manageable chunk.
-	 *
-	 * If our max_extent_size is large enough for our allocation simply
-	 * disable clustering since we will likely not be able to find enough
-	 * space to create a cluster and induce latency trying.
-	 */
-	if (unlikely(space_info->max_extent_size)) {
-		spin_lock(&space_info->lock);
-		if (space_info->max_extent_size &&
-		    num_bytes > space_info->max_extent_size) {
-			ins->offset = space_info->max_extent_size;
-			spin_unlock(&space_info->lock);
-			return -ENOSPC;
-		} else if (space_info->max_extent_size) {
-			ffe_ctl.use_cluster = false;
-		}
-		spin_unlock(&space_info->lock);
-	}
-
-	ffe_ctl.last_ptr = fetch_cluster_info(fs_info, space_info,
-					      &ffe_ctl.empty_cluster);
-	if (ffe_ctl.last_ptr) {
-		struct btrfs_free_cluster *last_ptr = ffe_ctl.last_ptr;
-
-		spin_lock(&last_ptr->lock);
-		if (last_ptr->block_group)
-			ffe_ctl.hint_byte = last_ptr->window_start;
-		if (last_ptr->fragmented) {
-			/*
-			 * We still set window_start so we can keep track of the
-			 * last place we found an allocation to try and save
-			 * some time.
-			 */
-			ffe_ctl.hint_byte = last_ptr->window_start;
-			ffe_ctl.use_cluster = false;
-		}
-		spin_unlock(&last_ptr->lock);
-	}
+	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
+	if (ret < 0)
+		return ret;
 
 	ffe_ctl.search_start = max(ffe_ctl.search_start,
 				   first_logical_byte(fs_info, 0));
...
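
One contract worth noting survives the move: on the fragmentation fast path, the prepare step stores space_info->max_extent_size into ins->offset before returning -ENOSPC, so the caller learns the largest extent believed to be available and can retry with a smaller request. The sketch below is a hypothetical, self-contained model of that halve-and-clamp retry, not the kernel's actual caller (which, loosely, is the -ENOSPC handling in btrfs_reserve_extent()); all names are invented.

/*
 * Hypothetical caller-side sketch (not kernel code): when the prepare
 * step fails fast with -ENOSPC it reports the largest chunk it thinks
 * is available, and the caller retries with a smaller request clamped
 * to that report.
 */
#include <errno.h>
#include <stdio.h>

/* Toy allocator: can only satisfy requests up to `avail` bytes. */
static int try_alloc(unsigned long long num_bytes,
		     unsigned long long avail,
		     unsigned long long *reported_max)
{
	if (num_bytes > avail) {
		*reported_max = avail;	/* plays the role of ins->offset */
		return -ENOSPC;
	}
	return 0;
}

int main(void)
{
	unsigned long long num_bytes = 16ULL << 20;	/* 16 MiB ask */
	unsigned long long avail = 3ULL << 20;		/* 3 MiB free */
	unsigned long long min_bytes = 1ULL << 20;	/* floor for retries */
	unsigned long long max = 0;
	int ret;

	while ((ret = try_alloc(num_bytes, avail, &max)) == -ENOSPC &&
	       num_bytes > min_bytes) {
		/* Halve the request, clamped to the reported maximum. */
		num_bytes >>= 1;
		if (max && num_bytes > max)
			num_bytes = max;
		printf("retrying with %llu bytes\n", num_bytes);
	}
	printf("ret=%d num_bytes=%llu\n", ret, num_bytes);
	return 0;
}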