Commit e72d79d6 authored by Qu Wenruo, committed by David Sterba

btrfs: Refactor find_free_extent loops update into find_free_extent_update_loop

We have a complex loop design for find_free_extent(): each loop has
different behavior, and some loops even include new chunk allocation.

Instead of putting such long code into find_free_extent() and making it
harder to read, extract it into find_free_extent_update_loop().

With all the cleanups, the main find_free_extent() should be pretty
bare bones:

find_free_extent()
|- Iterate through all block groups
|  |- Get a valid block group
|  |- Try to do clustered allocation in that block group
|  |- Try to do unclustered allocation in that block group
|  |- Check if the result is valid
|  |  |- If valid, then exit
|  |- Jump to next block group
|
|- Push harder to find free extents
   |- If not found, re-iterate all block groups
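
The helper reports back through its return value, as documented in its
comment below: a positive return means search again, 0 means a suitable
extent was found and recorded in ins, and a negative value is an error
such as -ENOSPC. A rough sketch of the resulting retry pattern in the
caller, paraphrased from the new call site rather than copied verbatim:

search:
	/* iterate block groups, try clustered then unclustered allocation */
	...
	ret = find_free_extent_update_loop(fs_info, last_ptr, ins, &ffe_ctl,
					   full_search, use_cluster);
	if (ret > 0)
		/* wait for caching or force a chunk allocation, then retry */
		goto search;
	/* ret == 0: ins describes the hole; ret < 0: error, e.g. -ENOSPC */
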
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Su Yue <suy.fnst@cn.fujitsu.com>
[ copy callchain from changelog to function comment ]
Signed-off-by: David Sterba <dsterba@suse.com>
parent e1a41848
@@ -7277,7 +7277,9 @@ struct find_free_extent_ctl {
	/* RAID index, converted from flags */
	int index;

-	/* Current loop number */
+	/*
+	 * Current loop number, check find_free_extent_update_loop() for details
+	 */
	int loop;

	/*
@@ -7479,6 +7481,117 @@ static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
	return 0;
}
/*
* Return >0 means caller needs to re-search for free extent
* Return 0 means we have the needed free extent.
* Return <0 means we failed to locate any free extent.
*/
static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
struct btrfs_free_cluster *last_ptr,
struct btrfs_key *ins,
struct find_free_extent_ctl *ffe_ctl,
int full_search, bool use_cluster)
{
struct btrfs_root *root = fs_info->extent_root;
int ret;
if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
ffe_ctl->orig_have_caching_bg = true;
if (!ins->objectid && ffe_ctl->loop >= LOOP_CACHING_WAIT &&
ffe_ctl->have_caching_bg)
return 1;
if (!ins->objectid && ++(ffe_ctl->index) < BTRFS_NR_RAID_TYPES)
return 1;
if (ins->objectid) {
if (!use_cluster && last_ptr) {
spin_lock(&last_ptr->lock);
last_ptr->window_start = ins->objectid;
spin_unlock(&last_ptr->lock);
}
return 0;
}
/*
* LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
* caching kthreads as we move along
* LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
* LOOP_ALLOC_CHUNK, force a chunk allocation and try again
* LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
* again
*/
if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
ffe_ctl->index = 0;
if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) {
/*
* We want to skip the LOOP_CACHING_WAIT step if we
* don't have any uncached bgs and we've already done a
* full search through.
*/
if (ffe_ctl->orig_have_caching_bg || !full_search)
ffe_ctl->loop = LOOP_CACHING_WAIT;
else
ffe_ctl->loop = LOOP_ALLOC_CHUNK;
} else {
ffe_ctl->loop++;
}
if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
struct btrfs_trans_handle *trans;
int exist = 0;
trans = current->journal_info;
if (trans)
exist = 1;
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
return ret;
}
ret = do_chunk_alloc(trans, ffe_ctl->flags,
CHUNK_ALLOC_FORCE);
/*
* If we can't allocate a new chunk we've already looped
* through at least once, move on to the NO_EMPTY_SIZE
* case.
*/
if (ret == -ENOSPC)
ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
/* Do not bail out on ENOSPC since we can do more. */
if (ret < 0 && ret != -ENOSPC)
btrfs_abort_transaction(trans, ret);
else
ret = 0;
if (!exist)
btrfs_end_transaction(trans);
if (ret)
return ret;
}
if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
/*
* Don't loop again if we already have no empty_size and
* no empty_cluster.
*/
if (ffe_ctl->empty_size == 0 &&
ffe_ctl->empty_cluster == 0)
return -ENOSPC;
ffe_ctl->empty_size = 0;
ffe_ctl->empty_cluster = 0;
}
return 1;
}
return -ENOSPC;
}
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
@@ -7489,6 +7602,20 @@ static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently.
*
* The overall logic and call chain:
*
* find_free_extent()
* |- Iterate through all block groups
* | |- Get a valid block group
* | |- Try to do clustered allocation in that block group
* | |- Try to do unclustered allocation in that block group
* | |- Check if the result is valid
* | | |- If valid, then exit
* | |- Jump to next block group
* |
* |- Push harder to find free extents
* |- If not found, re-iterate all block groups
 */
static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
				     u64 ram_bytes, u64 num_bytes, u64 empty_size,
@@ -7496,7 +7623,6 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
				     u64 flags, int delalloc)
{
	int ret = 0;
-	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	struct find_free_extent_ctl ffe_ctl = {0};
@@ -7731,106 +7857,11 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
	}
	up_read(&space_info->groups_sem);

-	if ((ffe_ctl.loop == LOOP_CACHING_NOWAIT) && ffe_ctl.have_caching_bg
-	    && !ffe_ctl.orig_have_caching_bg)
-		ffe_ctl.orig_have_caching_bg = true;
-
-	if (!ins->objectid && ffe_ctl.loop >= LOOP_CACHING_WAIT &&
-	    ffe_ctl.have_caching_bg)
-		goto search;
-
-	if (!ins->objectid && ++ffe_ctl.index < BTRFS_NR_RAID_TYPES)
+	ret = find_free_extent_update_loop(fs_info, last_ptr, ins, &ffe_ctl,
+					   full_search, use_cluster);
+	if (ret > 0)
		goto search;
/*
* LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
* caching kthreads as we move along
* LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
* LOOP_ALLOC_CHUNK, force a chunk allocation and try again
* LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
* again
*/
if (!ins->objectid && ffe_ctl.loop < LOOP_NO_EMPTY_SIZE) {
ffe_ctl.index = 0;
if (ffe_ctl.loop == LOOP_CACHING_NOWAIT) {
/*
* We want to skip the LOOP_CACHING_WAIT step if we
* don't have any uncached bgs and we've already done a
* full search through.
*/
if (ffe_ctl.orig_have_caching_bg || !full_search)
ffe_ctl.loop = LOOP_CACHING_WAIT;
else
ffe_ctl.loop = LOOP_ALLOC_CHUNK;
} else {
ffe_ctl.loop++;
}
if (ffe_ctl.loop == LOOP_ALLOC_CHUNK) {
struct btrfs_trans_handle *trans;
int exist = 0;
trans = current->journal_info;
if (trans)
exist = 1;
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out;
}
ret = do_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
/*
* If we can't allocate a new chunk we've already looped
* through at least once, move on to the NO_EMPTY_SIZE
* case.
*/
if (ret == -ENOSPC)
ffe_ctl.loop = LOOP_NO_EMPTY_SIZE;
/*
* Do not bail out on ENOSPC since we
* can do more things.
*/
if (ret < 0 && ret != -ENOSPC)
btrfs_abort_transaction(trans, ret);
else
ret = 0;
if (!exist)
btrfs_end_transaction(trans);
if (ret)
goto out;
}
if (ffe_ctl.loop == LOOP_NO_EMPTY_SIZE) {
/*
* Don't loop again if we already have no empty_size and
* no empty_cluster.
*/
if (empty_size == 0 &&
ffe_ctl.empty_cluster == 0) {
ret = -ENOSPC;
goto out;
}
empty_size = 0;
ffe_ctl.empty_cluster = 0;
}
goto search;
} else if (!ins->objectid) {
ret = -ENOSPC;
} else if (ins->objectid) {
if (!use_cluster && last_ptr) {
spin_lock(&last_ptr->lock);
last_ptr->window_start = ins->objectid;
spin_unlock(&last_ptr->lock);
}
ret = 0;
}
out:
	if (ret == -ENOSPC) {
		/*
		 * Use ffe_ctl->total_free_space as fallback if we can't find
...