Commit 285ff5af authored by Josef Bacik, committed by Chris Mason

Btrfs: remove the ideal caching code

This is a relic from before we had the disk space cache; it was there to keep
bootup times from being so damned slow when you had btrfs as root.  Now that we
have the disk space cache this isn't a problem anymore, and keeping this code
around really just causes unneeded fragmentation and complexity, so remove it.  Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
parent c16fa4f2
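
For context, the heuristic this patch deletes scored each block group by how empty it was and remembered the emptiest one as the "ideal" candidate to cache first. Below is a minimal userspace sketch of that scoring, distilled from the free_percent hunk further down; the names bg_used, bg_size, and ideal_cache_score() are hypothetical stand-ins for btrfs_block_group_used(&block_group->item), block_group->key.offset, and the inline computation, and plain division replaces the kernel's div64_u64().

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: scores a block group by how empty it is, mirroring the
 * removed free_percent computation.  bg_used/bg_size are stand-ins for
 * btrfs_block_group_used() and block_group->key.offset.
 */
static uint64_t ideal_cache_score(uint64_t bg_used, uint64_t bg_size)
{
	uint64_t free_percent = bg_used * 100;

	free_percent = free_percent / bg_size;	/* div64_u64() in the kernel */
	return 100 - free_percent;		/* higher = more free space */
}

int main(void)
{
	/* The old code kept the highest-scoring (emptiest) group as "ideal". */
	uint64_t score = ideal_cache_score(256u << 20, 1024u << 20);

	printf("score: %llu\n", (unsigned long long)score);
	return 0;
}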
@@ -5260,11 +5260,10 @@ static int get_block_group_index(struct btrfs_block_group_cache *cache)
 }
 
 enum btrfs_loop_type {
-	LOOP_FIND_IDEAL = 0,
-	LOOP_CACHING_NOWAIT = 1,
-	LOOP_CACHING_WAIT = 2,
-	LOOP_ALLOC_CHUNK = 3,
-	LOOP_NO_EMPTY_SIZE = 4,
+	LOOP_CACHING_NOWAIT = 0,
+	LOOP_CACHING_WAIT = 1,
+	LOOP_ALLOC_CHUNK = 2,
+	LOOP_NO_EMPTY_SIZE = 3,
 };
 
 /*
@@ -5300,8 +5299,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	bool failed_alloc = false;
 	bool use_cluster = true;
 	bool have_caching_bg = false;
-	u64 ideal_cache_percent = 0;
-	u64 ideal_cache_offset = 0;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -5351,7 +5348,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		empty_cluster = 0;
 
 	if (search_start == hint_byte) {
-ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
 		used_block_group = block_group;
@@ -5363,8 +5359,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		 * picked out then we don't care that the block group is cached.
 		 */
		if (block_group && block_group_bits(block_group, data) &&
-		    (block_group->cached != BTRFS_CACHE_NO ||
-		     search_start == ideal_cache_offset)) {
+		    block_group->cached != BTRFS_CACHE_NO) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
 			    block_group->ro) {
@@ -5418,44 +5413,12 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 have_block_group:
 		cached = block_group_cache_done(block_group);
 		if (unlikely(!cached)) {
-			u64 free_percent;
-
 			found_uncached_bg = true;
 			ret = cache_block_group(block_group, trans,
 						orig_root, 1);
 			if (block_group->cached == BTRFS_CACHE_FINISHED)
 				goto alloc;
-
-			free_percent = btrfs_block_group_used(&block_group->item);
-			free_percent *= 100;
-			free_percent = div64_u64(free_percent,
-						 block_group->key.offset);
-			free_percent = 100 - free_percent;
-			if (free_percent > ideal_cache_percent &&
-			    likely(!block_group->ro)) {
-				ideal_cache_offset = block_group->key.objectid;
-				ideal_cache_percent = free_percent;
-			}
-
-			/*
-			 * The caching workers are limited to 2 threads, so we
-			 * can queue as much work as we care to.
-			 */
-			if (loop > LOOP_FIND_IDEAL) {
-				ret = cache_block_group(block_group, trans,
-							orig_root, 0);
-				BUG_ON(ret);
-			}
-
-			/*
-			 * If loop is set for cached only, try the next block
-			 * group.
-			 */
-			if (loop == LOOP_FIND_IDEAL)
-				goto loop;
 		}
 alloc:
 		if (unlikely(block_group->ro))
 			goto loop;
@@ -5661,9 +5624,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
 		goto search;
 
-	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
-	 *			for them to make caching progress.  Also
-	 *			determine the best possible bg to cache
+	/*
 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
 	 *			caching kthreads as we move along
 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
@@ -5673,45 +5634,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	 */
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
-		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
-			found_uncached_bg = false;
-			loop++;
-			if (!ideal_cache_percent)
-				goto search;
-
-			/*
-			 * 1 of the following 2 things have happened so far
-			 *
-			 * 1) We found an ideal block group for caching that
-			 * is mostly full and will cache quickly, so we might
-			 * as well wait for it.
-			 *
-			 * 2) We searched for cached only and we didn't find
-			 * anything, and we didn't start any caching kthreads
-			 * either, so chances are we will loop through and
-			 * start a couple caching kthreads, and then come back
-			 * around and just wait for them.  This will be slower
-			 * because we will have 2 caching kthreads reading at
-			 * the same time when we could have just started one
-			 * and waited for it to get far enough to give us an
-			 * allocation, so go ahead and go to the wait caching
-			 * loop.
-			 */
-			loop = LOOP_CACHING_WAIT;
-			search_start = ideal_cache_offset;
-			ideal_cache_percent = 0;
-			goto ideal_cache;
-		} else if (loop == LOOP_FIND_IDEAL) {
-			/*
-			 * Didn't find a uncached bg, wait on anything we find
-			 * next.
-			 */
-			loop = LOOP_CACHING_WAIT;
-			goto search;
-		}
-
 		loop++;
 		if (loop == LOOP_ALLOC_CHUNK) {
 			if (allowed_chunk_alloc) {
 				ret = do_chunk_alloc(trans, root, num_bytes +
......
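
With LOOP_FIND_IDEAL removed, the allocator's fallback ladder is just the renumbered enum stepped by a plain loop++, as the hunks above show. Here is a rough standalone sketch of that progression; it is not kernel code, and try_alloc_at() is an invented stand-in for one full search pass of find_free_extent().

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the trimmed enum from the first hunk above. */
enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,	/* use only already-cached space */
	LOOP_CACHING_WAIT = 1,		/* wait for a caching bg to make progress */
	LOOP_ALLOC_CHUNK = 2,		/* try allocating a fresh chunk */
	LOOP_NO_EMPTY_SIZE = 3,		/* last resort: drop empty-size padding */
};

/* Invented stand-in for one full search pass of find_free_extent(). */
static bool try_alloc_at(enum btrfs_loop_type loop)
{
	(void)loop;
	return false;	/* pretend every pass fails so we escalate */
}

int main(void)
{
	enum btrfs_loop_type loop;

	/* Escalation is now a plain loop++ ladder with no ideal-cache detour. */
	for (loop = LOOP_CACHING_NOWAIT; loop <= LOOP_NO_EMPTY_SIZE; loop++) {
		if (try_alloc_at(loop)) {
			printf("allocated at stage %d\n", loop);
			return 0;
		}
	}
	printf("allocation failed after all stages\n");
	return 1;
}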