Commit 7fe6d45e authored by Dennis Zhou, committed by David Sterba

btrfs: have multiple discard lists

Non-block group destruction discarding currently has only a single list
with no minimum discard length. This can lead to caravanning more
meaningful discards behind a heavily fragmented block group.

This adds support for multiple lists with minimum discard lengths to
prevent the caravan effect. We promote block groups back up when a freed
region reaches BTRFS_ASYNC_DISCARD_MAX_FILTER in size. Currently we
support only two filtered lists, with filters of 1MB and 32KB
respectively.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 19b2a2c7
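
Before the diff itself, a minimal standalone sketch of the size-to-list
mapping the patch introduces may help. Index 0 is reserved for unused block
groups (no filter); the remaining lists accept monotonically smaller free
regions. The constants mirror the patch, but the helper pick_discard_index()
is hypothetical and only illustrates the selection rule, not kernel code:

	#include <stdio.h>

	#define SZ_32K (32 * 1024ULL)
	#define SZ_1M  (1024 * 1024ULL)

	#define BTRFS_NR_DISCARD_LISTS     3
	#define BTRFS_DISCARD_INDEX_UNUSED 0
	#define BTRFS_DISCARD_INDEX_START  1

	/* Monotonically decreasing minimum length filters after index 0. */
	static const unsigned long long discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
		0,       /* index 0: unused block groups, no filter */
		SZ_1M,   /* index 1: BTRFS_ASYNC_DISCARD_MAX_FILTER */
		SZ_32K,  /* index 2: BTRFS_ASYNC_DISCARD_MIN_FILTER */
	};

	/*
	 * Hypothetical helper: given a freed region size, return the list
	 * index that would service it, scanning from the largest filter
	 * down. Regions below every filter cause no move, so the current
	 * index is kept.
	 */
	static int pick_discard_index(unsigned long long bytes, int cur_index)
	{
		int i;

		if (bytes < discard_minlen[BTRFS_NR_DISCARD_LISTS - 1])
			return cur_index;

		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS; i++)
			if (bytes >= discard_minlen[i])
				return i;

		return cur_index; /* unreachable */
	}

	int main(void)
	{
		/* A 4M extent maps to the 1M list; 64K and 8K stay lower. */
		printf("4M  -> list %d\n", pick_discard_index(4 * SZ_1M, 2));  /* 1 */
		printf("64K -> list %d\n", pick_discard_index(2 * SZ_32K, 2)); /* 2 */
		printf("8K  -> list %d\n", pick_discard_index(8 * 1024, 2));   /* 2 */
		return 0;
	}
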
fs/btrfs/ctree.h:

@@ -456,7 +456,7 @@ struct btrfs_full_stripe_locks_tree {
  * afterwards represent monotonically decreasing discard filter sizes to
  * prioritize what should be discarded next.
  */
-#define BTRFS_NR_DISCARD_LISTS		2
+#define BTRFS_NR_DISCARD_LISTS		3
 #define BTRFS_DISCARD_INDEX_UNUSED	0
 #define BTRFS_DISCARD_INDEX_START	1

fs/btrfs/discard.c:
@@ -22,6 +22,13 @@
 #define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
 #define BTRFS_DISCARD_MAX_IOPS		(10U)

+/* Monotonically decreasing minimum length filters after index 0 */
+static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
+	0,
+	BTRFS_ASYNC_DISCARD_MAX_FILTER,
+	BTRFS_ASYNC_DISCARD_MIN_FILTER
+};
+
 static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					   struct btrfs_block_group *block_group)
 {
@@ -139,16 +146,18 @@ static struct btrfs_block_group *find_next_block_group(
  * peek_discard_list - wrap find_next_block_group()
  * @discard_ctl: discard control
  * @discard_state: the discard_state of the block_group after state management
+ * @discard_index: the discard_index of the block_group after state management
  *
  * This wraps find_next_block_group() and sets the block_group to be in use.
  * discard_state's control flow is managed here. Variables related to
- * discard_state are reset here as needed (eg. discard_cursor). @discard_state
- * is remembered as it may change while we're discarding, but we want the
- * discard to execute in the context determined here.
+ * discard_state are reset here as needed (eg discard_cursor). @discard_state
+ * and @discard_index are remembered as it may change while we're discarding,
+ * but we want the discard to execute in the context determined here.
  */
 static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl,
-					enum btrfs_discard_state *discard_state)
+					enum btrfs_discard_state *discard_state,
+					int *discard_index)
 {
	struct btrfs_block_group *block_group;
	const u64 now = ktime_get_ns();
@@ -169,6 +178,7 @@ static struct btrfs_block_group *peek_discard_list(
		}
		discard_ctl->block_group = block_group;
		*discard_state = block_group->discard_state;
+		*discard_index = block_group->discard_index;
	} else {
		block_group = NULL;
	}
@@ -178,6 +188,64 @@ static struct btrfs_block_group *peek_discard_list(
	return block_group;
 }

+/**
+ * btrfs_discard_check_filter - updates a block group's filters
+ * @block_group: block group of interest
+ * @bytes: recently freed region size after coalescing
+ *
+ * Async discard maintains multiple lists with progressively smaller filters
+ * to prioritize discarding based on size. Should a free space that matches
+ * a larger filter be returned to the free_space_cache, prioritize that discard
+ * by moving @block_group to the proper filter.
+ */
+void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
+				u64 bytes)
+{
+	struct btrfs_discard_ctl *discard_ctl;
+
+	if (!block_group ||
+	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
+		return;
+
+	discard_ctl = &block_group->fs_info->discard_ctl;
+
+	if (block_group->discard_index > BTRFS_DISCARD_INDEX_START &&
+	    bytes >= discard_minlen[block_group->discard_index - 1]) {
+		int i;
+
+		remove_from_discard_list(discard_ctl, block_group);
+
+		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS;
+		     i++) {
+			if (bytes >= discard_minlen[i]) {
+				block_group->discard_index = i;
+				add_to_discard_list(discard_ctl, block_group);
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * btrfs_update_discard_index - moves a block group along the discard lists
+ * @discard_ctl: discard control
+ * @block_group: block_group of interest
+ *
+ * Increment @block_group's discard_index. If it falls off the list, let it
+ * be. Otherwise add it back to the appropriate list.
+ */
+static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
+				       struct btrfs_block_group *block_group)
+{
+	block_group->discard_index++;
+	if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) {
+		block_group->discard_index = 1;
+		return;
+	}
+
+	add_to_discard_list(discard_ctl, block_group);
+}
+
 /**
  * btrfs_discard_cancel_work - remove a block_group from the discard lists
  * @discard_ctl: discard control
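
As a hedged illustration of the promote/demote pair above, continuing the
sketch shown before the diff (reusing pick_discard_index() and the
constants; the sizes are hypothetical):

	int idx = 2;				/* block group on the 32K list */

	/* 2M freed back: 2M >= discard_minlen[1], so
	 * btrfs_discard_check_filter() would move the group up to the
	 * 1M list. */
	idx = pick_discard_index(2 * SZ_1M, idx);	/* idx == 1 */

	/* A discard pass finishes with data still in the group, so
	 * btrfs_update_discard_index() bumps it one list down; bumping
	 * past the last list resets the index without requeueing. */
	idx++;					/* idx == 2: re-added to 32K list */
	if (idx == BTRFS_NR_DISCARD_LISTS)
		idx = BTRFS_DISCARD_INDEX_START;	/* not taken here */
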
@@ -295,6 +363,8 @@ static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
			btrfs_mark_bg_unused(block_group);
		else
			add_to_discard_unused_list(discard_ctl, block_group);
+	} else {
+		btrfs_update_discard_index(discard_ctl, block_group);
	}
 }
@@ -311,25 +381,42 @@ static void btrfs_discard_workfn(struct work_struct *work)
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	enum btrfs_discard_state discard_state;
+	int discard_index = 0;
	u64 trimmed = 0;
+	u64 minlen = 0;

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

-	block_group = peek_discard_list(discard_ctl, &discard_state);
+	block_group = peek_discard_list(discard_ctl, &discard_state,
+					&discard_index);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;

	/* Perform discarding */
-	if (discard_state == BTRFS_DISCARD_BITMAPS)
+	minlen = discard_minlen[discard_index];
+
+	if (discard_state == BTRFS_DISCARD_BITMAPS) {
+		u64 maxlen = 0;
+
+		/*
+		 * Use the previous level's minimum discard length as the max
+		 * length filter. In the case something is added to make a
+		 * region go beyond the max filter, the entire bitmap is set
+		 * back to BTRFS_TRIM_STATE_UNTRIMMED.
+		 */
+		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
+			maxlen = discard_minlen[discard_index - 1];
+
		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
				block_group->discard_cursor,
				btrfs_block_group_end(block_group),
-				0, true);
-	else
+				minlen, maxlen, true);
+	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
				block_group->discard_cursor,
				btrfs_block_group_end(block_group),
-				0, true);
+				minlen, true);
+	}

	discard_ctl->prev_discard = trimmed;
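
The net effect of the minlen/maxlen pairing in the workfn above is that each
discard index trims a bounded size window, so one list's pass cannot be
stalled by regions another list is responsible for. A minimal sketch of the
window computation, mirroring the logic above (trim_window() is a
hypothetical name; a maxlen of 0 means unbounded, and the cap is only
applied to bitmaps):

	static void trim_window(int discard_index,
				unsigned long long *minlen,
				unsigned long long *maxlen)
	{
		*minlen = discard_minlen[discard_index];

		/* The previous level's minimum becomes this level's cap. */
		*maxlen = 0;	/* 0 == no upper bound */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			*maxlen = discard_minlen[discard_index - 1];
	}

	/* Resulting windows: index 0 -> [0, inf); index 1 -> [1M, inf),
	 * since discard_minlen[0] == 0 disables the cap; index 2 ->
	 * [32K, 1M], with larger bitmap regions reset to untrimmed so the
	 * block group can be promoted. */
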
fs/btrfs/discard.h:

@@ -11,6 +11,11 @@ struct btrfs_block_group;

 /* Discard size limits */
 #define BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE	(SZ_64M)
+#define BTRFS_ASYNC_DISCARD_MAX_FILTER		(SZ_1M)
+#define BTRFS_ASYNC_DISCARD_MIN_FILTER		(SZ_32K)
+
+/* List operations */
+void btrfs_discard_check_filter(struct btrfs_block_group *block_group, u64 bytes);

 /* Work operations */
 void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,

fs/btrfs/free-space-cache.c:
@@ -2465,6 +2465,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
	struct btrfs_block_group *block_group = ctl->private;
	struct btrfs_free_space *info;
	int ret = 0;
+	u64 filter_bytes = bytes;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
@@ -2501,6 +2502,8 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
	 */
	steal_from_bitmap(ctl, info, true);

+	filter_bytes = max(filter_bytes, info->bytes);
+
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
@@ -2513,8 +2516,10 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
		ASSERT(ret != -EEXIST);
	}

-	if (trim_state != BTRFS_TRIM_STATE_TRIMMED)
+	if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
+		btrfs_discard_check_filter(block_group, filter_bytes);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
+	}

	return ret;
 }
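
A hedged example of why filter_bytes tracks the coalesced size rather than
the freed size: returning a 16K extent that sits next to roughly 1.2M of
already-free space can merge (including via steal_from_bitmap()) into a
single extent larger than BTRFS_ASYNC_DISCARD_MAX_FILTER, and it is that
merged size, info->bytes, that should promote the block group, not the 16K
that triggered the call.
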
@@ -3478,7 +3483,14 @@ static int trim_no_bitmap(struct btrfs_block_group *block_group,
			goto next;
		}
		unlink_free_space(ctl, entry);
-		if (max_discard_size && bytes > max_discard_size) {
+		/*
+		 * Let bytes = max_discard_size + X.
+		 * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
+		 * X when we come back around. So trim it now.
+		 */
+		if (max_discard_size &&
+		    bytes >= (max_discard_size +
+			      BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
			bytes = max_discard_size;
			extent_bytes = max_discard_size;
			entry->offset += max_discard_size;
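
To see what the extra BTRFS_ASYNC_DISCARD_MIN_FILTER slack buys, plug in the
defaults (a worked example with hypothetical sizes): with max_discard_size =
64M, an extent of 64M + 16K under the old check (bytes > max_discard_size)
would be split into a 64M discard plus a 16K tail, and a 16K tail can never
pass the 32K minimum filter, so it would be stranded untrimmed. The new
check only splits when the tail is at least 32K; 64M + 48K, for instance,
splits into 64M now and a 48K remainder that is still large enough to be
trimmed on the next pass.
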
@@ -3585,7 +3597,7 @@ static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
  */
 static int trim_bitmaps(struct btrfs_block_group *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen,
-			bool async)
+			u64 maxlen, bool async)
 {
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
@@ -3613,7 +3625,15 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
-		if (!entry || (async && start == offset &&
+		/*
+		 * Bitmaps are marked trimmed lossily now to prevent constant
+		 * discarding of the same bitmap (the reason why we are bound
+		 * by the filters). So, retrim the block group bitmaps when we
+		 * are preparing to punt to the unused_bgs list. This uses
+		 * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
+		 * which is the only discard index which sets minlen to 0.
+		 */
+		if (!entry || (async && minlen && start == offset &&
			       btrfs_free_space_trimmed(entry))) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
@@ -3634,10 +3654,10 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			/*
-			 * This keeps the invariant that all bytes are trimmed
-			 * if BTRFS_TRIM_STATE_TRIMMED is set on a bitmap.
+			 * We lossily consider a bitmap trimmed if we only skip
+			 * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
			 */
-			if (ret2 && !minlen)
+			if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
				end_trimming_bitmap(ctl, entry);
			else
				entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
@@ -3658,14 +3678,21 @@ static int trim_bitmaps(struct btrfs_block_group *block_group,
		}

		bytes = min(bytes, end - start);
-		if (bytes < minlen) {
+		if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
+			entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

-		if (async && max_discard_size && bytes > max_discard_size)
+		/*
+		 * Let bytes = max_discard_size + X.
+		 * If X < @minlen, we won't trim X when we come back around.
+		 * So trim it now. We differ here from trimming extents as we
+		 * don't keep individual state per bit.
+		 */
+		if (async &&
+		    max_discard_size &&
+		    bytes > (max_discard_size + minlen))
			bytes = max_discard_size;

		bitmap_clear_bits(ctl, entry, start, bytes);
@@ -3773,7 +3800,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
	if (ret)
		goto out;

-	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, false);
+	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);

	div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
	/* If we ended in the middle of a bitmap, reset the trimming flag */
	if (rem)
@@ -3807,7 +3834,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
 int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
-				   bool async)
+				   u64 maxlen, bool async)
 {
	int ret;
@@ -3821,7 +3848,9 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

-	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, async);
+	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
+			   async);

	btrfs_put_block_group_trimming(block_group);

	return ret;

fs/btrfs/free-space-cache.h:
@@ -146,7 +146,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
				   bool async);
 int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
-				   bool async);
+				   u64 maxlen, bool async);

 /* Support functions for running our sanity tests */
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS