Commit bd56b302 authored by Chris Mason

Btrfs: Make btrfs_drop_snapshot work in larger and more efficient chunks

Every transaction in btrfs creates a new snapshot, and then schedules the
snapshot from the last transaction for deletion.  Snapshot deletion
works by walking down the btree and dropping the reference counts
on each btree block during the walk.

If a given leaf or node has a reference count greater than one, the
reference count is decremented and the subtree pointed to by that
node is ignored.

If the reference count is one, walking continues down into that node
or leaf, and the references of everything it points to are decremented.
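As a concrete illustration, here is a toy userspace model of that
refcount rule (the node layout and helpers below are invented for
illustration; this is not the kernel code):

    #include <stdlib.h>

    /* toy btree node: a subtree shared by other snapshots has refs > 1 */
    struct node {
            int refs;               /* trees pointing at this block */
            int nchildren;          /* 0 for a leaf */
            struct node **children;
    };

    static struct node *mknode(int refs, int nchildren)
    {
            struct node *n = calloc(1, sizeof(*n));

            n->refs = refs;
            n->nchildren = nchildren;
            if (nchildren)
                    n->children = calloc(nchildren, sizeof(*n->children));
            return n;
    }

    /*
     * drop one reference on n.  If someone else still references the
     * block (refs > 1), just decrement and ignore the subtree below it.
     * Only when we hold the last reference do we recurse and free.
     */
    static void drop_ref(struct node *n)
    {
            int i;

            if (n->refs > 1) {
                    n->refs--;      /* shared: the other snapshot keeps it */
                    return;
            }
            for (i = 0; i < n->nchildren; i++)
                    drop_ref(n->children[i]);
            free(n->children);
            free(n);
    }

    int main(void)
    {
            struct node *root = mknode(1, 2);

            root->children[0] = mknode(2, 0); /* shared leaf: survives */
            root->children[1] = mknode(1, 0); /* exclusive leaf: freed */
            drop_ref(root); /* children[0] is left with refs == 1 */
            return 0;
    }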

The old code would try to work in small pieces, walking down the tree
until it found the lowest leaf or node to free and then returning.  This
was very friendly to the rest of the FS because it didn't have a huge
impact on other operations.

But it wouldn't always keep up with the rate at which new commits added
new snapshots for deletion, and it wasn't very efficient for the extent
allocation tree because it didn't find leaves that were close together
on disk and process them at the same time.

This changes things to walk down to a level 1 node and then process it
in bulk.  All the leaf pointers are sorted and the leaves are dropped
in order based on their extent number.
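The comparator itself (refsort_cmp) is outside the hunks shown below, so
here is a plausible shape for it, demonstrated with userspace qsort()
instead of the kernel's sort(); the bytenr values in main() are made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* matches the struct in the patch: disk byte number plus btree slot */
    struct refsort {
            uint64_t bytenr;
            uint32_t slot;
    };

    /* ascending by disk byte number, so the frees walk the disk in order */
    static int refsort_cmp(const void *a, const void *b)
    {
            const struct refsort *ra = a, *rb = b;

            if (ra->bytenr < rb->bytenr)
                    return -1;
            if (ra->bytenr > rb->bytenr)
                    return 1;
            return 0;
    }

    int main(void)
    {
            /* leaf pointers as they sit in the node: random disk order */
            struct refsort sorted[] = {
                    { .bytenr = 8192,  .slot = 0 },
                    { .bytenr = 4096,  .slot = 1 },
                    { .bytenr = 12288, .slot = 2 },
            };
            int i;

            qsort(sorted, 3, sizeof(struct refsort), refsort_cmp);

            /* process in disk order, using .slot to find the item again */
            for (i = 0; i < 3; i++)
                    printf("drop bytenr %llu (slot %u)\n",
                           (unsigned long long)sorted[i].bytenr,
                           sorted[i].slot);
            return 0;
    }

Keeping the slot alongside the byte number is what lets the second pass
read the item out of the leaf again after the array has been reordered.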

The extent allocation tree and commit code are now fast enough for
this kind of bulk processing to work without slowing the rest of the FS
down.  Overall it does less IO and is better able to keep up with
snapshot deletions under high load.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent b4ce94de
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1533,6 +1533,11 @@ int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  * struct refsort is used to match byte number to slot in the btree block.
  * we sort based on the byte number and then use the slot to actually
  * find the item.
+ *
+ * struct refsort is smaller than struct btrfs_item and smaller than
+ * struct btrfs_key_ptr.  Since we're currently limited to the page size
+ * for a btree block, there's no way for a kmalloc of refsorts for a
+ * single node to be bigger than a page.
  */
 struct refsort {
 	u64 bytenr;
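The arithmetic behind that comment, with back-of-the-envelope struct
sizes (the 33-byte key pointer, 101-byte block header, and 16-byte
padded refsort below are assumptions for a 4K block, not values taken
from this patch):

    #include <stdio.h>

    int main(void)
    {
            /* assumed sizes for illustration: a 4K btree block, a
             * 33-byte key pointer (17-byte disk key + 8-byte blockptr
             * + 8-byte generation), a ~101-byte block header, and a
             * 16-byte refsort (u64 + u32, padded).
             */
            const int blocksize = 4096;
            const int key_ptr_size = 33;
            const int refsort_size = 16;
            const int header = 101;

            int max_ptrs = (blocksize - header) / key_ptr_size;

            /* ~121 pointers -> ~1936 bytes of refsorts, well under 4K */
            printf("max pointers per node: %d\n", max_ptrs);
            printf("refsort array: %d bytes (page is %d)\n",
                   max_ptrs * refsort_size, blocksize);
            return 0;
    }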
@@ -3457,36 +3462,73 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
 {
 	u64 leaf_owner;
 	u64 leaf_generation;
+	struct refsort *sorted;
 	struct btrfs_key key;
 	struct btrfs_file_extent_item *fi;
 	int i;
 	int nritems;
 	int ret;
+	int refi = 0;
+	int slot;
 
 	BUG_ON(!btrfs_is_leaf(leaf));
 	nritems = btrfs_header_nritems(leaf);
 	leaf_owner = btrfs_header_owner(leaf);
 	leaf_generation = btrfs_header_generation(leaf);
 
+	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+	/* we do this loop twice.  The first time we build a list
+	 * of the extents we have a reference on, then we sort the list
+	 * by bytenr.  The second time around we actually do the
+	 * extent freeing.
+	 */
 	for (i = 0; i < nritems; i++) {
 		u64 disk_bytenr;
 		cond_resched();
 
 		btrfs_item_key_to_cpu(leaf, &key, i);
+
+		/* only extents have references, skip everything else */
 		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
 			continue;
+
 		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
+
+		/* inline extents live in the btree, they don't have refs */
 		if (btrfs_file_extent_type(leaf, fi) ==
 		    BTRFS_FILE_EXTENT_INLINE)
 			continue;
-		/*
-		 * FIXME make sure to insert a trans record that
-		 * repeats the snapshot del on crash
-		 */
+
 		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+
+		/* holes don't have refs */
 		if (disk_bytenr == 0)
 			continue;
+
+		sorted[refi].bytenr = disk_bytenr;
+		sorted[refi].slot = i;
+		refi++;
+	}
+	if (refi == 0)
+		goto out;
+
+	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+	for (i = 0; i < refi; i++) {
+		u64 disk_bytenr;
+
+		disk_bytenr = sorted[i].bytenr;
+		slot = sorted[i].slot;
+
+		cond_resched();
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+			continue;
+
+		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
 		ret = __btrfs_free_extent(trans, root, disk_bytenr,
 				btrfs_file_extent_disk_num_bytes(leaf, fi),
 				leaf->start, leaf_owner, leaf_generation,
@@ -3497,6 +3539,8 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
 		wake_up(&root->fs_info->transaction_throttle);
 		cond_resched();
 	}
+out:
+	kfree(sorted);
 	return 0;
 }
@@ -3506,9 +3550,25 @@ static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
 {
 	int i;
 	int ret;
-	struct btrfs_extent_info *info = ref->extents;
+	struct btrfs_extent_info *info;
+	struct refsort *sorted;
+
+	if (ref->nritems == 0)
+		return 0;
+
+	sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
+	for (i = 0; i < ref->nritems; i++) {
+		sorted[i].bytenr = ref->extents[i].bytenr;
+		sorted[i].slot = i;
+	}
+	sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
 
+	/*
+	 * the items in the ref were sorted when the ref was inserted
+	 * into the ref cache, so this is already in order
+	 */
 	for (i = 0; i < ref->nritems; i++) {
+		info = ref->extents + sorted[i].slot;
 		ret = __btrfs_free_extent(trans, root, info->bytenr,
 					  info->num_bytes, ref->bytenr,
 					  ref->owner, ref->generation,
@@ -3565,6 +3625,152 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
 	return ret;
 }
 
+/*
+ * this is used while deleting old snapshots, and it drops the refs
+ * on a whole subtree starting from a level 1 node.
+ *
+ * The idea is to sort all the leaf pointers, and then drop the
+ * ref on all the leaves in order.  Most of the time the leaves
+ * will have ref cache entries, so no leaf IOs will be required to
+ * find the extents they have references on.
+ *
+ * For each leaf, any references it has are also dropped in order
+ *
+ * This ends up dropping the references in something close to optimal
+ * order for reading and modifying the extent allocation tree.
+ */
+static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct btrfs_path *path)
+{
+	u64 bytenr;
+	u64 root_owner;
+	u64 root_gen;
+	struct extent_buffer *eb = path->nodes[1];
+	struct extent_buffer *leaf;
+	struct btrfs_leaf_ref *ref;
+	struct refsort *sorted = NULL;
+	int nritems = btrfs_header_nritems(eb);
+	int ret;
+	int i;
+	int refi = 0;
+	int slot = path->slots[1];
+	u32 blocksize = btrfs_level_size(root, 0);
+	u32 refs;
+
+	if (nritems == 0)
+		goto out;
+
+	root_owner = btrfs_header_owner(eb);
+	root_gen = btrfs_header_generation(eb);
+	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
+
+	/*
+	 * step one, sort all the leaf pointers so we don't scribble
+	 * randomly into the extent allocation tree
+	 */
+	for (i = slot; i < nritems; i++) {
+		sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
+		sorted[refi].slot = i;
+		refi++;
+	}
+
+	/*
+	 * nritems won't be zero, but if we're picking up drop_snapshot
+	 * after a crash, slot might be > 0, so double check things
+	 * just in case.
+	 */
+	if (refi == 0)
+		goto out;
+
+	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
+
+	/*
+	 * the first loop frees everything the leaves point to
+	 */
+	for (i = 0; i < refi; i++) {
+		u64 ptr_gen;
+
+		bytenr = sorted[i].bytenr;
+
+		/*
+		 * check the reference count on this leaf.  If it is > 1
+		 * we just decrement it below and don't update any
+		 * of the refs the leaf points to.
+		 */
+		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
+		BUG_ON(ret);
+		if (refs != 1)
+			continue;
+
+		ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
+
+		/*
+		 * the leaf only had one reference, which means the
+		 * only thing pointing to this leaf is the snapshot
+		 * we're deleting.  It isn't possible for the reference
+		 * count to increase again later
+		 *
+		 * The reference cache is checked for the leaf,
+		 * and if found we'll be able to drop any refs held by
+		 * the leaf without needing to read it in.
+		 */
+		ref = btrfs_lookup_leaf_ref(root, bytenr);
+		if (ref && ref->generation != ptr_gen) {
+			btrfs_free_leaf_ref(root, ref);
+			ref = NULL;
+		}
+		if (ref) {
+			ret = cache_drop_leaf_ref(trans, root, ref);
+			BUG_ON(ret);
+			btrfs_remove_leaf_ref(root, ref);
+			btrfs_free_leaf_ref(root, ref);
+		} else {
+			/*
+			 * the leaf wasn't in the reference cache, so
+			 * we have to read it.
+			 */
+			leaf = read_tree_block(root, bytenr, blocksize,
+					       ptr_gen);
+			ret = btrfs_drop_leaf_ref(trans, root, leaf);
+			BUG_ON(ret);
+			free_extent_buffer(leaf);
+		}
+
+		atomic_inc(&root->fs_info->throttle_gen);
+		wake_up(&root->fs_info->transaction_throttle);
+		cond_resched();
+	}
+
+	/*
+	 * run through the loop again to free the refs on the leaves.
+	 * This is faster than doing it in the loop above because
+	 * the leaves are likely to be clustered together.  We end up
+	 * working in nice chunks on the extent allocation tree.
+	 */
+	for (i = 0; i < refi; i++) {
+		bytenr = sorted[i].bytenr;
+		ret = __btrfs_free_extent(trans, root, bytenr,
+					blocksize, eb->start,
+					root_owner, root_gen, 0, 1);
+		BUG_ON(ret);
+
+		atomic_inc(&root->fs_info->throttle_gen);
+		wake_up(&root->fs_info->transaction_throttle);
+		cond_resched();
+	}
+out:
+	kfree(sorted);
+
+	/*
+	 * update the path to show we've processed the entire level 1
+	 * node.  This will get saved into the root's drop_snapshot_progress
+	 * field so these drops are not repeated again if this transaction
+	 * commits.
+	 */
+	path->slots[1] = nritems;
+	return 0;
+}
+
 /*
  * helper function for drop_snapshot, this walks down the tree dropping ref
  * counts as it goes.
@@ -3580,7 +3786,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 	struct extent_buffer *next;
 	struct extent_buffer *cur;
 	struct extent_buffer *parent;
-	struct btrfs_leaf_ref *ref;
 	u32 blocksize;
 	int ret;
 	u32 refs;
@@ -3607,17 +3812,46 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		if (path->slots[*level] >=
 		    btrfs_header_nritems(cur))
 			break;
+
+		/* the new code goes down to level 1 and does all the
+		 * leaves pointed to that node in bulk.  So, this check
+		 * for level 0 will always be false.
+		 *
+		 * But, the disk format allows the drop_snapshot_progress
+		 * field in the root to leave things in a state where
+		 * a leaf will need cleaning up here.  If someone crashes
+		 * with the old code and then boots with the new code,
+		 * we might find a leaf here.
+		 */
 		if (*level == 0) {
 			ret = btrfs_drop_leaf_ref(trans, root, cur);
 			BUG_ON(ret);
 			break;
 		}
+
+		/*
+		 * once we get to level one, process the whole node
+		 * at once, including everything below it.
+		 */
+		if (*level == 1) {
+			ret = drop_level_one_refs(trans, root, path);
+			BUG_ON(ret);
+			break;
+		}
+
 		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
 		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
 		blocksize = btrfs_level_size(root, *level - 1);
 
 		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
 		BUG_ON(ret);
+
+		/*
+		 * if there is more than one reference, we don't need
+		 * to read that node to drop any references it has.  We
+		 * just drop the ref we hold on that node and move on to the
+		 * next slot in this level.
+		 */
 		if (refs != 1) {
 			parent = path->nodes[*level];
 			root_owner = btrfs_header_owner(parent);
@@ -3636,46 +3870,12 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 			continue;
 		}
+
 		/*
-		 * at this point, we have a single ref, and since the
-		 * only place referencing this extent is a dead root
-		 * the reference count should never go higher.
-		 * So, we don't need to check it again
+		 * we need to keep freeing things in the next level down.
+		 * read the block and loop around to process it
 		 */
-		if (*level == 1) {
-			ref = btrfs_lookup_leaf_ref(root, bytenr);
-			if (ref && ref->generation != ptr_gen) {
-				btrfs_free_leaf_ref(root, ref);
-				ref = NULL;
-			}
-			if (ref) {
-				ret = cache_drop_leaf_ref(trans, root, ref);
-				BUG_ON(ret);
-				btrfs_remove_leaf_ref(root, ref);
-				btrfs_free_leaf_ref(root, ref);
-				*level = 0;
-				break;
-			}
-		}
-		next = btrfs_find_tree_block(root, bytenr, blocksize);
-		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
-			free_extent_buffer(next);
-
-			next = read_tree_block(root, bytenr, blocksize,
-					       ptr_gen);
-			cond_resched();
-#if 0
-			/*
-			 * this is a debugging check and can go away
-			 * the ref should never go all the way down to 1
-			 * at this point
-			 */
-			ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
-						&refs);
-			BUG_ON(ret);
-			WARN_ON(refs != 1);
-#endif
-		}
+		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
 
 		WARN_ON(*level <= 0);
 		if (path->nodes[*level-1])
 			free_extent_buffer(path->nodes[*level-1]);
@@ -3700,11 +3900,16 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		root_owner = btrfs_header_owner(parent);
 		root_gen = btrfs_header_generation(parent);
 
+		/*
+		 * cleanup and free the reference on the last node
+		 * we processed
+		 */
 		ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
 					  parent->start, root_owner, root_gen,
 					  *level, 1);
+
 		free_extent_buffer(path->nodes[*level]);
 		path->nodes[*level] = NULL;
 		*level += 1;
 		BUG_ON(ret);
@@ -3824,6 +4029,13 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
 		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
 			struct extent_buffer *node;
 			struct btrfs_disk_key disk_key;
+
+			/*
+			 * there is more work to do in this level.
+			 * Update the drop_progress marker to reflect
+			 * the work we've done so far, and then bump
+			 * the slot number
+			 */
 			node = path->nodes[i];
 			path->slots[i]++;
 			*level = i;
@@ -3835,6 +4047,11 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
 			return 0;
 		} else {
 			struct extent_buffer *parent;
+
+			/*
+			 * this whole node is done, free our reference
+			 * on it and go up one level
+			 */
 			if (path->nodes[*level] == root->node)
 				parent = path->nodes[*level];
 			else
@@ -4849,6 +5066,7 @@ int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
 	ref->bytenr = buf->start;
 	ref->owner = btrfs_header_owner(buf);
 	ref->generation = btrfs_header_generation(buf);
+
 	ret = btrfs_add_leaf_ref(root, ref, 0);
 	WARN_ON(ret);
 	btrfs_free_leaf_ref(root, ref);
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2441,6 +2441,8 @@ static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
 		ref->generation = leaf_gen;
 		ref->nritems = 0;
+
+		btrfs_sort_leaf_ref(ref);
 		ret = btrfs_add_leaf_ref(root, ref, 0);
 		WARN_ON(ret);
 		btrfs_free_leaf_ref(root, ref);
--- a/fs/btrfs/ref-cache.c
+++ b/fs/btrfs/ref-cache.c
@@ -17,6 +17,7 @@
  */
 #include <linux/sched.h>
+#include <linux/sort.h>
 #include "ctree.h"
 #include "ref-cache.h"
 #include "transaction.h"
--- a/fs/btrfs/ref-cache.h
+++ b/fs/btrfs/ref-cache.h
@@ -73,5 +73,4 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
 int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
 			   int shared);
 int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
-
 #endif