Commit 1887be66 authored by Chris Mason's avatar Chris Mason

Btrfs: try to cleanup delayed refs while freeing extents

When extents are freed, it is likely that we've removed the last
delayed reference update for the extent.  This checks the delayed
ref tree when things are freed, and if no ref updates are left it
immediately processes the delayed ref.
Signed-off-by: default avatarChris Mason <chris.mason@oracle.com>
parent 44871b1b
...@@ -510,6 +510,24 @@ int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans, ...@@ -510,6 +510,24 @@ int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
return 0; return 0;
} }
/*
 * Simple lookup for the delayed ref head tracking a given extent.
 *
 * Must be called with the delayed ref spinlock held.  Returns the
 * head node for @bytenr, or NULL when no delayed refs are pending
 * for that extent.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_node *node;

	/* the head node is keyed with parent == (u64)-1 */
	node = tree_search(&delayed_refs->root, bytenr, (u64)-1);
	if (!node)
		return NULL;
	return btrfs_delayed_node_to_head(node);
}
/* /*
* add a delayed ref to the tree. This does all of the accounting required * add a delayed ref to the tree. This does all of the accounting required
* to make sure the delayed ref is eventually processed before this * to make sure the delayed ref is eventually processed before this
......
...@@ -137,9 +137,8 @@ int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans, ...@@ -137,9 +137,8 @@ int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
u64 ref_generation, u64 owner_objectid, int action, u64 ref_generation, u64 owner_objectid, int action,
int pin); int pin);
struct btrfs_delayed_ref * struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref(struct btrfs_trans_handle *trans, u64 bytenr, btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
u64 parent);
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_lock_delayed_ref(struct btrfs_trans_handle *trans, int btrfs_lock_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref, struct btrfs_delayed_ref_node *ref,
......
...@@ -1021,6 +1021,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, ...@@ -1021,6 +1021,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (!locked_ref && count == 0) if (!locked_ref && count == 0)
break; break;
cond_resched();
spin_lock(&delayed_refs->lock); spin_lock(&delayed_refs->lock);
} }
if (run_all) { if (run_all) {
...@@ -1045,6 +1046,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, ...@@ -1045,6 +1046,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
mutex_unlock(&head->mutex); mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(ref); btrfs_put_delayed_ref(ref);
cond_resched();
goto again; goto again;
} }
node = rb_next(node); node = rb_next(node);
...@@ -2361,6 +2363,68 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, ...@@ -2361,6 +2363,68 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
owner_objectid, pin, pin == 0, refs_to_drop); owner_objectid, pin, pin == 0, refs_to_drop);
} }
/*
 * when we free an extent, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 *
 * Always returns 0; a failure to run the remaining delayed ref is fatal
 * (BUG_ON) rather than reported to the caller.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr)
{
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct rb_node *node;
int ret;
delayed_refs = &trans->transaction->delayed_refs;
/* the spinlock is required by btrfs_find_delayed_ref_head */
spin_lock(&delayed_refs->lock);
head = btrfs_find_delayed_ref_head(trans, bytenr);
if (!head)
goto out;
/*
 * look at the entry immediately before the head; if it is for the
 * same bytenr there are still other delayed ref updates pending
 */
node = rb_prev(&head->node.rb_node);
if (!node)
goto out;
ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
/* there are still entries for this ref, we can't drop it */
if (ref->bytenr == bytenr)
goto out;
/*
 * waiting for the lock here would deadlock. If someone else has it
 * locked they are already in the process of dropping it anyway
 */
if (!mutex_trylock(&head->mutex))
goto out;
/*
 * at this point we have a head with no other entries. Go
 * ahead and process it.
 */
head->node.in_tree = 0;
rb_erase(&head->node.rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
/*
 * we don't take a ref on the node because we're removing it from the
 * tree, so we just steal the ref the tree was holding.
 */
spin_unlock(&delayed_refs->lock);
ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
&head->node, head->must_insert_reserved);
BUG_ON(ret);
/*
 * NOTE(review): head->mutex is never unlocked on this path; presumably
 * the final ref put below frees the head entirely — confirm.
 */
btrfs_put_delayed_ref(&head->node);
return 0;
out:
/* nothing to clean up (or refs still pending): just drop the lock */
spin_unlock(&delayed_refs->lock);
return 0;
}
int btrfs_free_extent(struct btrfs_trans_handle *trans, int btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent, u64 bytenr, u64 num_bytes, u64 parent,
...@@ -2388,6 +2452,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, ...@@ -2388,6 +2452,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
root_objectid, ref_generation, root_objectid, ref_generation,
owner_objectid, owner_objectid,
BTRFS_DROP_DELAYED_REF, 1); BTRFS_DROP_DELAYED_REF, 1);
BUG_ON(ret);
ret = check_ref_cleanup(trans, root, bytenr);
BUG_ON(ret);
} }
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment