Commit 194ab0bc authored by Josef Bacik, committed by David Sterba

btrfs: breakout empty head cleanup to a helper

Move this code out to a helper function to further simplify
__btrfs_run_delayed_refs.
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent b00e6250
@@ -2605,6 +2605,43 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
 	return ret ? ret : 1;
 }
 
+static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+			    struct btrfs_fs_info *fs_info,
+			    struct btrfs_delayed_ref_head *head)
+{
+	struct btrfs_delayed_ref_root *delayed_refs;
+	int ret;
+
+	delayed_refs = &trans->transaction->delayed_refs;
+
+	ret = cleanup_extent_op(trans, fs_info, head);
+	if (ret < 0) {
+		unselect_delayed_ref_head(delayed_refs, head);
+		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
+		return ret;
+	} else if (ret) {
+		return ret;
+	}
+
+	/*
+	 * Need to drop our head ref lock and re-acquire the delayed ref lock
+	 * and then re-check to make sure nobody got added.
+	 */
+	spin_unlock(&head->lock);
+	spin_lock(&delayed_refs->lock);
+	spin_lock(&head->lock);
+	if (!list_empty(&head->ref_list) || head->extent_op) {
+		spin_unlock(&head->lock);
+		spin_unlock(&delayed_refs->lock);
+		return 1;
+	}
+	head->node.in_tree = 0;
+	delayed_refs->num_heads--;
+	rb_erase(&head->href_node, &delayed_refs->href_root);
+	spin_unlock(&delayed_refs->lock);
+	return 0;
+}
+
 /*
  * Returns 0 on success or if called with an already aborted transaction.
  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
@@ -2686,47 +2723,21 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		}
 
 		if (!ref) {
-			/* All delayed refs have been processed, Go ahead
-			 * and send the head node to run_one_delayed_ref,
-			 * so that any accounting fixes can happen
-			 */
-			ref = &locked_ref->node;
-			ret = cleanup_extent_op(trans, fs_info, locked_ref);
-			if (ret < 0) {
-				unselect_delayed_ref_head(delayed_refs,
-							  locked_ref);
-				btrfs_debug(fs_info,
-					    "run_delayed_extent_op returned %d",
-					    ret);
-				return ret;
-			} else if (ret > 0) {
+			ret = cleanup_ref_head(trans, fs_info, locked_ref);
+			if (ret > 0 ) {
 				/* We dropped our lock, we need to loop. */
 				ret = 0;
 				continue;
+			} else if (ret) {
+				return ret;
 			}
 
 			/*
-			 * Need to drop our head ref lock and re-acquire the
-			 * delayed ref lock and then re-check to make sure
-			 * nobody got added.
+			 * All delayed refs have been processed, Go ahead and
+			 * send the head node to run_one_delayed_ref, so that
+			 * any accounting fixes can happen
 			 */
-			spin_unlock(&locked_ref->lock);
-			spin_lock(&delayed_refs->lock);
-			spin_lock(&locked_ref->lock);
-			if (!list_empty(&locked_ref->ref_list) ||
-			    locked_ref->extent_op) {
-				spin_unlock(&locked_ref->lock);
-				spin_unlock(&delayed_refs->lock);
-				continue;
-			}
-			ref->in_tree = 0;
-			delayed_refs->num_heads--;
-			rb_erase(&locked_ref->href_node,
-				 &delayed_refs->href_root);
-			spin_unlock(&delayed_refs->lock);
+			ref = &locked_ref->node;
 		} else {
 			actual_count++;
 			ref->in_tree = 0;
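Editor's note: the subtle part of the new helper is the "Need to drop our head ref lock and re-acquire the delayed ref lock" step. The delayed_refs lock must be taken before the per-head lock, so the helper drops the head lock, takes both in order, and re-checks that the head is still empty; if a ref raced in during the window it returns 1 so the caller loops and processes it. Below is a minimal userspace sketch of that drop-and-recheck idiom using pthreads; the types and names (struct tree, struct node, remove_if_empty) are illustrative stand-ins, not kernel APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct tree {
	pthread_mutex_t lock;	/* global lock, like delayed_refs->lock */
	int num_heads;
};

struct node {
	pthread_mutex_t lock;	/* per-node lock, like head->lock */
	int nrefs;		/* pending work attached to this node */
	bool in_tree;
};

/*
 * Called with n->lock held. Returns 0 if the node was removed (with
 * n->lock still held, mirroring cleanup_ref_head()), or 1 if work was
 * added while the locks were juggled and the caller must loop.
 */
static int remove_if_empty(struct tree *t, struct node *n)
{
	/* Lock order is t->lock then n->lock, so drop and retake. */
	pthread_mutex_unlock(&n->lock);
	pthread_mutex_lock(&t->lock);
	pthread_mutex_lock(&n->lock);

	if (n->nrefs > 0) {
		/* Somebody raced in during the window: caller retries. */
		pthread_mutex_unlock(&n->lock);
		pthread_mutex_unlock(&t->lock);
		return 1;
	}

	n->in_tree = false;
	t->num_heads--;
	/* the rb_erase() equivalent would go here */
	pthread_mutex_unlock(&t->lock);
	return 0;
}

int main(void)
{
	struct tree t = { .num_heads = 1 };
	struct node n = { .nrefs = 0, .in_tree = true };

	pthread_mutex_init(&t.lock, NULL);
	pthread_mutex_init(&n.lock, NULL);

	pthread_mutex_lock(&n.lock);	/* caller holds the node lock */
	if (remove_if_empty(&t, &n) == 0) {
		printf("removed, num_heads=%d\n", t.num_heads);
		pthread_mutex_unlock(&n.lock);
	}
	return 0;
}

As in the kernel helper, the success path deliberately returns with the per-node lock still held, since the caller goes on to use the node it just detached.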