Commit 2335efaf authored by Nikolay Borisov, committed by David Sterba

btrfs: split delayed ref head initialization and addition

add_delayed_ref_head really performed 2 independent operations -
initialising the ref head and adding it to a list. Now that the init
part is in a separate function let's complete the separation between
both operations. This results in a much simpler interface for
add_delayed_ref_head since the function now deals solely with either
adding the newly initialised delayed ref head or merging it into an
existing delayed ref head. This also results in a vastly simplified
function signature since 5 arguments are dropped. The only other thing
worth mentioning is that due to this split the WARN_ON catching reinit
of an existing head has to be adjusted. In this patch the check:

  qrecord && head_ref->qgroup_ref_root && head_ref->qgroup_reserved

is added to the condition. This is done because the two qgroup_*
prefixed members are set only if both ref_root and reserved are
passed, so functionally it's equivalent to the old WARN_ON and allows
removing those two arguments from add_delayed_ref_head.
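
For illustration, the new calling convention can be sketched as follows
(a simplified, hypothetical snippet based on the hunks below, not the
verbatim kernel code):

  init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
                        reserved, action, is_data, is_system);
  head_ref->extent_op = extent_op;

  spin_lock(&delayed_refs->lock);
  /*
   * add_delayed_ref_head() now only inserts the pre-initialised head or
   * merges it into an existing one.  ref_root/reserved need not be
   * passed any more: init_delayed_ref_head() sets qgroup_ref_root and
   * qgroup_reserved only when both are non-zero, so checking those
   * members together with qrecord is equivalent to the old
   * "ref_root && reserved" test in the WARN_ON.
   */
  head_ref = add_delayed_ref_head(trans, head_ref, record, action,
                                  &qrecord_inserted, old_ref_mod,
                                  new_ref_mod);
  spin_unlock(&delayed_refs->lock);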
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent eb86ec73
@@ -600,19 +600,15 @@ static noinline struct btrfs_delayed_ref_head *
 add_delayed_ref_head(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_head *head_ref,
                      struct btrfs_qgroup_extent_record *qrecord,
-                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
-                     int action, int is_data, int is_system,
-                     int *qrecord_inserted_ret,
+                     int action, int *qrecord_inserted_ret,
                      int *old_ref_mod, int *new_ref_mod)
 {
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
        int qrecord_inserted = 0;

        delayed_refs = &trans->transaction->delayed_refs;
-       init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
-                             reserved, action, is_data, is_system);

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
@@ -627,7 +623,9 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
-               WARN_ON(ref_root && reserved && existing->qgroup_ref_root
+               WARN_ON(qrecord && head_ref->qgroup_ref_root
+                       && head_ref->qgroup_reserved
+                       && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, existing, head_ref,
                                         old_ref_mod);
@@ -640,8 +638,8 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
        } else {
                if (old_ref_mod)
                        *old_ref_mod = 0;
-               if (is_data && head_ref->ref_mod < 0)
-                       delayed_refs->pending_csums += num_bytes;
+               if (head_ref->is_data && head_ref->ref_mod < 0)
+                       delayed_refs->pending_csums += head_ref->num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
@@ -651,6 +649,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
                *qrecord_inserted_ret = qrecord_inserted;
        if (new_ref_mod)
                *new_ref_mod = head_ref->total_ref_mod;
+
        return head_ref;
 }

@@ -722,7 +721,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
-       int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+       bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
        int ret;
        u8 ref_type;

@@ -752,6 +751,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                goto free_head_ref;
        }

+       init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
+                             ref_root, 0, action, false, is_system);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
@@ -761,12 +762,10 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       head_ref = add_delayed_ref_head(trans, head_ref, record, bytenr,
-                                       num_bytes, 0, 0, action, 0,
-                                       is_system, &qrecord_inserted,
+       head_ref = add_delayed_ref_head(trans, head_ref, record,
+                                       action, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);
-

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

@@ -840,6 +839,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                }
        }

+       init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
+                             reserved, action, true, false);
        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
@@ -849,9 +850,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       head_ref = add_delayed_ref_head(trans, head_ref, record, bytenr,
-                                       num_bytes, ref_root, reserved,
-                                       action, 1, 0, &qrecord_inserted,
+       head_ref = add_delayed_ref_head(trans, head_ref, record,
+                                       action, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
@@ -881,19 +881,16 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
        if (!head_ref)
                return -ENOMEM;

+       init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
+                             BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
+                             false);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

-       /*
-        * extent_ops just modify the flags of an extent and they don't result
-        * in ref count changes, hence it's safe to pass false/0 for is_system
-        * argument
-        */
-       add_delayed_ref_head(trans, head_ref, NULL, bytenr, num_bytes, 0, 0,
-                            BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
-                            0, NULL, NULL, NULL);
+       add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
+                            NULL, NULL, NULL);

        spin_unlock(&delayed_refs->lock);
        return 0;