Commit eb86ec73 authored by Nikolay Borisov, committed by David Sterba

btrfs: Use init_delayed_ref_head in add_delayed_ref_head

Use the newly introduced function when initialising the head_ref in
add_delayed_ref_head. No functional changes.
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent a2e569b3
@@ -608,69 +608,14 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
 {
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
-       int count_mod = 1;
-       int must_insert_reserved = 0;
        int qrecord_inserted = 0;
 
-       /* If reserved is provided, it must be a data extent. */
-       BUG_ON(!is_data && reserved);
-
-       /*
-        * the head node stores the sum of all the mods, so dropping a ref
-        * should drop the sum in the head node by one.
-        */
-       if (action == BTRFS_UPDATE_DELAYED_HEAD)
-               count_mod = 0;
-       else if (action == BTRFS_DROP_DELAYED_REF)
-               count_mod = -1;
-
-       /*
-        * BTRFS_ADD_DELAYED_EXTENT means that we need to update
-        * the reserved accounting when the extent is finally added, or
-        * if a later modification deletes the delayed ref without ever
-        * inserting the extent into the extent allocation tree.
-        * ref->must_insert_reserved is the flag used to record
-        * that accounting mods are required.
-        *
-        * Once we record must_insert_reserved, switch the action to
-        * BTRFS_ADD_DELAYED_REF because other special casing is not required.
-        */
-       if (action == BTRFS_ADD_DELAYED_EXTENT)
-               must_insert_reserved = 1;
-       else
-               must_insert_reserved = 0;
-
        delayed_refs = &trans->transaction->delayed_refs;
 
-       refcount_set(&head_ref->refs, 1);
-       head_ref->bytenr = bytenr;
-       head_ref->num_bytes = num_bytes;
-       head_ref->ref_mod = count_mod;
-       head_ref->must_insert_reserved = must_insert_reserved;
-       head_ref->is_data = is_data;
-       head_ref->is_system = is_system;
-       head_ref->ref_tree = RB_ROOT;
-       INIT_LIST_HEAD(&head_ref->ref_add_list);
-       RB_CLEAR_NODE(&head_ref->href_node);
-       head_ref->processing = 0;
-       head_ref->total_ref_mod = count_mod;
-       head_ref->qgroup_reserved = 0;
-       head_ref->qgroup_ref_root = 0;
-       spin_lock_init(&head_ref->lock);
-       mutex_init(&head_ref->mutex);
+       init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
+                             reserved, action, is_data, is_system);
 
        /* Record qgroup extent info if provided */
        if (qrecord) {
-               if (ref_root && reserved) {
-                       head_ref->qgroup_ref_root = ref_root;
-                       head_ref->qgroup_reserved = reserved;
-               }
-
-               qrecord->bytenr = bytenr;
-               qrecord->num_bytes = num_bytes;
-               qrecord->old_roots = NULL;
-
-               if(btrfs_qgroup_trace_extent_nolock(trans->fs_info,
+               if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
@@ -695,7 +640,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
        } else {
                if (old_ref_mod)
                        *old_ref_mod = 0;
-               if (is_data && count_mod < 0)
+               if (is_data && head_ref->ref_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
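
For context, the helper this patch switches to, init_delayed_ref_head(), was factored out in the parent commit a2e569b3. The sketch below reassembles the logic removed above into that shape; the signature is inferred from the call site in this diff, so the parameter types and layout here are assumptions rather than a copy of the actual helper.

/*
 * Sketch of the consolidated helper (assumed shape, reconstructed from the
 * code removed in this patch; see the parent commit for the real version).
 */
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
                                  struct btrfs_qgroup_extent_record *qrecord,
                                  u64 bytenr, u64 num_bytes, u64 ref_root,
                                  u64 reserved, int action, int is_data,
                                  int is_system)
{
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /* The head stores the sum of all mods; a drop lowers it by one. */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means the reserved accounting must be
         * updated when the extent is finally added, or if a later
         * modification drops the ref without ever inserting the extent;
         * must_insert_reserved records that.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = bytenr;
        head_ref->num_bytes = num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
        head_ref->ref_tree = RB_ROOT;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        /* The qgroup record bookkeeping also moves into the helper. */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }
                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
        }
}

With the initialization gathered in one place, add_delayed_ref_head() is left with inserting the head and recording the qgroup extent, which is also why the second hunk now reads head_ref->ref_mod instead of the removed local count_mod.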