Commit 1262133b authored by Josef Bacik, committed by Chris Mason

Btrfs: account for crcs in delayed ref processing

As we delete large extents, we end up doing huge amounts of COW in order
to delete the corresponding crcs.  This adds accounting so that we keep
track of that space, and flushing of delayed refs so that we don't build
up too much delayed crc work.

This helps limit the delayed work that must be done at commit time and
tries to avoid ENOSPC aborts because the crcs eat all the global
reserves.
Signed-off-by: Chris Mason <clm@fb.com>
parent 28ed1345
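
To get a feel for the numbers driving this patch, here is a minimal userspace sketch of the csum-to-leaves arithmetic that the new csum_bytes_to_leaves() helper below performs. The leaf size, item overhead, sector size, and 4-byte crc32c csum size are assumed example values for illustration, not values read from a real filesystem:

/* Standalone sketch of the csum-to-leaves math; all constants are
 * assumed example values (16K leaves, 4K sectors, crc32c csums). */
#include <stdio.h>
#include <stdint.h>

#define LEAF_DATA_SIZE	16000ULL	/* ~16K leaf minus header, assumed */
#define ITEM_OVERHEAD	25ULL		/* per-item overhead, assumed */
#define SECTORSIZE	4096ULL
#define CSUM_SIZE	4ULL		/* crc32c */

static uint64_t csum_bytes_to_leaves(uint64_t csum_bytes)
{
	uint64_t csum_space = LEAF_DATA_SIZE - ITEM_OVERHEAD;
	uint64_t csums_per_leaf = csum_space / CSUM_SIZE;	/* ~3993 */
	uint64_t num_csums = csum_bytes / SECTORSIZE;

	/* round up: a partial leaf still costs a full leaf of COW */
	return (num_csums + csums_per_leaf - 1) / csums_per_leaf;
}

int main(void)
{
	/* deleting 1GiB of csummed data dirties ~66 csum leaves */
	printf("%llu\n",
	       (unsigned long long)csum_bytes_to_leaves(1ULL << 30));
	return 0;
}

Under these assumptions, deleting 1GiB of csummed data implies on the order of 66 leaves of csum-tree COW; that is the metadata load the new pending_csums counter makes visible to the reservation code.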
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -489,11 +489,13 @@ update_existing_ref(struct btrfs_trans_handle *trans,
  * existing and update must have the same bytenr
  */
 static noinline void
-update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
+update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+			 struct btrfs_delayed_ref_node *existing,
 			 struct btrfs_delayed_ref_node *update)
 {
 	struct btrfs_delayed_ref_head *existing_ref;
 	struct btrfs_delayed_ref_head *ref;
+	int old_ref_mod;
 
 	existing_ref = btrfs_delayed_node_to_head(existing);
 	ref = btrfs_delayed_node_to_head(update);
@@ -541,7 +543,20 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
 	 * only need the lock for this case cause we could be processing it
 	 * currently, for refs we just added we know we're a-ok.
 	 */
+	old_ref_mod = existing_ref->total_ref_mod;
 	existing->ref_mod += update->ref_mod;
+	existing_ref->total_ref_mod += update->ref_mod;
+
+	/*
+	 * If we are going from a positive ref mod to a negative or vice
+	 * versa we need to make sure to adjust pending_csums accordingly.
+	 */
+	if (existing_ref->is_data) {
+		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
+			delayed_refs->pending_csums -= existing->num_bytes;
+		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
+			delayed_refs->pending_csums += existing->num_bytes;
+	}
 	spin_unlock(&existing_ref->lock);
 }
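
The sign handling above is the subtle part: pending_csums should only count bytes for data heads whose net effect is a delete (total_ref_mod < 0), so merging an update into an existing head may move bytes into or out of the counter. A toy, lock-free model of just that transition logic (struct and field names loosely mirror the kernel's, but this is an illustration, not kernel code):

/* Toy model of the pending_csums bookkeeping for one data head ref. */
#include <assert.h>
#include <stdint.h>

struct toy_head {
	int total_ref_mod;	/* net add(+1)/drop(-1) count */
	uint64_t num_bytes;
};

static uint64_t pending_csums;

static void merge_update(struct toy_head *head, int ref_mod)
{
	int old_ref_mod = head->total_ref_mod;

	head->total_ref_mod += ref_mod;
	/* bytes enter the counter when the head becomes a net drop,
	 * and leave it when a later add cancels the drop */
	if (head->total_ref_mod >= 0 && old_ref_mod < 0)
		pending_csums -= head->num_bytes;
	if (head->total_ref_mod < 0 && old_ref_mod >= 0)
		pending_csums += head->num_bytes;
}

int main(void)
{
	struct toy_head head = { .total_ref_mod = 0, .num_bytes = 1 << 20 };

	merge_update(&head, -1);	/* net drop queued: counts */
	assert(pending_csums == 1 << 20);
	merge_update(&head, 1);		/* re-added: drop cancelled */
	assert(pending_csums == 0);
	return 0;
}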
@@ -605,6 +620,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	head_ref->is_data = is_data;
 	head_ref->ref_root = RB_ROOT;
 	head_ref->processing = 0;
+	head_ref->total_ref_mod = count_mod;
 
 	spin_lock_init(&head_ref->lock);
 	mutex_init(&head_ref->mutex);
@@ -614,7 +630,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	existing = htree_insert(&delayed_refs->href_root,
 				&head_ref->href_node);
 	if (existing) {
-		update_existing_head_ref(&existing->node, ref);
+		update_existing_head_ref(delayed_refs, &existing->node, ref);
 		/*
 		 * we've updated the existing ref, free the newly
 		 * allocated ref
@@ -622,6 +638,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 		head_ref = existing;
 	} else {
+		if (is_data && count_mod < 0)
+			delayed_refs->pending_csums += num_bytes;
 		delayed_refs->num_heads++;
 		delayed_refs->num_heads_ready++;
 		atomic_inc(&delayed_refs->num_entries);
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -88,6 +88,14 @@ struct btrfs_delayed_ref_head {
 	struct rb_node href_node;
 
 	struct btrfs_delayed_extent_op *extent_op;
+
+	/*
+	 * This is used to track the final ref_mod from all the refs associated
+	 * with this head ref; it is not adjusted as delayed refs are run, and
+	 * is only meant to track whether we need to do the csum accounting.
+	 */
+	int total_ref_mod;
+
 	/*
 	 * when a new extent is allocated, it is just reserved in memory
 	 * The actual extent isn't inserted into the extent allocation tree
@@ -138,6 +146,8 @@ struct btrfs_delayed_ref_root {
 	/* total number of head nodes ready for processing */
 	unsigned long num_heads_ready;
 
+	u64 pending_csums;
+
 	/*
 	 * set when the tree is flushing before a transaction commit,
 	 * used by the throttling code to decide if new updates need
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2538,6 +2538,12 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			 * list before we release it.
 			 */
 			if (btrfs_delayed_ref_is_head(ref)) {
+				if (locked_ref->is_data &&
+				    locked_ref->total_ref_mod < 0) {
+					spin_lock(&delayed_refs->lock);
+					delayed_refs->pending_csums -= ref->num_bytes;
+					spin_unlock(&delayed_refs->lock);
+				}
 				btrfs_delayed_ref_unlock(locked_ref);
 				locked_ref = NULL;
 			}
@@ -2626,11 +2632,31 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
 	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
 
+/*
+ * Takes the number of bytes to be csumm'ed and figures out how many leaves it
+ * would require to store the csums for that many bytes.
+ */
+static u64 csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
+{
+	u64 csum_size;
+	u64 num_csums_per_leaf;
+	u64 num_csums;
+
+	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+	num_csums_per_leaf = div64_u64(csum_size,
+			(u64)btrfs_super_csum_size(root->fs_info->super_copy));
+	num_csums = div64_u64(csum_bytes, root->sectorsize);
+	num_csums += num_csums_per_leaf - 1;
+	num_csums = div64_u64(num_csums, num_csums_per_leaf);
+	return num_csums;
+}
+
 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root)
 {
 	struct btrfs_block_rsv *global_rsv;
 	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
+	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
 	u64 num_bytes;
 	int ret = 0;
 
@@ -2639,6 +2665,7 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 	if (num_heads > 1)
 		num_bytes += (num_heads - 1) * root->nodesize;
 	num_bytes <<= 1;
+	num_bytes += csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
 	global_rsv = &root->fs_info->global_block_rsv;
 
 	/*
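
Putting the pieces together, the check above now sizes the needed global-reserve headroom from both the head count and the pending csum bytes. A back-of-the-envelope instance, reusing the same assumed geometry as the earlier sketch and approximating the first head's cost as a single nodesize block:

/* Hypothetical workload: 64 heads ready, 1GiB of pending csum bytes.
 * All constants are assumed example values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t nodesize = 16384, sectorsize = 4096, csum_size = 4;
	uint64_t num_heads = 64;		/* heads ready to run */
	uint64_t csum_bytes = 1ULL << 30;	/* pending_csums: 1GiB */

	uint64_t csums_per_leaf = (16000 - 25) / csum_size;	/* ~3993 */
	uint64_t csum_leaves = (csum_bytes / sectorsize + csums_per_leaf - 1)
			       / csums_per_leaf;		/* 66 */

	uint64_t num_bytes = nodesize;			/* first head, approx. */
	num_bytes += (num_heads - 1) * nodesize;	/* remaining heads */
	num_bytes <<= 1;				/* COW headroom */
	num_bytes += csum_leaves * nodesize;		/* csum delete work */

	printf("need %llu bytes of global reserve headroom\n",
	       (unsigned long long)num_bytes);		/* 3178496, ~3MiB */
	return 0;
}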
@@ -5065,30 +5092,19 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
 				   int reserve)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	u64 csum_size;
-	int num_csums_per_leaf;
-	int num_csums;
-	int old_csums;
+	u64 old_csums, num_csums;
 
 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
 	    BTRFS_I(inode)->csum_bytes == 0)
 		return 0;
 
-	old_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+	old_csums = csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
 	if (reserve)
 		BTRFS_I(inode)->csum_bytes += num_bytes;
 	else
 		BTRFS_I(inode)->csum_bytes -= num_bytes;
-	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
-	num_csums_per_leaf = (int)div_u64(csum_size,
-					  sizeof(struct btrfs_csum_item) +
-					  sizeof(struct btrfs_disk_key));
-	num_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
-	num_csums = num_csums + num_csums_per_leaf - 1;
-	num_csums = num_csums / num_csums_per_leaf;
-	old_csums = old_csums + num_csums_per_leaf - 1;
-	old_csums = old_csums / num_csums_per_leaf;
+	num_csums = csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
 
 	/* No change, no need to reserve more */
 	if (old_csums == num_csums)
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4197,9 +4197,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	int extent_type = -1;
 	int ret;
 	int err = 0;
-	int be_nice = 0;
 	u64 ino = btrfs_ino(inode);
 	u64 bytes_deleted = 0;
+	bool be_nice = 0;
+	bool should_throttle = 0;
 
 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
@@ -4405,19 +4406,20 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 						btrfs_header_owner(leaf),
 						ino, extent_offset, 0);
 				BUG_ON(ret);
-				if (be_nice && pending_del_nr &&
-				    (pending_del_nr % 16 == 0) &&
-				    bytes_deleted > 1024 * 1024) {
+				if (btrfs_should_throttle_delayed_refs(trans, root))
 					btrfs_async_run_delayed_refs(root,
 						trans->delayed_ref_updates * 2, 0);
-				}
 			}
 
 			if (found_type == BTRFS_INODE_ITEM_KEY)
 				break;
 
+			should_throttle =
+				btrfs_should_throttle_delayed_refs(trans, root);
+
 			if (path->slots[0] == 0 ||
-			    path->slots[0] != pending_del_slot) {
+			    path->slots[0] != pending_del_slot ||
+			    (be_nice && should_throttle)) {
 				if (pending_del_nr) {
 					ret = btrfs_del_items(trans, root, path,
 							      pending_del_slot,
@@ -4430,6 +4432,15 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 				pending_del_nr = 0;
 			}
 			btrfs_release_path(path);
+			if (be_nice && should_throttle) {
+				unsigned long updates = trans->delayed_ref_updates;
+				if (updates) {
+					trans->delayed_ref_updates = 0;
+					ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+					if (ret && !err)
+						err = ret;
+				}
+			}
 			goto search_again;
 		} else {
 			path->slots[0]--;
@@ -4449,7 +4460,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 
 	btrfs_free_path(path);
 
-	if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+	if (be_nice && btrfs_should_throttle_delayed_refs(trans, root)) {
 		unsigned long updates = trans->delayed_ref_updates;
 		if (updates) {
 			trans->delayed_ref_updates = 0;
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -64,6 +64,9 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
 	if (atomic_dec_and_test(&transaction->use_count)) {
 		BUG_ON(!list_empty(&transaction->list));
 		WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
+		if (transaction->delayed_refs.pending_csums)
+			printk(KERN_ERR "pending csums is %llu\n",
+			       transaction->delayed_refs.pending_csums);
 		while (!list_empty(&transaction->pending_chunks)) {
 			struct extent_map *em;
 
@@ -223,6 +226,7 @@ static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
 	cur_trans->delayed_refs.href_root = RB_ROOT;
 	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 	cur_trans->delayed_refs.num_heads_ready = 0;
+	cur_trans->delayed_refs.pending_csums = 0;
 	cur_trans->delayed_refs.num_heads = 0;
 	cur_trans->delayed_refs.flushing = 0;
 	cur_trans->delayed_refs.run_delayed_start = 0;