Commit 28bad212 authored by Josef Bacik, committed by David Sterba

btrfs: fix truncate throttling

We have a bunch of magic to make sure we're throttling delayed refs when
truncating a file.  Now that we have a delayed refs rsv and a mechanism
for refilling that reserve simply use that instead of all of this magic.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent db2462a6
...@@ -4442,31 +4442,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) ...@@ -4442,31 +4442,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
return err; return err;
} }
/*
 * Apply back-pressure to the ENOSPC machinery during a truncate.
 *
 * The space reserved here is never actually consumed; taking the
 * reservation merely forces the ENOSPC system to account for the
 * metadata the pending deletions may generate.  Returns 0 on success
 * or the error from btrfs_block_rsv_add() if the reservation could
 * not be made.
 */
static int truncate_space_check(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytes_deleted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 rsv_bytes;
	int err;

	/* Scale deleted bytes to the worst-case csum-leaf metadata cost. */
	rsv_bytes = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
	rsv_bytes *= fs_info->nodesize;

	err = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv, rsv_bytes,
				  BTRFS_RESERVE_NO_FLUSH);
	if (err)
		return err;

	trace_btrfs_space_reservation(fs_info, "transaction", trans->transid,
				      rsv_bytes, 1);
	trans->bytes_reserved += rsv_bytes;
	return 0;
}
/* /*
* Return this if we need to call truncate_block for the last bit of the * Return this if we need to call truncate_block for the last bit of the
* truncate. * truncate.
...@@ -4511,7 +4486,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -4511,7 +4486,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u64 bytes_deleted = 0; u64 bytes_deleted = 0;
bool be_nice = false; bool be_nice = false;
bool should_throttle = false; bool should_throttle = false;
bool should_end = false;
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
...@@ -4724,15 +4698,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -4724,15 +4698,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
break; break;
} }
if (btrfs_should_throttle_delayed_refs(trans))
btrfs_async_run_delayed_refs(fs_info,
trans->delayed_ref_updates * 2,
trans->transid, 0);
if (be_nice) { if (be_nice) {
if (truncate_space_check(trans, root,
extent_num_bytes)) {
should_end = true;
}
if (btrfs_should_throttle_delayed_refs(trans)) if (btrfs_should_throttle_delayed_refs(trans))
should_throttle = true; should_throttle = true;
} }
...@@ -4743,7 +4709,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -4743,7 +4709,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
if (path->slots[0] == 0 || if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot || path->slots[0] != pending_del_slot ||
should_throttle || should_end) { should_throttle) {
if (pending_del_nr) { if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, ret = btrfs_del_items(trans, root, path,
pending_del_slot, pending_del_slot,
...@@ -4755,23 +4721,24 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -4755,23 +4721,24 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
pending_del_nr = 0; pending_del_nr = 0;
} }
btrfs_release_path(path); btrfs_release_path(path);
if (should_throttle) {
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
ret = btrfs_run_delayed_refs(trans,
updates * 2);
if (ret)
break;
}
}
/* /*
* if we failed to refill our space rsv, bail out * We can generate a lot of delayed refs, so we need to
* and let the transaction restart * throttle every once and a while and make sure we're
* adding enough space to keep up with the work we are
* generating. Since we hold a transaction here we
* can't flush, and we don't want to FLUSH_LIMIT because
* we could have generated too many delayed refs to
* actually allocate, so just bail if we're short and
* let the normal reservation dance happen higher up.
*/ */
if (should_end) { if (should_throttle) {
ret = -EAGAIN; ret = btrfs_delayed_refs_rsv_refill(fs_info,
break; BTRFS_RESERVE_NO_FLUSH);
if (ret) {
ret = -EAGAIN;
break;
}
} }
goto search_again; goto search_again;
} else { } else {
...@@ -4797,18 +4764,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -4797,18 +4764,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
} }
btrfs_free_path(path); btrfs_free_path(path);
if (be_nice && bytes_deleted > SZ_32M && (ret >= 0 || ret == -EAGAIN)) {
unsigned long updates = trans->delayed_ref_updates;
int err;
if (updates) {
trans->delayed_ref_updates = 0;
err = btrfs_run_delayed_refs(trans, updates * 2);
if (err)
ret = err;
}
}
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment