Commit 51773bec authored by Qu Wenruo, committed by Chris Mason

btrfs: qgroup: Avoid calling btrfs_free_reserved_data_space in clear_bit_hook

In clear_bit_hook, qgroup reserved data is already handled quite well,
either released by finish_ordered_io or invalidatepage.

So calling btrfs_qgroup_free_data() here is pointless, and since
btrfs_qgroup_free_data() itself locks the io_tree, it can't be called
with the io_tree lock held anyway.
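The lockdep warning comes from a recursive acquisition: clear_bit_hook()
is called with the io_tree lock already held, while
btrfs_qgroup_free_data() takes that same lock. Below is a minimal
userspace sketch of that pattern (a pthread mutex standing in for the
kernel lock; the function bodies and names are illustrative stand-ins,
not btrfs code):

	#include <pthread.h>

	/* stand-in for the io_tree lock */
	static pthread_mutex_t io_tree_lock = PTHREAD_MUTEX_INITIALIZER;

	/* stand-in for btrfs_qgroup_free_data(): it takes the io_tree lock */
	static void qgroup_free_data(void)
	{
		pthread_mutex_lock(&io_tree_lock);	/* second acquisition -> deadlock */
		/* ... drop the qgroup reservation for the range ... */
		pthread_mutex_unlock(&io_tree_lock);
	}

	/* stand-in for clear_bit_hook(): it runs with the io_tree lock held */
	static void clear_bit_hook(void)
	{
		pthread_mutex_lock(&io_tree_lock);
		qgroup_free_data();	/* never returns: relocks io_tree_lock */
		pthread_mutex_unlock(&io_tree_lock);
	}

	int main(void)
	{
		clear_bit_hook();
		return 0;
	}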

This patch adds a new function,
btrfs_free_reserved_data_space_noquota(), for clear_bit_hook() to call
instead, which silences the lockdep warning.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent 14524a84
@@ -3455,6 +3455,8 @@ enum btrfs_reserve_flush_enum {
 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
+void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
+					    u64 len);
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 				  struct btrfs_root *root);
 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
@@ -4066,10 +4066,12 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
  * Called if we need to clear a data reservation for this inode,
  * normally in an error case.
  *
- * This one will handle the per-inode data rsv map for the accurate
- * reserved space framework.
+ * This one will *NOT* use the accurate qgroup reserved space API; it is only
+ * for cases where we can't sleep and are sure it won't affect the qgroup
+ * reserved space.  Like clear_bit_hook().
  */
-void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
+void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
+					    u64 len)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_space_info *data_sinfo;
@@ -4079,13 +4081,6 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
 	      round_down(start, root->sectorsize);
 	start = round_down(start, root->sectorsize);
 
-	/*
-	 * Free any reserved qgroup data space first.
-	 * As it will alloc memory, we can't do it with the data sinfo
-	 * spinlock held.
-	 */
-	btrfs_qgroup_free_data(inode, start, len);
-
 	data_sinfo = root->fs_info->data_sinfo;
 	spin_lock(&data_sinfo->lock);
 	if (WARN_ON(data_sinfo->bytes_may_use < len))
@@ -4097,6 +4092,19 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
 	spin_unlock(&data_sinfo->lock);
 }
 
+/*
+ * Called if we need to clear a data reservation for this inode,
+ * normally in an error case.
+ *
+ * This one will handle the per-inode data rsv map for the accurate
+ * reserved space framework.
+ */
+void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
+{
+	btrfs_free_reserved_data_space_noquota(inode, start, len);
+	btrfs_qgroup_free_data(inode, start, len);
+}
+
 static void force_metadata_allocation(struct btrfs_fs_info *info)
 {
 	struct list_head *head = &info->space_info;
@@ -1776,8 +1776,8 @@ static void btrfs_clear_bit_hook(struct inode *inode,
 
 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
 		    && do_list && !(state->state & EXTENT_NORESERVE))
-			btrfs_free_reserved_data_space(inode, state->start,
-						       len);
+			btrfs_free_reserved_data_space_noquota(inode,
+					state->start, len);
 
 		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
 				     root->fs_info->delalloc_batch);