Commit 8287475a authored by Qu Wenruo, committed by David Sterba

btrfs: qgroup: Use root::qgroup_meta_rsv_* to record qgroup meta reserved space

For the quota disabled->enabled case, it is possible that at reservation
time quota was not enabled, so no bytes were really reserved, while at
release time quota was enabled, so we try to release bytes we never
really owned.

Such a situation can cause a metadata reservation underflow for both
reservation types, though it is less likely for the per-trans type since
enabling quota commits the transaction.

To address this, record the qgroup meta reserved bytes in
root::qgroup_meta_rsv_pertrans and root::qgroup_meta_rsv_prealloc, so
that at release time we never free bytes we did not actually reserve.

For DATA, this is already handled by the io_tree, so nothing needs to be
done there.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 4f5427cc
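The idea behind the diff below, shown as a minimal user-space sketch. This is not part of the commit; the names `root_meta_rsv`, `reserve_meta()` and `free_meta()` are illustrative stand-ins, not the kernel API. Reserved bytes are only recorded while quota is enabled, and a release is clamped to what was recorded, so bytes "reserved" before quota was enabled can never be freed back into the qgroups.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative user-space model only -- not the kernel code.  One counter
 * stands in for root->qgroup_meta_rsv_prealloc / _pertrans, and the
 * spinlock is omitted since this sketch is single-threaded.
 */
struct root_meta_rsv {
	uint64_t recorded;	/* bytes this root really reserved */
};

static int quota_enabled;	/* models BTRFS_FS_QUOTA_ENABLED */

/* Reserve path: nothing is recorded while quota is disabled. */
static void reserve_meta(struct root_meta_rsv *r, uint64_t bytes)
{
	if (!quota_enabled)
		return;		/* no bytes were really reserved */
	r->recorded += bytes;	/* mirrors add_root_meta_rsv() */
}

/* Release path: clamp to the recorded amount, mirroring sub_root_meta_rsv(). */
static uint64_t free_meta(struct root_meta_rsv *r, uint64_t bytes)
{
	if (bytes > r->recorded)
		bytes = r->recorded;
	r->recorded -= bytes;
	return bytes;		/* only this much is passed on to the qgroups */
}

int main(void)
{
	struct root_meta_rsv r = { 0 };

	reserve_meta(&r, 16384);	/* quota still disabled: not recorded */
	quota_enabled = 1;		/* quota gets enabled later */

	/* Without the clamp this would underflow the qgroup reservation. */
	printf("freed %llu of 16384 requested bytes\n",
	       (unsigned long long)free_meta(&r, 16384));
	return 0;
}
```

Running the sketch prints `freed 0 of 16384 requested bytes`, which is exactly the pre-enable reservation that the clamp keeps out of the qgroup accounting.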
@@ -1264,6 +1264,11 @@ struct btrfs_root {
 	int send_in_progress;
 	struct btrfs_subvolume_writers *subv_writers;
 	atomic_t will_be_snapshotted;
+
+	/* For qgroup metadata reserved space */
+	spinlock_t qgroup_meta_rsv_lock;
+	u64 qgroup_meta_rsv_pertrans;
+	u64 qgroup_meta_rsv_prealloc;
 };
 
 struct btrfs_file_private {
...
@@ -1147,6 +1147,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	spin_lock_init(&root->accounting_lock);
 	spin_lock_init(&root->log_extents_lock[0]);
 	spin_lock_init(&root->log_extents_lock[1]);
+	spin_lock_init(&root->qgroup_meta_rsv_lock);
 	mutex_init(&root->objectid_mutex);
 	mutex_init(&root->log_mutex);
 	mutex_init(&root->ordered_extent_mutex);
...
@@ -2538,11 +2538,11 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 	if (!qgroup)
 		goto out;
 
-	/*
-	 * We're freeing all pertrans rsv, get current value from level 0
-	 * qgroup as real num_bytes to free.
-	 */
 	if (num_bytes == (u64)-1)
+		/*
+		 * We're freeing all pertrans rsv, get reserved value from
+		 * level 0 qgroup as real num_bytes to free.
+		 */
 		num_bytes = qgroup->rsv.values[type];
 
 	ulist_reinit(fs_info->qgroup_ulist);
@@ -3087,6 +3087,46 @@ int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
 	return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
 }
 
+static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
+			      enum btrfs_qgroup_rsv_type type)
+{
+	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
+	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
+		return;
+	if (num_bytes == 0)
+		return;
+
+	spin_lock(&root->qgroup_meta_rsv_lock);
+	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
+		root->qgroup_meta_rsv_prealloc += num_bytes;
+	else
+		root->qgroup_meta_rsv_pertrans += num_bytes;
+	spin_unlock(&root->qgroup_meta_rsv_lock);
+}
+
+static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
+			     enum btrfs_qgroup_rsv_type type)
+{
+	if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
+	    type != BTRFS_QGROUP_RSV_META_PERTRANS)
+		return 0;
+	if (num_bytes == 0)
+		return 0;
+
+	spin_lock(&root->qgroup_meta_rsv_lock);
+	if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
+		num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
+				  num_bytes);
+		root->qgroup_meta_rsv_prealloc -= num_bytes;
+	} else {
+		num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
+				  num_bytes);
+		root->qgroup_meta_rsv_pertrans -= num_bytes;
+	}
+	spin_unlock(&root->qgroup_meta_rsv_lock);
+	return num_bytes;
+}
+
 int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
 				enum btrfs_qgroup_rsv_type type, bool enforce)
 {
@@ -3102,6 +3142,15 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
 	ret = qgroup_reserve(root, num_bytes, enforce, type);
 	if (ret < 0)
 		return ret;
+	/*
+	 * Record what we have reserved into root.
+	 *
+	 * To avoid quota disabled->enabled underflow.
+	 * In that case, we may try to free space we haven't reserved
+	 * (since quota was disabled), so record what we reserved into root.
+	 * And ensure later release won't underflow this number.
+	 */
+	add_root_meta_rsv(root, num_bytes, type);
 	return ret;
 }
@@ -3129,6 +3178,12 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
 	    !is_fstree(root->objectid))
 		return;
 
+	/*
+	 * reservation for META_PREALLOC can happen before quota is enabled,
+	 * which can lead to underflow.
+	 * Here ensure we will only free what we really have reserved.
+	 */
+	num_bytes = sub_root_meta_rsv(root, num_bytes, type);
 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
 	trace_qgroup_meta_reserve(root, -(s64)num_bytes);
 	btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type);
@@ -3187,6 +3242,9 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid))
 		return;
 
+	/* Same as btrfs_qgroup_free_meta_prealloc() */
+	num_bytes = sub_root_meta_rsv(root, num_bytes,
+				      BTRFS_QGROUP_RSV_META_PREALLOC);
 	qgroup_convert_meta(fs_info, root->objectid, num_bytes);
 }
...
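The convert path in the last hunk relies on the same clamp before handing bytes to qgroup_convert_meta(). A rough user-space model of just that per-root bookkeeping; the name `sub_prealloc()` is a hypothetical stand-in for sub_root_meta_rsv() restricted to the PREALLOC type, not the kernel function itself:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the per-root prealloc record only. */
struct root_meta_rsv {
	uint64_t prealloc;	/* models root->qgroup_meta_rsv_prealloc */
};

/* Clamp-and-subtract, analogous to sub_root_meta_rsv() for PREALLOC. */
static uint64_t sub_prealloc(struct root_meta_rsv *r, uint64_t bytes)
{
	if (bytes > r->prealloc)
		bytes = r->prealloc;
	r->prealloc -= bytes;
	return bytes;
}

int main(void)
{
	/* 32K was recorded while quota was enabled; 16K more predates it. */
	struct root_meta_rsv r = { .prealloc = 32768 };

	/* A convert request for 48K only converts the recorded 32K. */
	uint64_t converted = sub_prealloc(&r, 49152);
	printf("converted %llu bytes, %llu left recorded\n",
	       (unsigned long long)converted,
	       (unsigned long long)r.prealloc);
	return 0;
}
```

Note that the diff only subtracts from the prealloc record on convert; the per-trans pool is released wholesale via the (u64)-1 path in btrfs_qgroup_free_refroot(), which reads the reserved value straight from the level 0 qgroup, as the comment in that hunk describes.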