Commit 686c4a5a authored by Qu Wenruo, committed by David Sterba

btrfs: qgroup: iterate qgroups without memory allocation for qgroup_reserve()

Qgroup heavily relies on ulist to go through all the involved
qgroups, but since the ulist is used inside the fs_info->qgroup_lock
spinlock, this means we're doing a lot of GFP_ATOMIC allocations.

This patch reduces the GFP_ATOMIC usage for qgroup_reserve() by
eliminating the memory allocation completely.

This is done by adding a btrfs_qgroup::iterator list_head, so that we
can chain all the involved qgroups into an on-stack list, thus
eliminating the need to allocate memory while holding the spinlock.
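
The pattern is the kernel's intrusive list: each object embeds its own
list node, so linking it onto an on-stack list head costs no allocation,
and an empty node doubles as a "not on any list" flag. Below is a
minimal standalone C sketch of that idea (editorial illustration, not
the btrfs code: the helpers are hand-rolled stand-ins for the kernel's
<linux/list.h> and the type names are made up):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    /* An on-stack list head that starts out pointing at itself (empty). */
    #define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

    static void INIT_LIST_HEAD(struct list_head *h)
    {
        h->next = h;
        h->prev = h;
    }

    static int list_empty(const struct list_head *h)
    {
        return h->next == h;
    }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    static void list_del_init(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
    }

    /* Each object embeds its own node: linking it needs no allocation. */
    struct item {
        int id;
        struct list_head iterator;
    };

    static void iterator_add(struct list_head *head, struct item *it)
    {
        /* A linked node is non-empty, so duplicates become no-ops. */
        if (!list_empty(&it->iterator))
            return;
        list_add_tail(&it->iterator, head);
    }

    int main(void)
    {
        struct item a = { .id = 1 }, b = { .id = 2 };
        LIST_HEAD(head);                /* list head lives on the stack */

        INIT_LIST_HEAD(&a.iterator);
        INIT_LIST_HEAD(&b.iterator);

        iterator_add(&head, &a);
        iterator_add(&head, &b);
        iterator_add(&head, &a);        /* already linked: skipped */

        /* Tear down with list_del_init() so every node reads as empty
         * again, which keeps the check in iterator_add() working for
         * the next iteration cycle. */
        while (!list_empty(&head)) {
            struct item *it = (struct item *)((char *)head.next -
                              offsetof(struct item, iterator));
            printf("item %d\n", it->id);
            list_del_init(&it->iterator);
        }
        return 0;
    }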

The only cost is slightly higher memory usage, but considering the
reduced GFP_ATOMIC usage in a hot path, it should still be acceptable.

Function qgroup_reserve() is the perfect starting point for this
conversion.
Reviewed-by: Boris Burkov <boris@bur.io>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2a3a1dd9
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -208,6 +208,7 @@ static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
 	INIT_LIST_HEAD(&qgroup->groups);
 	INIT_LIST_HEAD(&qgroup->members);
 	INIT_LIST_HEAD(&qgroup->dirty);
+	INIT_LIST_HEAD(&qgroup->iterator);
 	rb_link_node(&qgroup->node, parent, p);
 	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
@@ -1342,6 +1343,24 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
 	list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
 }
 
+static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
+{
+	if (!list_empty(&qgroup->iterator))
+		return;
+
+	list_add_tail(&qgroup->iterator, head);
+}
+
+static void qgroup_iterator_clean(struct list_head *head)
+{
+	while (!list_empty(head)) {
+		struct btrfs_qgroup *qgroup;
+
+		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
+		list_del_init(&qgroup->iterator);
+	}
+}
+
 /*
  * The easy accounting, we're updating qgroup relationship whose child qgroup
  * only has exclusive extents.
@@ -3125,8 +3144,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 ref_root = root->root_key.objectid;
 	int ret = 0;
-	struct ulist_node *unode;
-	struct ulist_iterator uiter;
+	LIST_HEAD(qgroup_list);
 
 	if (!is_fstree(ref_root))
 		return 0;
@@ -3146,49 +3164,28 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
 	if (!qgroup)
 		goto out;
 
-	/*
-	 * in a first step, we check all affected qgroups if any limits would
-	 * be exceeded
-	 */
-	ulist_reinit(fs_info->qgroup_ulist);
-	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
-			qgroup_to_aux(qgroup), GFP_ATOMIC);
-	if (ret < 0)
-		goto out;
-	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
-		struct btrfs_qgroup *qg;
+	qgroup_iterator_add(&qgroup_list, qgroup);
+	list_for_each_entry(qgroup, &qgroup_list, iterator) {
 		struct btrfs_qgroup_list *glist;
 
-		qg = unode_aux_to_qgroup(unode);
-
-		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
+		if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
 			ret = -EDQUOT;
 			goto out;
 		}
 
-		list_for_each_entry(glist, &qg->groups, next_group) {
-			ret = ulist_add(fs_info->qgroup_ulist,
-					glist->group->qgroupid,
-					qgroup_to_aux(glist->group), GFP_ATOMIC);
-			if (ret < 0)
-				goto out;
-		}
+		list_for_each_entry(glist, &qgroup->groups, next_group)
+			qgroup_iterator_add(&qgroup_list, glist->group);
 	}
 	ret = 0;
 
 	/*
 	 * no limits exceeded, now record the reservation into all qgroups
 	 */
-	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
-		struct btrfs_qgroup *qg;
-
-		qg = unode_aux_to_qgroup(unode);
-		qgroup_rsv_add(fs_info, qg, num_bytes, type);
-	}
+	list_for_each_entry(qgroup, &qgroup_list, iterator)
+		qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
 
 out:
+	qgroup_iterator_clean(&qgroup_list);
 	spin_unlock(&fs_info->qgroup_lock);
 	return ret;
 }

--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -220,6 +220,15 @@ struct btrfs_qgroup {
 	struct list_head groups;  /* groups this group is member of */
 	struct list_head members; /* groups that are members of this group */
 	struct list_head dirty;   /* dirty groups */
+
+	/*
+	 * For qgroup iteration usage.
+	 *
+	 * The iteration list should always be empty until qgroup_iterator_add()
+	 * is called. And should be reset to empty after the iteration is
+	 * finished.
+	 */
+	struct list_head iterator;
 	struct rb_node node;	  /* tree of qgroups */
 	/*
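
One property of the converted loop is worth spelling out:
list_for_each_entry() stops only when it wraps back around to the list
head, so qgroups that qgroup_iterator_add() appends at the tail mid-walk
are visited later in the same loop. Seeding the list with one qgroup and
appending each visited qgroup's parents therefore covers the whole
ancestry exactly once, with no recursion and no allocation. A simplified
standalone sketch of that shape (editorial illustration with made-up
names: a plain next pointer and a linked flag stand in for the kernel's
list_head and its list_empty() dedupe check, and each node here has a
single parent where a qgroup may have several):

    #include <stdio.h>

    struct node {
        const char *name;
        struct node *parent; /* one parent here; a qgroup may have several */
        struct node *next;   /* embedded link; stand-in for list_head */
        int linked;          /* stand-in for the !list_empty() check */
    };

    int main(void)
    {
        struct node top  = { "top",  NULL, NULL, 0 };
        struct node mid  = { "mid",  &top, NULL, 0 };
        struct node leaf = { "leaf", &mid, NULL, 0 };
        struct node *tail = &leaf, *cur;

        leaf.linked = 1;
        /* Walk the list while appending each entry's parent at the
         * tail; appended entries are picked up later in this same
         * loop, giving a breadth-first walk of the hierarchy. */
        for (cur = &leaf; cur; cur = cur->next) {
            printf("reserving in %s\n", cur->name);
            if (cur->parent && !cur->parent->linked) {
                cur->parent->linked = 1; /* queue each ancestor once */
                tail->next = cur->parent;
                tail = cur->parent;
            }
        }
        return 0;
    }

The dedupe check is what makes shared ancestors safe: a qgroup reachable
through two different parents is queued only once, just as a second
qgroup_iterator_add() on an already-linked qgroup is a no-op.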