Commit 253bf575 authored by Gabriel Niebler, committed by David Sterba

btrfs: turn delayed_nodes_tree into an XArray

… in the btrfs_root struct and adjust all usages of this object to use
the XArray API, because it is notionally easier to use and understand,
as it provides array semantics, and also takes care of locking for us,
further simplifying the code.

Also use the opportunity to do some light refactoring.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Gabriel Niebler <gniebler@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 719fae89
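
For readers who have not used the XArray API before, here is a minimal, self-contained sketch of the calls this conversion relies on (illustrative only, not part of the patch; the demo_* names are invented). Unlike the radix tree, the XArray needs no preload step and carries its own internal lock, although btrfs keeps taking root->inode_lock around these calls for its own consistency rules.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/xarray.h>

/* A standalone XArray plus a dummy payload to store in it. */
static DEFINE_XARRAY(demo_nodes);
static int demo_payload;

static int demo_xarray_usage(void)
{
        unsigned long index;
        void *entry;
        int ret;

        /* Store at index 42; fails with -EBUSY if the slot is already occupied. */
        ret = xa_insert(&demo_nodes, 42, &demo_payload, GFP_NOFS);
        if (ret)
                return ret;

        /* Plain lookup; returns NULL when nothing is stored at the index. */
        entry = xa_load(&demo_nodes, 42);
        if (!entry)
                return -ENOENT;

        /* Walk every present entry, starting the iteration at index 0. */
        xa_for_each_start(&demo_nodes, index, entry, 0) {
                /* ... use entry ... */
        }

        /* Remove the entry; the previous contents are returned. */
        entry = xa_erase(&demo_nodes, 42);
        return 0;
}
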
@@ -1222,10 +1222,10 @@ struct btrfs_root {
         struct rb_root inode_tree;
         /*
-         * radix tree that keeps track of delayed nodes of every inode,
-         * protected by inode_lock
+         * Xarray that keeps track of delayed nodes of every inode, protected
+         * by inode_lock
          */
-        struct radix_tree_root delayed_nodes_tree;
+        struct xarray delayed_nodes;
         /*
          * right now this just gets used so that a root has its own devid
          * for stat. It may be used for more later
...
@@ -78,7 +78,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
         }
         spin_lock(&root->inode_lock);
-        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+        node = xa_load(&root->delayed_nodes, ino);
         if (node) {
                 if (btrfs_inode->delayed_node) {
@@ -90,9 +90,9 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
                 /*
                  * It's possible that we're racing into the middle of removing
-                 * this node from the radix tree. In this case, the refcount
+                 * this node from the xarray. In this case, the refcount
                  * was zero and it should never go back to one. Just return
-                 * NULL like it was never in the radix at all; our release
+                 * NULL like it was never in the xarray at all; our release
                  * function is in the process of removing it.
                  *
                  * Some implementations of refcount_inc refuse to bump the
@@ -100,7 +100,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
                  * here, refcount_inc() may decide to just WARN_ONCE() instead
                  * of actually bumping the refcount.
                  *
-                 * If this node is properly in the radix, we want to bump the
+                 * If this node is properly in the xarray, we want to bump the
                  * refcount twice, once for the inode and once for this get
                  * operation.
                  */
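
The refcounting caveat spelled out in the comment above boils down to a common pattern: only take references on an object found in the structure if its refcount has not already dropped to zero on the release path. A simplified, hypothetical sketch of that guard (not the actual btrfs_get_delayed_node() body):

#include <linux/refcount.h>

struct demo_node {
        refcount_t refs;
};

/*
 * Hypothetical helper: grab the two references (one for the inode cache,
 * one for the caller) only if the node is still alive; otherwise behave
 * as if the lookup had found nothing.
 */
static struct demo_node *demo_try_get(struct demo_node *node)
{
        if (!refcount_inc_not_zero(&node->refs))
                return NULL;    /* release path already owns it */
        refcount_inc(&node->refs);
        return node;
}
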
@@ -128,7 +128,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
         u64 ino = btrfs_ino(btrfs_inode);
         int ret;
 
-again:
+        do {
                 node = btrfs_get_delayed_node(btrfs_inode);
                 if (node)
                         return node;
@@ -138,26 +138,20 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                         return ERR_PTR(-ENOMEM);
                 btrfs_init_delayed_node(node, root, ino);
 
-                /* cached in the btrfs inode and can be accessed */
+                /* Cached in the inode and can be accessed */
                 refcount_set(&node->refs, 2);
 
-                ret = radix_tree_preload(GFP_NOFS);
-                if (ret) {
-                        kmem_cache_free(delayed_node_cache, node);
-                        return ERR_PTR(ret);
-                }
-
                 spin_lock(&root->inode_lock);
-                ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
-                if (ret == -EEXIST) {
+                ret = xa_insert(&root->delayed_nodes, ino, node, GFP_NOFS);
+                if (ret) {
                         spin_unlock(&root->inode_lock);
                         kmem_cache_free(delayed_node_cache, node);
-                        radix_tree_preload_end();
-                        goto again;
+                        if (ret != -EBUSY)
+                                return ERR_PTR(ret);
                 }
+        } while (ret);
+
         btrfs_inode->delayed_node = node;
         spin_unlock(&root->inode_lock);
-        radix_tree_preload_end();
 
         return node;
 }
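
One detail worth calling out in the new loop above: xa_insert() reports an already-populated slot as -EBUSY rather than the -EEXIST that radix_tree_insert() returned, which is why the error check changed shape. The patch itself frees the new node and retries the full lookup so the refcounts are taken correctly; the hypothetical helper below (not from the patch) only illustrates the error-code difference.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_index);

/*
 * Hypothetical helper: try to insert @new_entry at @index and, if another
 * thread got there first (-EBUSY), return the entry that is already
 * present instead of failing. Other errors (e.g. -ENOMEM) propagate.
 */
static void *demo_get_or_insert(unsigned long index, void *new_entry)
{
        int ret = xa_insert(&demo_index, index, new_entry, GFP_NOFS);

        if (ret == -EBUSY)
                return xa_load(&demo_index, index);
        if (ret)
                return ERR_PTR(ret);
        return new_entry;
}
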
@@ -276,8 +270,7 @@ static void __btrfs_release_delayed_node(
                  * back up. We can delete it now.
                  */
                 ASSERT(refcount_read(&delayed_node->refs) == 0);
-                radix_tree_delete(&root->delayed_nodes_tree,
-                                  delayed_node->inode_id);
+                xa_erase(&root->delayed_nodes, delayed_node->inode_id);
                 spin_unlock(&root->inode_lock);
                 kmem_cache_free(delayed_node_cache, delayed_node);
         }
@@ -1870,34 +1863,35 @@ void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 {
-        u64 inode_id = 0;
+        unsigned long index = 0;
+        struct btrfs_delayed_node *delayed_node;
         struct btrfs_delayed_node *delayed_nodes[8];
-        int i, n;
 
         while (1) {
+                int n = 0;
+
                 spin_lock(&root->inode_lock);
-                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
-                                           (void **)delayed_nodes, inode_id,
-                                           ARRAY_SIZE(delayed_nodes));
-                if (!n) {
+                if (xa_empty(&root->delayed_nodes)) {
                         spin_unlock(&root->inode_lock);
-                        break;
+                        return;
                 }
-                inode_id = delayed_nodes[n - 1]->inode_id + 1;
-                for (i = 0; i < n; i++) {
+                xa_for_each_start(&root->delayed_nodes, index, delayed_node, index) {
                         /*
                          * Don't increase refs in case the node is dead and
                          * about to be removed from the tree in the loop below
                          */
-                        if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
-                                delayed_nodes[i] = NULL;
+                        if (refcount_inc_not_zero(&delayed_node->refs)) {
+                                delayed_nodes[n] = delayed_node;
+                                n++;
+                        }
+                        if (n >= ARRAY_SIZE(delayed_nodes))
+                                break;
                 }
+                index++;
                 spin_unlock(&root->inode_lock);
-                for (i = 0; i < n; i++) {
-                        if (!delayed_nodes[i])
-                                continue;
+                for (int i = 0; i < n; i++) {
                         __btrfs_kill_delayed_node(delayed_nodes[i]);
                         btrfs_release_delayed_node(delayed_nodes[i]);
                 }
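
The rewritten btrfs_kill_all_delayed_nodes() above gathers up to eight live nodes per pass under the spinlock, and the index++ after the walk makes the next pass resume just past the last slot that was examined, so a node batched in one pass is not visited again in the next. A simplified, hypothetical sketch of that batch-and-restart pattern (generic entries, not the btrfs processing):

#include <linux/kernel.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_items);

/* Hypothetical: drain an XArray a batch at a time, restarting the walk. */
static void demo_drain_in_batches(void)
{
        unsigned long batch[8];
        unsigned long index = 0;
        void *entry;

        while (!xa_empty(&demo_items)) {
                int n = 0;

                /* Collect up to ARRAY_SIZE(batch) indices, resuming at 'index'. */
                xa_for_each_start(&demo_items, index, entry, index) {
                        batch[n++] = index;
                        if (n >= ARRAY_SIZE(batch))
                                break;
                }
                /* Step past the last slot examined so it is not revisited. */
                index++;

                /* Process the batch outside the walk; here we simply erase the entries. */
                for (int i = 0; i < n; i++)
                        xa_erase(&demo_items, batch[i]);
        }
}
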
...
@@ -1160,7 +1160,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
         root->nr_delalloc_inodes = 0;
         root->nr_ordered_extents = 0;
         root->inode_tree = RB_ROOT;
-        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
+        xa_init_flags(&root->delayed_nodes, GFP_ATOMIC);
 
         btrfs_init_root_block_rsv(root);
...
@@ -3881,7 +3881,7 @@ static int btrfs_read_locked_inode(struct inode *inode,
          * cache.
          *
          * This is required for both inode re-read from disk and delayed inode
-         * in delayed_nodes_tree.
+         * in the delayed_nodes xarray.
          */
         if (BTRFS_I(inode)->last_trans == fs_info->generation)
                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
...