Commit d3fbb00f authored by Josef Bacik, committed by David Sterba

btrfs: embed data_ref and tree_ref in btrfs_delayed_ref_node

We have been embedding btrfs_delayed_ref_node in the
btrfs_delayed_data_ref and btrfs_delayed_tree_ref, and then we have two
sets of cachep's and a variety of handling that is awkward because of
this separation.

Instead, union these two members inside of btrfs_delayed_ref_node and
make that the first-class object.  This allows us to go down to one
cachep for our delayed ref nodes instead of two.
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 0eea355f
...@@ -16,8 +16,7 @@ ...@@ -16,8 +16,7 @@
#include "fs.h" #include "fs.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep; struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep; struct kmem_cache *btrfs_delayed_ref_node_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep; struct kmem_cache *btrfs_delayed_extent_op_cachep;
/* /*
* delayed back reference update tracking. For subvolume trees * delayed back reference update tracking. For subvolume trees
...@@ -1082,26 +1081,26 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans, ...@@ -1082,26 +1081,26 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID); is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID);
ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action); ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
if (!ref) if (!node)
return -ENOMEM; return -ENOMEM;
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) { if (!head_ref) {
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
return -ENOMEM; return -ENOMEM;
} }
if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) { if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS); record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) { if (!record) {
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
return -ENOMEM; return -ENOMEM;
} }
} }
node = btrfs_delayed_tree_ref_to_node(ref); ref = btrfs_delayed_node_to_tree_ref(node);
if (parent) if (parent)
ref_type = BTRFS_SHARED_BLOCK_REF_KEY; ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
...@@ -1143,7 +1142,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans, ...@@ -1143,7 +1142,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
action == BTRFS_ADD_DELAYED_EXTENT ? action == BTRFS_ADD_DELAYED_EXTENT ?
BTRFS_ADD_DELAYED_REF : action); BTRFS_ADD_DELAYED_REF : action);
if (merged) if (merged)
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
if (qrecord_inserted) if (qrecord_inserted)
btrfs_qgroup_trace_extent_post(trans, record); btrfs_qgroup_trace_extent_post(trans, record);
...@@ -1176,11 +1175,11 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans, ...@@ -1176,11 +1175,11 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u8 ref_type; u8 ref_type;
ASSERT(generic_ref->type == BTRFS_REF_DATA && action); ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS); node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
if (!ref) if (!node)
return -ENOMEM; return -ENOMEM;
node = btrfs_delayed_data_ref_to_node(ref); ref = btrfs_delayed_node_to_data_ref(node);
if (parent) if (parent)
ref_type = BTRFS_SHARED_DATA_REF_KEY; ref_type = BTRFS_SHARED_DATA_REF_KEY;
...@@ -1196,14 +1195,14 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans, ...@@ -1196,14 +1195,14 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) { if (!head_ref) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
return -ENOMEM; return -ENOMEM;
} }
if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) { if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS); record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) { if (!record) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
kmem_cache_free(btrfs_delayed_ref_head_cachep, kmem_cache_free(btrfs_delayed_ref_head_cachep,
head_ref); head_ref);
return -ENOMEM; return -ENOMEM;
...@@ -1237,7 +1236,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans, ...@@ -1237,7 +1236,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
action == BTRFS_ADD_DELAYED_EXTENT ? action == BTRFS_ADD_DELAYED_EXTENT ?
BTRFS_ADD_DELAYED_REF : action); BTRFS_ADD_DELAYED_REF : action);
if (merged) if (merged)
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
if (qrecord_inserted) if (qrecord_inserted)
...@@ -1280,18 +1279,7 @@ void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref) ...@@ -1280,18 +1279,7 @@ void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{ {
if (refcount_dec_and_test(&ref->refs)) { if (refcount_dec_and_test(&ref->refs)) {
WARN_ON(!RB_EMPTY_NODE(&ref->ref_node)); WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
switch (ref->type) { kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
case BTRFS_TREE_BLOCK_REF_KEY:
case BTRFS_SHARED_BLOCK_REF_KEY:
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
case BTRFS_SHARED_DATA_REF_KEY:
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
break;
default:
BUG();
}
} }
} }
...@@ -1310,8 +1298,7 @@ btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 byt ...@@ -1310,8 +1298,7 @@ btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 byt
void __cold btrfs_delayed_ref_exit(void) void __cold btrfs_delayed_ref_exit(void)
{ {
kmem_cache_destroy(btrfs_delayed_ref_head_cachep); kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
kmem_cache_destroy(btrfs_delayed_tree_ref_cachep); kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
kmem_cache_destroy(btrfs_delayed_extent_op_cachep); kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
} }
...@@ -1321,12 +1308,8 @@ int __init btrfs_delayed_ref_init(void) ...@@ -1321,12 +1308,8 @@ int __init btrfs_delayed_ref_init(void)
if (!btrfs_delayed_ref_head_cachep) if (!btrfs_delayed_ref_head_cachep)
goto fail; goto fail;
btrfs_delayed_tree_ref_cachep = KMEM_CACHE(btrfs_delayed_tree_ref, 0); btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
if (!btrfs_delayed_tree_ref_cachep) if (!btrfs_delayed_ref_node_cachep)
goto fail;
btrfs_delayed_data_ref_cachep = KMEM_CACHE(btrfs_delayed_data_ref, 0);
if (!btrfs_delayed_data_ref_cachep)
goto fail; goto fail;
btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0); btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
......
...@@ -30,6 +30,19 @@ enum btrfs_delayed_ref_action { ...@@ -30,6 +30,19 @@ enum btrfs_delayed_ref_action {
BTRFS_UPDATE_DELAYED_HEAD, BTRFS_UPDATE_DELAYED_HEAD,
} __packed; } __packed;
struct btrfs_delayed_tree_ref {
u64 root;
u64 parent;
int level;
};
struct btrfs_delayed_data_ref {
u64 root;
u64 parent;
u64 objectid;
u64 offset;
};
struct btrfs_delayed_ref_node { struct btrfs_delayed_ref_node {
struct rb_node ref_node; struct rb_node ref_node;
/* /*
...@@ -64,6 +77,11 @@ struct btrfs_delayed_ref_node { ...@@ -64,6 +77,11 @@ struct btrfs_delayed_ref_node {
unsigned int action:8; unsigned int action:8;
unsigned int type:8; unsigned int type:8;
union {
struct btrfs_delayed_tree_ref tree_ref;
struct btrfs_delayed_data_ref data_ref;
};
}; };
struct btrfs_delayed_extent_op { struct btrfs_delayed_extent_op {
...@@ -151,21 +169,6 @@ struct btrfs_delayed_ref_head { ...@@ -151,21 +169,6 @@ struct btrfs_delayed_ref_head {
bool processing; bool processing;
}; };
struct btrfs_delayed_tree_ref {
struct btrfs_delayed_ref_node node;
u64 root;
u64 parent;
int level;
};
struct btrfs_delayed_data_ref {
struct btrfs_delayed_ref_node node;
u64 root;
u64 parent;
u64 objectid;
u64 offset;
};
enum btrfs_delayed_ref_flags { enum btrfs_delayed_ref_flags {
/* Indicate that we are flushing delayed refs for the commit */ /* Indicate that we are flushing delayed refs for the commit */
BTRFS_DELAYED_REFS_FLUSHING, BTRFS_DELAYED_REFS_FLUSHING,
...@@ -279,8 +282,7 @@ struct btrfs_ref { ...@@ -279,8 +282,7 @@ struct btrfs_ref {
}; };
extern struct kmem_cache *btrfs_delayed_ref_head_cachep; extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep; extern struct kmem_cache *btrfs_delayed_ref_node_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep; extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
int __init btrfs_delayed_ref_init(void); int __init btrfs_delayed_ref_init(void);
...@@ -404,25 +406,25 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info); ...@@ -404,25 +406,25 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
static inline struct btrfs_delayed_tree_ref * static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node) btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{ {
return container_of(node, struct btrfs_delayed_tree_ref, node); return &node->tree_ref;
} }
static inline struct btrfs_delayed_data_ref * static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node) btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{ {
return container_of(node, struct btrfs_delayed_data_ref, node); return &node->data_ref;
} }
static inline struct btrfs_delayed_ref_node * static inline struct btrfs_delayed_ref_node *
btrfs_delayed_tree_ref_to_node(struct btrfs_delayed_tree_ref *ref) btrfs_delayed_tree_ref_to_node(struct btrfs_delayed_tree_ref *ref)
{ {
return &ref->node; return container_of(ref, struct btrfs_delayed_ref_node, tree_ref);
} }
static inline struct btrfs_delayed_ref_node * static inline struct btrfs_delayed_ref_node *
btrfs_delayed_data_ref_to_node(struct btrfs_delayed_data_ref *ref) btrfs_delayed_data_ref_to_node(struct btrfs_delayed_data_ref *ref)
{ {
return &ref->node; return container_of(ref, struct btrfs_delayed_ref_node, data_ref);
} }
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment