Commit 017e5369 authored by Chris Mason

Btrfs: Leaf reference cache update

This changes the reference cache to make a single cache per root
instead of one cache per transaction, and to key by the byte number
of the disk block instead of the keys inside.

This makes it much less likely to have cache misses if a snapshot
or something has an extra reference on a higher node or a leaf while
the first transaction that added the leaf into the cache is dropping.

Some throttling is added to functions that free blocks heavily so they
wait for old transactions to drop.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 31153d81
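The diff below contains the kernel implementation. As a rough, self-contained illustration of the new keying scheme (the per-root cache is ordered and searched by the leaf's disk byte number rather than by the keys stored inside the leaf), here is a small userspace sketch. It uses a plain binary search tree and hypothetical simplified names (leaf_ref, ref_insert, ref_lookup) instead of the kernel's rb-tree and struct btrfs_leaf_ref; it is not the code in this commit.

/*
 * Simplified, userspace-only sketch of a leaf reference cache keyed by
 * the leaf's disk byte number (bytenr).  The kernel version in the diff
 * below uses an rb-tree under a spinlock; names here are hypothetical.
 */
#include <stdio.h>

struct leaf_ref {
	unsigned long long bytenr;   /* disk byte number of the leaf: the only key */
	int nritems;                 /* payload: number of cached extent records */
	struct leaf_ref *left, *right;
};

/* Insert a ref ordered by bytenr alone; a duplicate bytenr is rejected. */
static int ref_insert(struct leaf_ref **rootp, struct leaf_ref *ref)
{
	while (*rootp) {
		if (ref->bytenr < (*rootp)->bytenr)
			rootp = &(*rootp)->left;
		else if (ref->bytenr > (*rootp)->bytenr)
			rootp = &(*rootp)->right;
		else
			return -1;   /* the kernel code returns -EEXIST here */
	}
	ref->left = ref->right = NULL;
	*rootp = ref;
	return 0;
}

/* Look a ref up by bytenr; no comparison against the keys in the leaf. */
static struct leaf_ref *ref_lookup(struct leaf_ref *root, unsigned long long bytenr)
{
	while (root) {
		if (bytenr < root->bytenr)
			root = root->left;
		else if (bytenr > root->bytenr)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

int main(void)
{
	struct leaf_ref a = { .bytenr = 16384, .nritems = 3 };
	struct leaf_ref b = { .bytenr = 32768, .nritems = 7 };
	struct leaf_ref *root = NULL, *found;

	ref_insert(&root, &a);
	ref_insert(&root, &b);
	found = ref_lookup(root, 32768);
	printf("bytenr 32768 -> nritems=%d\n", found ? found->nritems : -1);
	return 0;
}

Because the cache key is the block's byte number, a hit no longer depends on which item keys happen to sit in the leaf, which is what lets a single per-root cache survive across transactions.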
@@ -594,7 +594,6 @@ struct btrfs_fs_info {
 	spinlock_t ref_cache_lock;
 	u64 total_ref_cache_size;
-	u64 running_ref_cache_size;
 	u64 avail_data_alloc_bits;
 	u64 avail_metadata_alloc_bits;
@@ -606,10 +605,18 @@ struct btrfs_fs_info {
 	void *bdev_holder;
 };
+struct btrfs_leaf_ref_tree {
+	struct rb_root root;
+	struct btrfs_leaf_ref *last;
+	struct list_head list;
+	spinlock_t lock;
+};
 /*
  * in ram representation of the tree. extent_root is used for all allocations
  * and for the extent tree extent_root root.
  */
+struct dirty_root;
 struct btrfs_root {
 	struct extent_buffer *node;
@@ -618,6 +625,8 @@ struct btrfs_root {
 	struct extent_buffer *commit_root;
 	struct btrfs_leaf_ref_tree *ref_tree;
+	struct btrfs_leaf_ref_tree ref_tree_struct;
+	struct dirty_root *dirty_root;
 	struct btrfs_root_item root_item;
 	struct btrfs_key root_key;
...
@@ -40,6 +40,7 @@
 #include "print-tree.h"
 #include "async-thread.h"
 #include "locking.h"
+#include "ref-cache.h"
 #if 0
 static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
@@ -737,6 +738,10 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	spin_lock_init(&root->node_lock);
 	spin_lock_init(&root->orphan_lock);
 	mutex_init(&root->objectid_mutex);
+	btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
+	root->ref_tree = &root->ref_tree_struct;
 	memset(&root->root_key, 0, sizeof(root->root_key));
 	memset(&root->root_item, 0, sizeof(root->root_item));
 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
@@ -1176,9 +1181,6 @@ static int transaction_kthread(void *arg)
 			goto sleep;
 		}
-		printk("btrfs: running reference cache size %Lu\n",
-			root->fs_info->running_ref_cache_size);
 		now = get_seconds();
 		if (now < cur->start_time || now - cur->start_time < 30) {
 			mutex_unlock(&root->fs_info->trans_mutex);
...
@@ -1004,8 +1004,6 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		goto out;
 	}
-	btrfs_item_key_to_cpu(buf, &ref->key, 0);
 	ref->bytenr = buf->start;
 	ref->owner = btrfs_header_owner(buf);
 	ref->generation = btrfs_header_generation(buf);
@@ -2387,19 +2385,15 @@ static void noinline reada_walk_down(struct btrfs_root *root,
 	}
 }
-/*
- * we want to avoid as much random IO as we can with the alloc mutex
- * held, so drop the lock and do the lookup, then do it again with the
- * lock held.
- */
 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
 			      u32 *refs)
 {
+	int ret;
 	mutex_unlock(&root->fs_info->alloc_mutex);
-	lookup_extent_ref(NULL, root, start, len, refs);
+	ret = lookup_extent_ref(NULL, root, start, len, refs);
 	cond_resched();
 	mutex_lock(&root->fs_info->alloc_mutex);
-	return lookup_extent_ref(NULL, root, start, len, refs);
+	return ret;
 }
 /*
@@ -2472,7 +2466,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 		if (*level == 1) {
 			struct btrfs_key key;
 			btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
-			ref = btrfs_lookup_leaf_ref(root, &key);
+			ref = btrfs_lookup_leaf_ref(root, bytenr);
 			if (ref) {
 				ret = drop_leaf_ref(trans, root, ref);
 				BUG_ON(ret);
@@ -2482,7 +2476,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 				break;
 			}
 		}
 		next = btrfs_find_tree_block(root, bytenr, blocksize);
 		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
 			free_extent_buffer(next);
@@ -2672,6 +2665,7 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
 			ret = -EAGAIN;
 			break;
 		}
+		wake_up(&root->fs_info->transaction_throttle);
 	}
 	for (i = 0; i <= orig_level; i++) {
 		if (path->nodes[i]) {
...
@@ -347,7 +347,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		btrfs_update_inode(trans, root, inode);
 	}
 failed:
-	err = btrfs_end_transaction_throttle(trans, root);
+	err = btrfs_end_transaction(trans, root);
 out_unlock:
 	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
 	return err;
...
@@ -2482,7 +2482,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 	btrfs_update_inode_block_group(trans, dir);
 out_unlock:
 	nr = trans->blocks_used;
-	btrfs_end_transaction_throttle(trans, root);
+	btrfs_end_transaction(trans, root);
 fail:
 	if (drop_inode) {
 		inode_dec_link_count(inode);
@@ -2535,7 +2535,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	drop_inode = 1;
 	nr = trans->blocks_used;
-	btrfs_end_transaction_throttle(trans, root);
+	btrfs_end_transaction(trans, root);
 fail:
 	if (drop_inode) {
 		inode_dec_link_count(inode);
@@ -2609,7 +2609,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 out_fail:
 	nr = trans->blocks_used;
-	btrfs_end_transaction_throttle(trans, root);
+	btrfs_end_transaction(trans, root);
 out_unlock:
 	if (drop_on_err)
@@ -3548,7 +3548,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 out_unlock:
 	nr = trans->blocks_used;
-	btrfs_end_transaction_throttle(trans, root);
+	btrfs_end_transaction(trans, root);
 out_fail:
 	if (drop_inode) {
 		inode_dec_link_count(inode);
...
@@ -29,6 +29,7 @@ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents)
 	if (ref) {
 		memset(ref, 0, sizeof(*ref));
 		atomic_set(&ref->usage, 1);
+		INIT_LIST_HEAD(&ref->list);
 	}
 	return ref;
 }
@@ -44,40 +45,21 @@ void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref)
 	}
 }
-static int comp_keys(struct btrfs_key *k1, struct btrfs_key *k2)
-{
-	if (k1->objectid > k2->objectid)
-		return 1;
-	if (k1->objectid < k2->objectid)
-		return -1;
-	if (k1->type > k2->type)
-		return 1;
-	if (k1->type < k2->type)
-		return -1;
-	if (k1->offset > k2->offset)
-		return 1;
-	if (k1->offset < k2->offset)
-		return -1;
-	return 0;
-}
-static struct rb_node *tree_insert(struct rb_root *root, struct btrfs_key *key,
+static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
 				   struct rb_node *node)
 {
 	struct rb_node ** p = &root->rb_node;
 	struct rb_node * parent = NULL;
 	struct btrfs_leaf_ref *entry;
-	int ret;
 	while(*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
 		WARN_ON(!entry->in_tree);
-		ret = comp_keys(key, &entry->key);
-		if (ret < 0)
+		if (bytenr < entry->bytenr)
 			p = &(*p)->rb_left;
-		else if (ret > 0)
+		else if (bytenr > entry->bytenr)
 			p = &(*p)->rb_right;
 		else
 			return parent;
@@ -90,20 +72,18 @@ static struct rb_node *tree_insert(struct rb_root *root, struct btrfs_key *key,
 	return NULL;
 }
-static struct rb_node *tree_search(struct rb_root *root, struct btrfs_key *key)
+static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
 {
 	struct rb_node * n = root->rb_node;
 	struct btrfs_leaf_ref *entry;
-	int ret;
 	while(n) {
 		entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
 		WARN_ON(!entry->in_tree);
-		ret = comp_keys(key, &entry->key);
-		if (ret < 0)
+		if (bytenr < entry->bytenr)
 			n = n->rb_left;
-		else if (ret > 0)
+		else if (bytenr > entry->bytenr)
 			n = n->rb_right;
 		else
 			return n;
@@ -122,11 +102,11 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root)
 	spin_lock(&tree->lock);
 	while(!btrfs_leaf_ref_tree_empty(tree)) {
-		tree->last = NULL;
 		rb = rb_first(&tree->root);
 		ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
 		rb_erase(&ref->rb_node, &tree->root);
 		ref->in_tree = 0;
+		list_del_init(&ref->list);
 		spin_unlock(&tree->lock);
@@ -140,7 +120,7 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root)
 }
 struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
-					     struct btrfs_key *key)
+					     u64 bytenr)
 {
 	struct rb_node *rb;
 	struct btrfs_leaf_ref *ref = NULL;
@@ -150,15 +130,9 @@ struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
 		return NULL;
 	spin_lock(&tree->lock);
-	if (tree->last && comp_keys(key, &tree->last->key) == 0) {
-		ref = tree->last;
-	} else {
-		rb = tree_search(&tree->root, key);
-		if (rb) {
-			ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
-			tree->last = ref;
-		}
-	}
+	rb = tree_search(&tree->root, bytenr);
+	if (rb)
+		ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
 	if (ref)
 		atomic_inc(&ref->usage);
 	spin_unlock(&tree->lock);
@@ -171,21 +145,17 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 	struct rb_node *rb;
 	size_t size = btrfs_leaf_ref_size(ref->nritems);
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-	struct btrfs_transaction *trans = root->fs_info->running_transaction;
 	spin_lock(&tree->lock);
-	rb = tree_insert(&tree->root, &ref->key, &ref->rb_node);
+	rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
 	if (rb) {
 		ret = -EEXIST;
 	} else {
 		spin_lock(&root->fs_info->ref_cache_lock);
 		root->fs_info->total_ref_cache_size += size;
-		if (trans && tree->generation == trans->transid)
-			root->fs_info->running_ref_cache_size += size;
 		spin_unlock(&root->fs_info->ref_cache_lock);
-		tree->last = ref;
 		atomic_inc(&ref->usage);
+		list_add_tail(&ref->list, &tree->list);
 	}
 	spin_unlock(&tree->lock);
 	return ret;
@@ -195,28 +165,17 @@ int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
 {
 	size_t size = btrfs_leaf_ref_size(ref->nritems);
 	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-	struct btrfs_transaction *trans = root->fs_info->running_transaction;
 	BUG_ON(!ref->in_tree);
 	spin_lock(&tree->lock);
 	spin_lock(&root->fs_info->ref_cache_lock);
 	root->fs_info->total_ref_cache_size -= size;
-	if (trans && tree->generation == trans->transid)
-		root->fs_info->running_ref_cache_size -= size;
 	spin_unlock(&root->fs_info->ref_cache_lock);
-	if (tree->last == ref) {
-		struct rb_node *next = rb_next(&ref->rb_node);
-		if (next) {
-			tree->last = rb_entry(next, struct btrfs_leaf_ref,
-					      rb_node);
-		} else
-			tree->last = NULL;
-	}
 	rb_erase(&ref->rb_node, &tree->root);
 	ref->in_tree = 0;
+	list_del_init(&ref->list);
 	spin_unlock(&tree->lock);
...
@@ -15,6 +15,8 @@
  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  * Boston, MA 021110-1307, USA.
  */
+#ifndef __REFCACHE__
+#define __REFCACHE__
 struct btrfs_extent_info {
 	u64 bytenr;
@@ -25,7 +27,6 @@ struct btrfs_extent_info {
 struct btrfs_leaf_ref {
 	struct rb_node rb_node;
-	struct btrfs_key key;
 	int in_tree;
 	atomic_t usage;
@@ -33,14 +34,9 @@ struct btrfs_leaf_ref {
 	u64 owner;
 	u64 generation;
 	int nritems;
-	struct btrfs_extent_info extents[];
-};
-struct btrfs_leaf_ref_tree {
-	struct rb_root root;
-	struct btrfs_leaf_ref *last;
-	u64 generation;
-	spinlock_t lock;
+	struct list_head list;
+	struct btrfs_extent_info extents[];
 };
 static inline size_t btrfs_leaf_ref_size(int nr_extents)
@@ -53,7 +49,7 @@ static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
 {
 	tree->root.rb_node = NULL;
 	tree->last = NULL;
-	tree->generation = 0;
+	INIT_LIST_HEAD(&tree->list);
 	spin_lock_init(&tree->lock);
 }
@@ -66,7 +62,9 @@ void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
 struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents);
 void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref);
 struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
-					     struct btrfs_key *key);
+					     u64 bytenr);
 int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
 int btrfs_remove_leaf_refs(struct btrfs_root *root);
 int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
+#endif
@@ -36,7 +36,6 @@ struct dirty_root {
 	struct list_head list;
 	struct btrfs_root *root;
 	struct btrfs_root *latest_root;
-	struct btrfs_leaf_ref_tree ref_tree;
 };
 static noinline void put_transaction(struct btrfs_transaction *transaction)
@@ -108,13 +107,13 @@ static noinline int record_root_in_trans(struct btrfs_root *root)
 		dirty->latest_root = root;
 		INIT_LIST_HEAD(&dirty->list);
-		btrfs_leaf_ref_tree_init(&dirty->ref_tree);
-		dirty->ref_tree.generation = running_trans_id;
 		root->commit_root = btrfs_root_node(root);
-		root->ref_tree = &dirty->ref_tree;
+		root->dirty_root = dirty;
 		memcpy(dirty->root, root, sizeof(*root));
+		dirty->root->ref_tree = &root->ref_tree_struct;
 		spin_lock_init(&dirty->root->node_lock);
 		mutex_init(&dirty->root->objectid_mutex);
 		dirty->root->node = root->commit_root;
@@ -217,11 +216,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	if (waitqueue_active(&cur_trans->writer_wait))
 		wake_up(&cur_trans->writer_wait);
-	if (0 && cur_trans->in_commit && throttle) {
+	if (throttle && atomic_read(&root->fs_info->throttles)) {
 		DEFINE_WAIT(wait);
 		mutex_unlock(&root->fs_info->trans_mutex);
 		prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
 				TASK_UNINTERRUPTIBLE);
+		if (atomic_read(&root->fs_info->throttles))
 			schedule();
 		finish_wait(&root->fs_info->transaction_throttle, &wait);
 		mutex_lock(&root->fs_info->trans_mutex);
@@ -333,6 +333,8 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
 		update_cowonly_root(trans, root);
+		if (root->fs_info->closing)
+			btrfs_remove_leaf_refs(root);
 	}
 	return 0;
 }
@@ -346,10 +348,8 @@ int btrfs_add_dead_root(struct btrfs_root *root,
 	dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
 	if (!dirty)
 		return -ENOMEM;
-	btrfs_leaf_ref_tree_init(&dirty->ref_tree);
 	dirty->root = root;
 	dirty->latest_root = latest;
-	root->ref_tree = NULL;
 	list_add(&dirty->list, dead_list);
 	return 0;
 }
@@ -379,18 +379,14 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
 					    BTRFS_ROOT_TRANS_TAG);
 			BUG_ON(!root->ref_tree);
-			dirty = container_of(root->ref_tree, struct dirty_root,
-					     ref_tree);
+			dirty = root->dirty_root;
 			if (root->commit_root == root->node) {
 				WARN_ON(root->node->start !=
 					btrfs_root_bytenr(&root->root_item));
-				BUG_ON(!btrfs_leaf_ref_tree_empty(
-						root->ref_tree));
 				free_extent_buffer(root->commit_root);
 				root->commit_root = NULL;
-				root->ref_tree = NULL;
 				kfree(dirty->root);
 				kfree(dirty);
@@ -410,7 +406,6 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
 				       sizeof(struct btrfs_disk_key));
 			root->root_item.drop_level = 0;
 			root->commit_root = NULL;
-			root->ref_tree = NULL;
 			root->root_key.offset = root->fs_info->generation;
 			btrfs_set_root_bytenr(&root->root_item,
 					      root->node->start);
@@ -485,7 +480,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 	while(!list_empty(list)) {
 		struct btrfs_root *root;
-		dirty = list_entry(list->next, struct dirty_root, list);
+		dirty = list_entry(list->prev, struct dirty_root, list);
 		list_del_init(&dirty->list);
 		num_bytes = btrfs_root_used(&dirty->root->root_item);
@@ -507,7 +502,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 			if (err)
 				ret = err;
 			nr = trans->blocks_used;
-			ret = btrfs_end_transaction_throttle(trans, tree_root);
+			ret = btrfs_end_transaction(trans, tree_root);
 			BUG_ON(ret);
 			mutex_unlock(&root->fs_info->drop_mutex);
@@ -517,6 +512,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 		}
 		BUG_ON(ret);
 		atomic_dec(&root->fs_info->throttles);
+		wake_up(&root->fs_info->transaction_throttle);
 		mutex_lock(&root->fs_info->alloc_mutex);
 		num_bytes -= btrfs_root_used(&dirty->root->root_item);
@@ -539,8 +535,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 		ret = btrfs_end_transaction(trans, tree_root);
 		BUG_ON(ret);
-		btrfs_remove_leaf_refs(dirty->root);
 		free_extent_buffer(dirty->root->node);
 		kfree(dirty->root);
 		kfree(dirty);
@@ -725,10 +719,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 				   &dirty_fs_roots);
 	BUG_ON(ret);
-	spin_lock(&root->fs_info->ref_cache_lock);
-	root->fs_info->running_ref_cache_size = 0;
-	spin_unlock(&root->fs_info->ref_cache_lock);
 	ret = btrfs_commit_tree_roots(trans, root);
 	BUG_ON(ret);
...