Commit 25179201 authored by Josef Bacik, committed by Chris Mason

Btrfs: nuke fs wide allocation mutex V2

This patch removes the giant fs_info->alloc_mutex and replaces it with a bunch
of little locks.

There is now a pinned_mutex, which is used when messing with the pinned_extents
extent io tree, and an extent_ins_mutex, which is used with the pending_del and
extent_ins extent io trees.
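
Mapped onto the new struct btrfs_fs_info fields (the comments here are my own
annotations summarizing the sentence above; the actual field additions are in
the ctree.h hunk below):

struct btrfs_fs_info {
        ...
        struct mutex extent_ins_mutex;  /* guards the extent_ins and
                                         * pending_del extent io trees */
        struct mutex pinned_mutex;      /* guards the pinned_extents
                                         * extent io tree */
        ...
};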

The locking for the extent tree stuff was inspired by a patch that Yan Zheng
wrote to fix a race condition.  I cleaned it up some and changed the locking
around a little bit, but the idea remains the same: instead of holding the
extent_ins_mutex throughout the processing of an extent on the extent_ins or
pending_del trees, we only hold it while we're searching those trees and while
we clear the bits on them, and we lock the extent itself for the duration of
the operations on it.
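
As a rough illustration of that shape (a sketch only: find_next_range(),
clear_range_bits() and do_extent_work() are placeholder names, not functions
from this patch; the committed loop lives in the collapsed extent-tree.c diff
below):

static void process_pending(struct btrfs_fs_info *info)
{
        u64 start, end;

        /* hold extent_ins_mutex only for the search and the bit clearing */
        mutex_lock(&info->extent_ins_mutex);
        if (find_next_range(&info->extent_ins, &start, &end)) {
                mutex_unlock(&info->extent_ins_mutex);
                return;
        }
        /* pin down this extent before dropping the mutex so nothing
         * else can grab it in between */
        lock_extent(&info->extent_ins, start, end, GFP_NOFS);
        clear_range_bits(&info->extent_ins, start, end);
        mutex_unlock(&info->extent_ins_mutex);

        /* the extent lock, not the mutex, covers the actual work */
        do_extent_work(info, start, end);
        unlock_extent(&info->extent_ins, start, end, GFP_NOFS);
}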

Also, to keep from getting hung up waiting to lock an extent, I've added a
try_lock_extent, so that if we cannot lock an extent we move on to the next one
in the tree and come back to it later.  I have tested this heavily and it does
not appear to break anything.  This has to be applied on top of my
find_free_extent redo patch.
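
In sketch form (again with placeholder helpers, inside whatever loop walks the
tree; the committed code in extent-tree.c is more involved):

        u64 search = 0;
        u64 start, end;

        while (more_work(info)) {
                mutex_lock(&info->extent_ins_mutex);
                if (find_next_range_from(&info->extent_ins, search,
                                         &start, &end)) {
                        mutex_unlock(&info->extent_ins_mutex);
                        search = 0;     /* wrap around, revisit skipped extents */
                        continue;
                }
                /* somebody else holds this extent: skip it instead of
                 * sleeping on lock_extent() */
                if (!try_lock_extent(&info->extent_ins, start, end, GFP_NOFS)) {
                        mutex_unlock(&info->extent_ins_mutex);
                        search = end + 1;
                        continue;
                }
                mutex_unlock(&info->extent_ins_mutex);

                process_one(info, start, end);
                unlock_extent(&info->extent_ins, start, end, GFP_NOFS);
                search = end + 1;
        }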

I tested this patch on top of Yan's space rebalancing code and it worked fine.
The only thing that has changed since the last version is that I pulled out all
my debugging stuff; apparently I forgot to run guilt refresh before I sent the
last patch out.  Thank you,
Signed-off-by: Josef Bacik <jbacik@redhat.com>
parent 80eb234a
@@ -1387,8 +1387,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
         lowest_level = p->lowest_level;
         WARN_ON(lowest_level && ins_len > 0);
         WARN_ON(p->nodes[0] != NULL);
-        WARN_ON(cow && root == root->fs_info->extent_root &&
-                !mutex_is_locked(&root->fs_info->alloc_mutex));
         if (ins_len < 0)
                 lowest_unlock = 2;
...
@@ -558,6 +558,7 @@ struct btrfs_block_group_cache {
         struct btrfs_key key;
         struct btrfs_block_group_item item;
         spinlock_t lock;
+        struct mutex alloc_mutex;
         u64 pinned;
         u64 reserved;
         u64 flags;
@@ -635,7 +636,8 @@ struct btrfs_fs_info {
         struct mutex tree_log_mutex;
         struct mutex transaction_kthread_mutex;
         struct mutex cleaner_mutex;
-        struct mutex alloc_mutex;
+        struct mutex extent_ins_mutex;
+        struct mutex pinned_mutex;
         struct mutex chunk_mutex;
         struct mutex drop_mutex;
         struct mutex volume_mutex;
@@ -1941,8 +1943,12 @@ int btrfs_acl_chmod(struct inode *inode);
 /* free-space-cache.c */
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                          u64 bytenr, u64 size);
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+                              u64 offset, u64 bytes);
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                             u64 bytenr, u64 size);
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+                                 u64 offset, u64 bytes);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
                                    *block_group);
 struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
...
@@ -1460,7 +1460,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
         mutex_init(&fs_info->trans_mutex);
         mutex_init(&fs_info->tree_log_mutex);
         mutex_init(&fs_info->drop_mutex);
-        mutex_init(&fs_info->alloc_mutex);
+        mutex_init(&fs_info->extent_ins_mutex);
+        mutex_init(&fs_info->pinned_mutex);
         mutex_init(&fs_info->chunk_mutex);
         mutex_init(&fs_info->transaction_kthread_mutex);
         mutex_init(&fs_info->cleaner_mutex);
...
This diff is collapsed.
@@ -938,6 +938,20 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 }
 EXPORT_SYMBOL(lock_extent);

+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                    gfp_t mask)
+{
+        int err;
+        u64 failed_start;
+
+        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
+                             &failed_start, mask);
+        if (err == -EEXIST)
+                return 0;
+        return 1;
+}
+EXPORT_SYMBOL(try_lock_extent);
+
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                   gfp_t mask)
 {
...
@@ -128,6 +128,8 @@ int try_release_extent_state(struct extent_map_tree *map,
                              gfp_t mask);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+                    gfp_t mask);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
                           get_extent_t *get_extent);
 int __init extent_io_init(void);
...
@@ -184,8 +184,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
         return ret;
 }

-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                         u64 offset, u64 bytes)
+static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                                  u64 offset, u64 bytes)
 {
         struct btrfs_free_space *right_info;
         struct btrfs_free_space *left_info;
@@ -202,8 +202,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
          * are adding, if there is remove that struct and add a new one to
          * cover the entire range
          */
-        spin_lock(&block_group->lock);
-
         right_info = tree_search_offset(&block_group->free_space_offset,
                                         offset+bytes, 0, 1);
         left_info = tree_search_offset(&block_group->free_space_offset,
@@ -261,7 +259,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         if (ret)
                 kfree(info);
 out:
-        spin_unlock(&block_group->lock);
         if (ret) {
                 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
                 if (ret == -EEXIST)
@@ -274,13 +271,13 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         return ret;
 }

-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-                            u64 offset, u64 bytes)
+static int
+__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+                          u64 offset, u64 bytes)
 {
         struct btrfs_free_space *info;
         int ret = 0;

-        spin_lock(&block_group->lock);
         info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                   1);
@@ -334,17 +331,63 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                 /* step two, insert a new info struct to cover anything
                  * before the hole
                  */
-                spin_unlock(&block_group->lock);
-                ret = btrfs_add_free_space(block_group, old_start,
-                                           offset - old_start);
+                ret = __btrfs_add_free_space(block_group, old_start,
+                                             offset - old_start);
                 BUG_ON(ret);
-                goto out_nolock;
         } else {
                 WARN_ON(1);
         }
 out:
-        spin_unlock(&block_group->lock);
-out_nolock:
+        return ret;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                         u64 offset, u64 bytes)
+{
+        int ret;
+        struct btrfs_free_space *sp;
+
+        mutex_lock(&block_group->alloc_mutex);
+        ret = __btrfs_add_free_space(block_group, offset, bytes);
+        sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+        BUG_ON(!sp);
+        mutex_unlock(&block_group->alloc_mutex);
+
+        return ret;
+}
+
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+                              u64 offset, u64 bytes)
+{
+        int ret;
+        struct btrfs_free_space *sp;
+
+        ret = __btrfs_add_free_space(block_group, offset, bytes);
+        sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+        BUG_ON(!sp);
+
+        return ret;
+}
+
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+                            u64 offset, u64 bytes)
+{
+        int ret = 0;
+
+        mutex_lock(&block_group->alloc_mutex);
+        ret = __btrfs_remove_free_space(block_group, offset, bytes);
+        mutex_unlock(&block_group->alloc_mutex);
+
+        return ret;
+}
+
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+                                 u64 offset, u64 bytes)
+{
+        int ret;
+
+        ret = __btrfs_remove_free_space(block_group, offset, bytes);
+
         return ret;
 }
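
Note the naming convention the hunk above introduces: btrfs_add_free_space()
and btrfs_remove_free_space() take block_group->alloc_mutex themselves, while
the *_lock variants assume the caller already holds it.  A hypothetical caller
(cache, start and len are illustrative):

        /* caller does not hold the mutex: the plain variant takes it */
        btrfs_add_free_space(cache, start, len);

        /* caller already holds alloc_mutex (e.g. deep in the allocator):
         * the _lock variant avoids a recursive, self-deadlocking
         * mutex_lock() */
        mutex_lock(&cache->alloc_mutex);
        btrfs_remove_free_space_lock(cache, start, len);
        mutex_unlock(&cache->alloc_mutex);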
@@ -386,18 +429,18 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
         struct btrfs_free_space *info;
         struct rb_node *node;

-        spin_lock(&block_group->lock);
+        mutex_lock(&block_group->alloc_mutex);
         while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                 info = rb_entry(node, struct btrfs_free_space, bytes_index);
                 unlink_free_space(block_group, info);
                 kfree(info);
                 if (need_resched()) {
-                        spin_unlock(&block_group->lock);
+                        mutex_unlock(&block_group->alloc_mutex);
                         cond_resched();
-                        spin_lock(&block_group->lock);
+                        mutex_lock(&block_group->alloc_mutex);
                 }
         }
-        spin_unlock(&block_group->lock);
+        mutex_unlock(&block_group->alloc_mutex);
 }
struct btrfs_free_space *btrfs_find_free_space_offset(struct struct btrfs_free_space *btrfs_find_free_space_offset(struct
...@@ -407,10 +450,10 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct ...@@ -407,10 +450,10 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct
{ {
struct btrfs_free_space *ret; struct btrfs_free_space *ret;
spin_lock(&block_group->lock); mutex_lock(&block_group->alloc_mutex);
ret = tree_search_offset(&block_group->free_space_offset, offset, ret = tree_search_offset(&block_group->free_space_offset, offset,
bytes, 0); bytes, 0);
spin_unlock(&block_group->lock); mutex_unlock(&block_group->alloc_mutex);
return ret; return ret;
} }
@@ -422,10 +465,10 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct
 {
         struct btrfs_free_space *ret;

-        spin_lock(&block_group->lock);
+        mutex_lock(&block_group->alloc_mutex);
         ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-        spin_unlock(&block_group->lock);
+        mutex_unlock(&block_group->alloc_mutex);

         return ret;
 }
@@ -434,16 +477,13 @@ struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
                                                *block_group, u64 offset,
                                                u64 bytes)
 {
-        struct btrfs_free_space *ret;
+        struct btrfs_free_space *ret = NULL;

-        spin_lock(&block_group->lock);
         ret = tree_search_offset(&block_group->free_space_offset, offset,
                                  bytes, 0);
         if (!ret)
                 ret = tree_search_bytes(&block_group->free_space_bytes,
                                         offset, bytes);
-        spin_unlock(&block_group->lock);

         return ret;
 }
@@ -670,7 +670,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                 atomic_dec(&root->fs_info->throttles);
                 wake_up(&root->fs_info->transaction_throttle);

-                mutex_lock(&root->fs_info->alloc_mutex);
                 num_bytes -= btrfs_root_used(&dirty->root->root_item);
                 bytes_used = btrfs_root_used(&root->root_item);
                 if (num_bytes) {
@@ -678,7 +677,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                         btrfs_set_root_used(&root->root_item,
                                             bytes_used - num_bytes);
                 }
-                mutex_unlock(&root->fs_info->alloc_mutex);

                 ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
                 if (ret) {
...
@@ -125,9 +125,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
         if (is_extent)
                 btrfs_extent_post_op(trans, root);
 out:
-        if (is_extent)
-                mutex_unlock(&root->fs_info->alloc_mutex);
-
         if (path)
                 btrfs_free_path(path);
         if (ret == -EAGAIN) {
...
@@ -271,10 +271,10 @@ static int process_one_buffer(struct btrfs_root *log,
                               struct walk_control *wc, u64 gen)
 {
         if (wc->pin) {
-                mutex_lock(&log->fs_info->alloc_mutex);
+                mutex_lock(&log->fs_info->pinned_mutex);
                 btrfs_update_pinned_extents(log->fs_info->extent_root,
                                             eb->start, eb->len, 1);
-                mutex_unlock(&log->fs_info->alloc_mutex);
+                mutex_unlock(&log->fs_info->pinned_mutex);
         }

         if (btrfs_buffer_uptodate(eb, gen)) {
...
@@ -58,14 +58,12 @@ void btrfs_unlock_volumes(void)

 static void lock_chunks(struct btrfs_root *root)
 {
-        mutex_lock(&root->fs_info->alloc_mutex);
         mutex_lock(&root->fs_info->chunk_mutex);
 }

 static void unlock_chunks(struct btrfs_root *root)
 {
         mutex_unlock(&root->fs_info->chunk_mutex);
-        mutex_unlock(&root->fs_info->alloc_mutex);
 }

 int btrfs_cleanup_fs_uuids(void)
...