Commit b4ce94de authored by Chris Mason

Btrfs: Change btree locking to use explicit blocking points

Most of the btrfs metadata operations can be protected by a spinlock,
but some operations still need to schedule.

So far, btrfs has been using a mutex along with a trylock loop;
most of the time it is able to avoid going for the full mutex, so
the trylock loop is a big performance gain.

This commit is step one for getting rid of the blocking locks entirely.
btrfs_tree_lock takes a spinlock, and the code explicitly switches
to a blocking lock when it starts an operation that can schedule.

We'll be able to get rid of the blocking locks in smaller pieces over time.
Tracing allows us to find the most common cause of blocking, so we
can start with the hot spots first.

The basic idea is:

btrfs_tree_lock() returns with the spin lock held

btrfs_set_lock_blocking() sets the EXTENT_BUFFER_BLOCKING bit in
the extent buffer flags, and then drops the spin lock.  The buffer is
still considered locked by all of the btrfs code.

If btrfs_tree_lock gets the spinlock but finds the blocking bit set, it drops
the spin lock and waits on a wait queue for the blocking bit to go away.

Much of the code that needs to set the blocking bit finishes without actually
blocking a good percentage of the time.  So, an adaptive spin is still
used against the blocking bit to avoid very high context switch rates.

btrfs_clear_lock_blocking() clears the blocking bit and returns
with the spinlock held again.

btrfs_tree_unlock() can be called on either blocking or spinning locks,
it does the right thing based on the blocking bit.

ctree.c has a helper function to set/clear all the locked buffers in a
path as blocking.
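
As an illustration of the calling pattern this sets up (a sketch for this
description, not code from the diff below; do_blocking_work() and
example_modify() are hypothetical stand-ins for any operation that can
schedule, such as reading a block from disk):

	static void example_modify(struct extent_buffer *eb)
	{
		btrfs_tree_lock(eb);		/* returns with eb->lock (a spinlock) held */

		/* switch to blocking before doing anything that can schedule */
		btrfs_set_lock_blocking(eb);	/* blocking bit set, spinlock dropped */
		do_blocking_work(eb);		/* hypothetical; sleeping is safe here */
		btrfs_clear_lock_blocking(eb);	/* spinlock held again, no sleeping */

		btrfs_tree_unlock(eb);		/* correct for spinning or blocking state */
	}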
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent c487685d
@@ -1835,6 +1835,10 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
 void btrfs_init_path(struct btrfs_path *p);
+
+void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_clear_path_blocking(struct btrfs_path *p);
+void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		    struct btrfs_path *path, int slot, int nr);
 int btrfs_del_leaf(struct btrfs_trans_handle *trans,
......
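The ctree.c bodies behind the three prototypes added above are outside this
excerpt. As a sketch of what the commit message describes ("set/clear all
the locked buffers in a path as blocking"), the set half plausibly walks the
path like this; this is an assumption for illustration, and the committed
code may differ in detail:

	void btrfs_set_path_blocking(struct btrfs_path *p)
	{
		int i;

		/* mark every node we hold locked in this path as blocking */
		for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
			if (p->nodes[i] && p->locks[i])
				btrfs_set_lock_blocking(p->nodes[i]);
		}
	}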
@@ -799,7 +799,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 
 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
 	if (ret == 0)
-		buf->flags |= EXTENT_UPTODATE;
+		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
 	else
 		WARN_ON(1);
 	return buf;
@@ -813,6 +813,10 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	if (btrfs_header_generation(buf) ==
 	    root->fs_info->running_transaction->transid) {
 		WARN_ON(!btrfs_tree_locked(buf));
+
+		/* ugh, clear_extent_buffer_dirty can be expensive */
+		btrfs_set_lock_blocking(buf);
+
 		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
 					  buf);
 	}
@@ -2311,6 +2315,8 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 	u64 transid = btrfs_header_generation(buf);
 	struct inode *btree_inode = root->fs_info->btree_inode;
 
+	btrfs_set_lock_blocking(buf);
+
 	WARN_ON(!btrfs_tree_locked(buf));
 	if (transid != root->fs_info->generation) {
 		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
@@ -2353,7 +2359,7 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 	int ret;
 
 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
 	if (ret == 0)
-		buf->flags |= EXTENT_UPTODATE;
+		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
 	return ret;
 }
......
@@ -3407,7 +3407,10 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 	btrfs_set_header_generation(buf, trans->transid);
 	btrfs_tree_lock(buf);
 	clean_tree_block(trans, root, buf);
+
+	btrfs_set_lock_blocking(buf);
 	btrfs_set_buffer_uptodate(buf);
+
 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
 		set_extent_dirty(&root->dirty_log_pages, buf->start,
 			 buf->start + buf->len - 1, GFP_NOFS);
@@ -3416,6 +3419,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 			 buf->start + buf->len - 1, GFP_NOFS);
 	}
 	trans->blocks_used++;
+	/* this returns a buffer locked for blocking */
 	return buf;
 }
@@ -3752,6 +3756,7 @@ static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
 
 	next = read_tree_block(root, bytenr, blocksize, ptr_gen);
 	btrfs_tree_lock(next);
+	btrfs_set_lock_blocking(next);
 
 	ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
 				      &refs);
......
@@ -2990,7 +2990,9 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	eb->start = start;
 	eb->len = len;
-	mutex_init(&eb->mutex);
+	spin_lock_init(&eb->lock);
+	init_waitqueue_head(&eb->lock_wq);
+
 #if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&eb->leak_list, &buffers);
@@ -3071,8 +3073,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		unlock_page(p);
 	}
 	if (uptodate)
-		eb->flags |= EXTENT_UPTODATE;
-	eb->flags |= EXTENT_BUFFER_FILLED;
+		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
 	spin_lock(&tree->buffer_lock);
 	exists = buffer_tree_insert(tree, start, &eb->rb_node);
@@ -3226,7 +3227,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	unsigned long num_pages;
 
 	num_pages = num_extent_pages(eb->start, eb->len);
-	eb->flags &= ~EXTENT_UPTODATE;
+	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
 			      GFP_NOFS);
@@ -3297,7 +3298,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 	struct page *page;
 	int pg_uptodate = 1;
 
-	if (eb->flags & EXTENT_UPTODATE)
+	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 1;
 
 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3333,7 +3334,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
 
-	if (eb->flags & EXTENT_UPTODATE)
+	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
 	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3364,7 +3365,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	}
 	if (all_uptodate) {
 		if (start_i == 0)
-			eb->flags |= EXTENT_UPTODATE;
+			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 		goto unlock_exit;
 	}
@@ -3400,7 +3401,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	}
 
 	if (!ret)
-		eb->flags |= EXTENT_UPTODATE;
+		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 	return ret;
 
 unlock_exit:
@@ -3497,7 +3498,6 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		unmap_extent_buffer(eb, eb->map_token, km);
 		eb->map_token = NULL;
 		save = 1;
-		WARN_ON(!mutex_is_locked(&eb->mutex));
 	}
 	err = map_private_extent_buffer(eb, start, min_len, token, map,
 					map_start, map_len, km);
......
@@ -22,6 +22,10 @@
 /* flags for bio submission */
 #define EXTENT_BIO_COMPRESSED 1
 
+/* these are bit numbers for test/set bit */
+#define EXTENT_BUFFER_UPTODATE 0
+#define EXTENT_BUFFER_BLOCKING 1
+
 /*
  * page->private values. Every page that is controlled by the extent
  * map has page->private set to one.
@@ -95,11 +99,19 @@ struct extent_buffer {
 	unsigned long map_start;
 	unsigned long map_len;
 	struct page *first_page;
+	unsigned long bflags;
 	atomic_t refs;
-	int flags;
 	struct list_head leak_list;
 	struct rb_node rb_node;
-	struct mutex mutex;
+
+	/* the spinlock is used to protect most operations */
+	spinlock_t lock;
+
+	/*
+	 * when we keep the lock held while blocking, waiters go onto
+	 * the wq
+	 */
+	wait_queue_head_t lock_wq;
 };
 
 struct extent_map_tree;
......
@@ -50,6 +50,7 @@
 #include "tree-log.h"
 #include "ref-cache.h"
 #include "compression.h"
+#include "locking.h"
 
 struct btrfs_iget_args {
 	u64 ino;
@@ -2021,6 +2022,7 @@ void btrfs_read_locked_inode(struct inode *inode)
 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+
 	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
 	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
 						alloc_group_block, 0);
 	btrfs_free_path(path);
@@ -2117,6 +2119,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 		goto failed;
 	}
 
+	btrfs_unlock_up_safe(path, 1);
 	leaf = path->nodes[0];
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
......
@@ -26,45 +26,215 @@
 #include "locking.h"
 
 /*
- * locks the per buffer mutex in an extent buffer. This uses adaptive locks
- * and the spin is not tuned very extensively. The spinning does make a big
- * difference in almost every workload, but spinning for the right amount of
- * time needs some help.
- *
- * In general, we want to spin as long as the lock holder is doing btree
- * searches, and we should give up if they are in more expensive code.
+ * btrfs_header_level() isn't free, so don't call it when lockdep isn't
+ * on
  */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void spin_nested(struct extent_buffer *eb)
+{
+	spin_lock_nested(&eb->lock, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
+}
+#else
+static inline void spin_nested(struct extent_buffer *eb)
+{
+	spin_lock(&eb->lock);
+}
+#endif
 
-int btrfs_tree_lock(struct extent_buffer *eb)
+/*
+ * Setting a lock to blocking will drop the spinlock and set the
+ * flag that forces other procs who want the lock to wait.  After
+ * this you can safely schedule with the lock held.
+ */
+void btrfs_set_lock_blocking(struct extent_buffer *eb)
 {
-	int i;
-
-	if (mutex_trylock(&eb->mutex))
-		return 0;
+	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+		set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
+		spin_unlock(&eb->lock);
+	}
+	/* exit with the spin lock released and the bit set */
+}
+
+/*
+ * clearing the blocking flag will take the spinlock again.
+ * After this you can't safely schedule
+ */
+void btrfs_clear_lock_blocking(struct extent_buffer *eb)
+{
+	if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+		spin_nested(eb);
+		clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
+		smp_mb__after_clear_bit();
+	}
+	/* exit with the spin lock held */
+}
+
+/*
+ * unfortunately, many of the places that currently set a lock to blocking
+ * don't end up blocking for very long, and often they don't block
+ * at all.  For a dbench 50 run, if we don't spin on the blocking bit
+ * at all, the context switch rate can jump up to 400,000/sec or more.
+ *
+ * So, we're still stuck with this crummy spin on the blocking bit,
+ * at least until the most common causes of the short blocks
+ * can be dealt with.
+ */
+static int btrfs_spin_on_block(struct extent_buffer *eb)
+{
+	int i;
 
 	for (i = 0; i < 512; i++) {
 		cpu_relax();
-		if (mutex_trylock(&eb->mutex))
-			return 0;
+		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+			return 1;
+		if (need_resched())
+			break;
+	}
+	return 0;
+}
+
+/*
+ * This is somewhat different from trylock.  It will take the
+ * spinlock but if it finds the lock is set to blocking, it will
+ * return without the lock held.
+ *
+ * returns 1 if it was able to take the lock and zero otherwise
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
+ */
+int btrfs_try_spin_lock(struct extent_buffer *eb)
+{
+	int i;
+
+	spin_nested(eb);
+	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+		return 1;
+	spin_unlock(&eb->lock);
+
+	/* spin for a bit on the BLOCKING flag */
+	for (i = 0; i < 2; i++) {
+		if (!btrfs_spin_on_block(eb))
+			break;
+
+		spin_nested(eb);
+		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+			return 1;
+		spin_unlock(&eb->lock);
+	}
+	return 0;
+}
+
+/*
+ * the autoremove wake function will return 0 if it tried to wake up
+ * a process that was already awake, which means that process won't
+ * count as an exclusive wakeup.  The waitq code will continue waking
+ * procs until it finds one that was actually sleeping.
+ *
+ * For btrfs, this isn't quite what we want.  We want a single proc
+ * to be notified that the lock is ready for taking.  If that proc
+ * already happens to be awake, great, it will loop around and try for
+ * the lock.
+ *
+ * So, btrfs_wake_function always returns 1, even when the proc that we
+ * tried to wake up was already awake.
+ */
+static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
+			       int sync, void *key)
+{
+	autoremove_wake_function(wait, mode, sync, key);
+	return 1;
+}
+
+/*
+ * returns with the extent buffer spinlocked.
+ *
+ * This will spin and/or wait as required to take the lock, and then
+ * return with the spinlock held.
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
+ */
+int btrfs_tree_lock(struct extent_buffer *eb)
+{
+	DEFINE_WAIT(wait);
+
+	wait.func = btrfs_wake_function;
+
+	while (1) {
+		spin_nested(eb);
+
+		/* nobody is blocking, exit with the spinlock held */
+		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+			return 0;
+
+		/*
+		 * we have the spinlock, but the real owner is blocking.
+		 * wait for them
+		 */
+		spin_unlock(&eb->lock);
+
+		/*
+		 * spin for a bit, and if the blocking flag goes away,
+		 * loop around
+		 */
+		if (btrfs_spin_on_block(eb))
+			continue;
+
+		prepare_to_wait_exclusive(&eb->lock_wq, &wait,
+					  TASK_UNINTERRUPTIBLE);
+
+		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+			schedule();
+
+		finish_wait(&eb->lock_wq, &wait);
 	}
-	cpu_relax();
-	mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
 	return 0;
 }
 
+/*
+ * Very quick trylock, this does not spin or schedule.  It returns
+ * 1 with the spinlock held if it was able to take the lock, or it
+ * returns zero if it was unable to take the lock.
+ *
+ * After this call, scheduling is not safe without first calling
+ * btrfs_set_lock_blocking()
+ */
 int btrfs_try_tree_lock(struct extent_buffer *eb)
 {
-	return mutex_trylock(&eb->mutex);
+	if (spin_trylock(&eb->lock)) {
+		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
+			/*
+			 * we've got the spinlock, but the real owner is
+			 * blocking.  Drop the spinlock and return failure.
+			 */
+			spin_unlock(&eb->lock);
+			return 0;
+		}
+		return 1;
+	}
+	/* someone else has the spinlock; give up */
+	return 0;
 }
 
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
-	mutex_unlock(&eb->mutex);
+	/*
+	 * if we were a blocking owner, we don't have the spinlock held;
+	 * just clear the bit and look for waiters
+	 */
+	if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
+		smp_mb__after_clear_bit();
+	else
+		spin_unlock(&eb->lock);
+
+	if (waitqueue_active(&eb->lock_wq))
+		wake_up(&eb->lock_wq);
 	return 0;
 }
 
 int btrfs_tree_locked(struct extent_buffer *eb)
 {
-	return mutex_is_locked(&eb->mutex);
+	return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
+			spin_is_locked(&eb->lock);
 }
 
 /*
@@ -75,12 +245,14 @@ int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
 {
 	int i;
 	struct extent_buffer *eb;
+
 	for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
 		eb = path->nodes[i];
 		if (!eb)
 			break;
 		smp_mb();
-		if (!list_empty(&eb->mutex.wait_list))
+		if (spin_is_contended(&eb->lock) ||
+		    waitqueue_active(&eb->lock_wq))
 			return 1;
 	}
 	return 0;
......
@@ -22,6 +22,12 @@
 int btrfs_tree_lock(struct extent_buffer *eb);
 int btrfs_tree_unlock(struct extent_buffer *eb);
 int btrfs_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_lock(struct extent_buffer *eb);
+int btrfs_try_spin_lock(struct extent_buffer *eb);
 int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
+
+void btrfs_set_lock_blocking(struct extent_buffer *eb);
+void btrfs_clear_lock_blocking(struct extent_buffer *eb);
+
 #endif
@@ -74,6 +74,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 	u32 nritems;
 
 	root_node = btrfs_lock_root_node(root);
+	btrfs_set_lock_blocking(root_node);
 	nritems = btrfs_header_nritems(root_node);
 	root->defrag_max.objectid = 0;
 	/* from above we know this is not a leaf */
......
@@ -1615,6 +1615,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 
 			btrfs_tree_lock(next);
 			clean_tree_block(trans, root, next);
+			btrfs_set_lock_blocking(next);
 			btrfs_wait_tree_block_writeback(next);
 			btrfs_tree_unlock(next);
 
@@ -1661,6 +1662,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 			next = path->nodes[*level];
 			btrfs_tree_lock(next);
 			clean_tree_block(trans, root, next);
+			btrfs_set_lock_blocking(next);
 			btrfs_wait_tree_block_writeback(next);
 			btrfs_tree_unlock(next);
 
@@ -1718,6 +1720,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 
 				btrfs_tree_lock(next);
 				clean_tree_block(trans, root, next);
+				btrfs_set_lock_blocking(next);
 				btrfs_wait_tree_block_writeback(next);
 				btrfs_tree_unlock(next);
 
@@ -1790,6 +1793,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 
 		btrfs_tree_lock(next);
 		clean_tree_block(trans, log, next);
+		btrfs_set_lock_blocking(next);
 		btrfs_wait_tree_block_writeback(next);
 		btrfs_tree_unlock(next);
 
......