Commit 2dd3e67b authored by Chris Mason's avatar Chris Mason

Btrfs: More throttle tuning

* Make walk_down_tree wake up throttled tasks more often
* Make walk_down_tree call cond_resched during long loops
* As the size of the ref cache grows, wait longer in throttle
* Get rid of the reada code in walk_down_tree, the leaves don't get
  read anymore, thanks to the ref cache.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 65b51a00
...@@ -188,13 +188,6 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, ...@@ -188,13 +188,6 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
btrfs_csum_final(crc, result); btrfs_csum_final(crc, result);
if (verify) { if (verify) {
int from_this_trans = 0;
if (root->fs_info->running_transaction &&
btrfs_header_generation(buf) ==
root->fs_info->running_transaction->transid)
from_this_trans = 1;
/* FIXME, this is not good */ /* FIXME, this is not good */
if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) { if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
u32 val; u32 val;
...@@ -203,11 +196,9 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, ...@@ -203,11 +196,9 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE); read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
printk("btrfs: %s checksum verify failed on %llu " printk("btrfs: %s checksum verify failed on %llu "
"wanted %X found %X from_this_trans %d " "wanted %X found %X level %d\n",
"level %d\n",
root->fs_info->sb->s_id, root->fs_info->sb->s_id,
buf->start, val, found, from_this_trans, buf->start, val, found, btrfs_header_level(buf));
btrfs_header_level(buf));
return 1; return 1;
} }
} else { } else {
......
...@@ -2369,6 +2369,11 @@ static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans, ...@@ -2369,6 +2369,11 @@ static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
leaf_owner, leaf_generation, leaf_owner, leaf_generation,
key.objectid, key.offset, 0); key.objectid, key.offset, 0);
mutex_unlock(&root->fs_info->alloc_mutex); mutex_unlock(&root->fs_info->alloc_mutex);
atomic_inc(&root->fs_info->throttle_gen);
wake_up(&root->fs_info->transaction_throttle);
cond_resched();
BUG_ON(ret); BUG_ON(ret);
} }
return 0; return 0;
...@@ -2389,6 +2394,11 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans, ...@@ -2389,6 +2394,11 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
ref->owner, ref->generation, ref->owner, ref->generation,
info->objectid, info->offset, 0); info->objectid, info->offset, 0);
mutex_unlock(&root->fs_info->alloc_mutex); mutex_unlock(&root->fs_info->alloc_mutex);
atomic_inc(&root->fs_info->throttle_gen);
wake_up(&root->fs_info->transaction_throttle);
cond_resched();
BUG_ON(ret); BUG_ON(ret);
info++; info++;
} }
...@@ -2396,51 +2406,6 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans, ...@@ -2396,51 +2406,6 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
return 0; return 0;
} }
/*
 * Issue readahead for tree blocks pointed to by 'node', starting at 'slot'.
 * (This helper is removed by this commit: the leaves are no longer read
 * during walk_down_tree thanks to the ref cache, per the commit message.)
 *
 * Only proceeds when 'node' is at level 0 (the level check below returns
 * early otherwise).  Blocks whose start is more than 32K away from the end
 * of the previously queued block (in either direction) are skipped, and the
 * scan stops entirely after 32 such skips.  For every slot other than the
 * starting one, the extent's reference count is looked up and blocks that
 * are shared (refs != 1) are skipped as well.
 */
static void noinline reada_walk_down(struct btrfs_root *root,
struct extent_buffer *node,
int slot)
{
u64 bytenr;
u64 last = 0; /* byte offset just past the last block queued for readahead */
u32 nritems;
u32 refs;
u32 blocksize;
int ret;
int i;
int level;
int skipped = 0; /* blocks passed over; bounds the scan to 32 skips */
nritems = btrfs_header_nritems(node);
level = btrfs_header_level(node);
/* only readahead from level-0 nodes */
if (level)
return;
for (i = slot; i < nritems && skipped < 32; i++) {
bytenr = btrfs_node_blockptr(node, i);
/* skip blocks more than 32K away from the last one we queued */
if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
(last > bytenr && last - bytenr > 32 * 1024))) {
skipped++;
continue;
}
/*
 * NOTE(review): level is always 0 here (see the early return
 * above), so this asks for the size of level -1 — confirm
 * btrfs_level_size's semantics for that argument.
 */
blocksize = btrfs_level_size(root, level - 1);
/* for slots past the starting one, only readahead unshared blocks */
if (i != slot) {
ret = lookup_extent_ref(NULL, root, bytenr,
blocksize, &refs);
BUG_ON(ret);
if (refs != 1) {
skipped++;
continue;
}
}
ret = readahead_tree_block(root, bytenr, blocksize,
btrfs_node_ptr_generation(node, i));
last = bytenr + blocksize;
/* be nice to the scheduler during long readahead runs */
cond_resched();
if (ret)
break;
}
}
int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len, int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
u32 *refs) u32 *refs)
{ {
...@@ -2549,6 +2514,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, ...@@ -2549,6 +2514,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
atomic_inc(&root->fs_info->throttle_gen); atomic_inc(&root->fs_info->throttle_gen);
wake_up(&root->fs_info->transaction_throttle); wake_up(&root->fs_info->transaction_throttle);
cond_resched();
continue; continue;
} }
...@@ -2578,8 +2544,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, ...@@ -2578,8 +2544,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) { if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
free_extent_buffer(next); free_extent_buffer(next);
if (path->slots[*level] == 0)
reada_walk_down(root, cur, path->slots[*level]);
next = read_tree_block(root, bytenr, blocksize, next = read_tree_block(root, bytenr, blocksize,
ptr_gen); ptr_gen);
cond_resched(); cond_resched();
...@@ -2601,6 +2565,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, ...@@ -2601,6 +2565,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
path->nodes[*level-1] = next; path->nodes[*level-1] = next;
*level = btrfs_header_level(next); *level = btrfs_header_level(next);
path->slots[*level] = 0; path->slots[*level] = 0;
cond_resched();
} }
out: out:
WARN_ON(*level < 0); WARN_ON(*level < 0);
......
...@@ -210,7 +210,9 @@ static noinline int wait_for_commit(struct btrfs_root *root, ...@@ -210,7 +210,9 @@ static noinline int wait_for_commit(struct btrfs_root *root,
static void throttle_on_drops(struct btrfs_root *root) static void throttle_on_drops(struct btrfs_root *root)
{ {
struct btrfs_fs_info *info = root->fs_info; struct btrfs_fs_info *info = root->fs_info;
int harder_count = 0;
harder:
if (atomic_read(&info->throttles)) { if (atomic_read(&info->throttles)) {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
int thr; int thr;
...@@ -226,6 +228,19 @@ static void throttle_on_drops(struct btrfs_root *root) ...@@ -226,6 +228,19 @@ static void throttle_on_drops(struct btrfs_root *root)
schedule(); schedule();
finish_wait(&info->transaction_throttle, &wait); finish_wait(&info->transaction_throttle, &wait);
} while (thr == atomic_read(&info->throttle_gen)); } while (thr == atomic_read(&info->throttle_gen));
harder_count++;
if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
harder_count < 2)
goto harder;
if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
harder_count < 10)
goto harder;
if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
harder_count < 20)
goto harder;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment