Commit 9ec72677 authored by Josef Bacik, committed by Chris Mason

Btrfs: stop using GFP_ATOMIC when allocating rewind ebs

There is no reason we can't just set the path to blocking and then do normal
GFP_NOFS allocations for these extent buffers.  Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
parent db7f3436
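
The change has two halves. In ctree.c, the spinning tree locks (under which a task may not sleep) are converted to their blocking form before the rewind extent buffers are allocated; in extent_io.c, the allocations themselves can then drop GFP_ATOMIC for plain GFP_NOFS. A minimal sketch of the locking half, assembled from the helpers that appear in the diff (illustrative, not a verbatim excerpt of the patch):

	/* A spinning read lock is held on eb: sleeping is forbidden here. */
	btrfs_set_path_blocking(path);                   /* path locks -> blocking  */
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); /* eb's lock  -> blocking  */

	eb_rewin = btrfs_clone_extent_buffer(eb);        /* may now sleep: GFP_NOFS */

	btrfs_tree_read_unlock_blocking(eb);             /* a blocking lock must be
	                                                    released with the
	                                                    _blocking unlock variant */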
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1191,8 +1191,8 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
  * is freed (its refcount is decremented).
  */
 static struct extent_buffer *
-tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
-		    u64 time_seq)
+tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
+		    struct extent_buffer *eb, u64 time_seq)
 {
 	struct extent_buffer *eb_rewin;
 	struct tree_mod_elem *tm;
@@ -1207,12 +1207,15 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 	if (!tm)
 		return eb;
 
+	btrfs_set_path_blocking(path);
+	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+
 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
 		BUG_ON(tm->slot != 0);
 		eb_rewin = alloc_dummy_extent_buffer(eb->start,
 						fs_info->tree_root->nodesize);
 		if (!eb_rewin) {
-			btrfs_tree_read_unlock(eb);
+			btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
 			return NULL;
 		}
@@ -1224,13 +1227,14 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 	} else {
 		eb_rewin = btrfs_clone_extent_buffer(eb);
 		if (!eb_rewin) {
-			btrfs_tree_read_unlock(eb);
+			btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
 			return NULL;
 		}
 	}
 
-	btrfs_tree_read_unlock(eb);
+	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
+	btrfs_tree_read_unlock_blocking(eb);
 	free_extent_buffer(eb);
 
 	extent_buffer_get(eb_rewin);
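
Note the pairing rule visible in the hunks above: once btrfs_set_lock_blocking_rw() has converted the read lock, every exit path, including both error paths, must release it with btrfs_tree_read_unlock_blocking() rather than btrfs_tree_read_unlock(). The common path additionally calls btrfs_clear_path_blocking() to restore the rest of the path's locks to spinning before returning.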
@@ -1294,8 +1298,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		free_extent_buffer(eb_root);
 		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
 	} else {
+		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
 		eb = btrfs_clone_extent_buffer(eb_root);
-		btrfs_tree_read_unlock(eb_root);
+		btrfs_tree_read_unlock_blocking(eb_root);
 		free_extent_buffer(eb_root);
 	}
@@ -2779,7 +2784,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
 				btrfs_clear_path_blocking(p, b,
 							  BTRFS_READ_LOCK);
 			}
-			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
+			b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
 			if (!b) {
 				ret = -ENOMEM;
 				goto done;
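
tree_mod_log_rewind() gains the btrfs_path argument precisely so it can flip the whole path to blocking before allocating; the call site in btrfs_search_old_slot() simply passes along the path it already holds.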
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4340,12 +4340,12 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 	struct extent_buffer *new;
 	unsigned long num_pages = num_extent_pages(src->start, src->len);
 
-	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
+	new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
 	if (new == NULL)
 		return NULL;
 
 	for (i = 0; i < num_pages; i++) {
-		p = alloc_page(GFP_ATOMIC);
+		p = alloc_page(GFP_NOFS);
 		if (!p) {
 			btrfs_release_extent_buffer(new);
 			return NULL;
@@ -4369,12 +4369,12 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
 	unsigned long num_pages = num_extent_pages(0, len);
 	unsigned long i;
 
-	eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
+	eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
 	if (!eb)
 		return NULL;
 
 	for (i = 0; i < num_pages; i++) {
-		eb->pages[i] = alloc_page(GFP_ATOMIC);
+		eb->pages[i] = alloc_page(GFP_NOFS);
 		if (!eb->pages[i])
 			goto err;
 	}
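
For context on the allocation flags: GFP_ATOMIC never sleeps and dips into the kernel's emergency reserves, so it is failure-prone and wasteful when the caller could in fact sleep. GFP_NOFS may sleep and enter reclaim but will not recurse into filesystem code, which is the appropriate constraint inside btrfs itself. With the callers now holding blocking locks, these two allocation sites have no reason left to be atomic.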