Commit db7f3436 authored by Josef Bacik, committed by Chris Mason

Btrfs: deal with enomem in the rewind path

We can get ENOMEM trying to allocate dummy bufs for the rewind operation of the
tree mod log.  Instead of BUG_ON()'ing in this case pass up ENOMEM.  I looked
back through the callers and I'm pretty sure I got everybody who did BUG_ON(ret)
in this path.  Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
parent ebdad913
...@@ -1211,7 +1211,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, ...@@ -1211,7 +1211,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
BUG_ON(tm->slot != 0); BUG_ON(tm->slot != 0);
eb_rewin = alloc_dummy_extent_buffer(eb->start, eb_rewin = alloc_dummy_extent_buffer(eb->start,
fs_info->tree_root->nodesize); fs_info->tree_root->nodesize);
BUG_ON(!eb_rewin); if (!eb_rewin) {
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
return NULL;
}
btrfs_set_header_bytenr(eb_rewin, eb->start); btrfs_set_header_bytenr(eb_rewin, eb->start);
btrfs_set_header_backref_rev(eb_rewin, btrfs_set_header_backref_rev(eb_rewin,
btrfs_header_backref_rev(eb)); btrfs_header_backref_rev(eb));
...@@ -1219,7 +1223,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, ...@@ -1219,7 +1223,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
} else { } else {
eb_rewin = btrfs_clone_extent_buffer(eb); eb_rewin = btrfs_clone_extent_buffer(eb);
BUG_ON(!eb_rewin); if (!eb_rewin) {
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
return NULL;
}
} }
btrfs_tree_read_unlock(eb); btrfs_tree_read_unlock(eb);
...@@ -2772,6 +2780,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, ...@@ -2772,6 +2780,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
BTRFS_READ_LOCK); BTRFS_READ_LOCK);
} }
b = tree_mod_log_rewind(root->fs_info, b, time_seq); b = tree_mod_log_rewind(root->fs_info, b, time_seq);
if (!b) {
ret = -ENOMEM;
goto done;
}
p->locks[level] = BTRFS_READ_LOCK; p->locks[level] = BTRFS_READ_LOCK;
p->nodes[level] = b; p->nodes[level] = b;
} else { } else {
......
...@@ -4222,6 +4222,76 @@ static void __free_extent_buffer(struct extent_buffer *eb) ...@@ -4222,6 +4222,76 @@ static void __free_extent_buffer(struct extent_buffer *eb)
kmem_cache_free(extent_buffer_cache, eb); kmem_cache_free(extent_buffer_cache, eb);
} }
/*
 * Nonzero while the extent buffer still has I/O in flight: outstanding
 * page I/O (io_pages), writeback in progress, or dirty data.
 */
static int extent_buffer_under_io(struct extent_buffer *eb)
{
	if (atomic_read(&eb->io_pages))
		return 1;
	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
		return 1;
	return test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags) != 0;
}
/*
 * Helper for releasing extent buffer page.
 *
 * Drops every page reference held by @eb from @start_idx through the last
 * page of the buffer, walking backwards.  For mapped (non-dummy) buffers the
 * page's private pointer is detached first, under mapping->private_lock, but
 * only if the page is still attached to this eb (it may already belong to a
 * newly allocated eb covering the same range).
 */
static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
unsigned long start_idx)
{
unsigned long index;
unsigned long num_pages;
struct page *page;
/* Dummy ebs have no mapping to detach from, so skip the unmapping work. */
int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
/* Caller must guarantee all I/O on this buffer has completed. */
BUG_ON(extent_buffer_under_io(eb));
num_pages = num_extent_pages(eb->start, eb->len);
index = start_idx + num_pages;
/* Nothing to release; also guards the backwards walk below. */
if (start_idx >= index)
return;
do {
index--;
page = extent_buffer_page(eb, index);
if (page && mapped) {
spin_lock(&page->mapping->private_lock);
/*
 * We do this since we'll remove the pages after we've
 * removed the eb from the radix tree, so we could race
 * and have this page now attached to the new eb. So
 * only clear page_private if it's still connected to
 * this eb.
 */
if (PagePrivate(page) &&
page->private == (unsigned long)eb) {
BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
BUG_ON(PageDirty(page));
BUG_ON(PageWriteback(page));
/*
 * We need to make sure we haven't been attached
 * to a new eb.
 */
ClearPagePrivate(page);
set_page_private(page, 0);
/* One for the page private */
page_cache_release(page);
}
spin_unlock(&page->mapping->private_lock);
}
if (page) {
/* One for when we alloced the page */
page_cache_release(page);
}
} while (index != start_idx);
}
/*
 * Helper for releasing the extent buffer.
 *
 * Drops all page references held by @eb and then frees the extent_buffer
 * structure itself.  Caller must hold the last reference to @eb.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
btrfs_release_extent_buffer_page(eb, 0);
__free_extent_buffer(eb);
}
static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, u64 start,
unsigned long len, unsigned long len,
...@@ -4276,7 +4346,10 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) ...@@ -4276,7 +4346,10 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
p = alloc_page(GFP_ATOMIC); p = alloc_page(GFP_ATOMIC);
BUG_ON(!p); if (!p) {
btrfs_release_extent_buffer(new);
return NULL;
}
attach_extent_buffer_page(new, p); attach_extent_buffer_page(new, p);
WARN_ON(PageDirty(p)); WARN_ON(PageDirty(p));
SetPageUptodate(p); SetPageUptodate(p);
...@@ -4317,76 +4390,6 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) ...@@ -4317,76 +4390,6 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
return NULL; return NULL;
} }
/*
 * Nonzero while the extent buffer still has I/O in flight: outstanding
 * page I/O (io_pages), writeback in progress, or dirty data.
 */
static int extent_buffer_under_io(struct extent_buffer *eb)
{
return (atomic_read(&eb->io_pages) ||
test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}
/*
 * Helper for releasing extent buffer page.
 *
 * Drops every page reference held by @eb from @start_idx through the last
 * page of the buffer, walking backwards.  For mapped (non-dummy) buffers the
 * page's private pointer is detached first, under mapping->private_lock, but
 * only if the page is still attached to this eb (it may already belong to a
 * newly allocated eb covering the same range).
 */
static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
unsigned long start_idx)
{
unsigned long index;
unsigned long num_pages;
struct page *page;
/* Dummy ebs have no mapping to detach from, so skip the unmapping work. */
int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
/* Caller must guarantee all I/O on this buffer has completed. */
BUG_ON(extent_buffer_under_io(eb));
num_pages = num_extent_pages(eb->start, eb->len);
index = start_idx + num_pages;
/* Nothing to release; also guards the backwards walk below. */
if (start_idx >= index)
return;
do {
index--;
page = extent_buffer_page(eb, index);
if (page && mapped) {
spin_lock(&page->mapping->private_lock);
/*
 * We do this since we'll remove the pages after we've
 * removed the eb from the radix tree, so we could race
 * and have this page now attached to the new eb. So
 * only clear page_private if it's still connected to
 * this eb.
 */
if (PagePrivate(page) &&
page->private == (unsigned long)eb) {
BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
BUG_ON(PageDirty(page));
BUG_ON(PageWriteback(page));
/*
 * We need to make sure we haven't been attached
 * to a new eb.
 */
ClearPagePrivate(page);
set_page_private(page, 0);
/* One for the page private */
page_cache_release(page);
}
spin_unlock(&page->mapping->private_lock);
}
if (page) {
/* One for when we alloced the page */
page_cache_release(page);
}
} while (index != start_idx);
}
/*
 * Helper for releasing the extent buffer.
 *
 * Drops all page references held by @eb and then frees the extent_buffer
 * structure itself.  Caller must hold the last reference to @eb.
 */
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
btrfs_release_extent_buffer_page(eb, 0);
__free_extent_buffer(eb);
}
static void check_buffer_tree_ref(struct extent_buffer *eb) static void check_buffer_tree_ref(struct extent_buffer *eb)
{ {
int refs; int refs;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment