Commit 3e04e7f1 authored by Josef Bacik

Btrfs: handle errors in compression submission path

I noticed we would deadlock if we aborted a transaction while doing
compressed io.  This is because we don't unlock our pages if something goes
horribly wrong.  To fix this we need to make sure that we call
extent_clear_unlock_delalloc in order to unlock all the pages.  If we have
to cow in the async submission thread we need to make sure to unlock our
locked_page as the cow error path will not unlock the locked page as it
depends on the caller to unlock that page.  With this patch we no longer
deadlock on the page lock when we have an aborted transaction.  Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
parent 70afa399
...@@ -608,7 +608,7 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -608,7 +608,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
if (list_empty(&async_cow->extents)) if (list_empty(&async_cow->extents))
return 0; return 0;
again:
while (!list_empty(&async_cow->extents)) { while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next, async_extent = list_entry(async_cow->extents.next,
struct async_extent, list); struct async_extent, list);
...@@ -648,6 +648,8 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -648,6 +648,8 @@ static noinline int submit_compressed_extents(struct inode *inode,
async_extent->ram_size - 1, async_extent->ram_size - 1,
btrfs_get_extent, btrfs_get_extent,
WB_SYNC_ALL); WB_SYNC_ALL);
else if (ret)
unlock_page(async_cow->locked_page);
kfree(async_extent); kfree(async_extent);
cond_resched(); cond_resched();
continue; continue;
...@@ -672,6 +674,7 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -672,6 +674,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
if (ret) { if (ret) {
int i; int i;
for (i = 0; i < async_extent->nr_pages; i++) { for (i = 0; i < async_extent->nr_pages; i++) {
WARN_ON(async_extent->pages[i]->mapping); WARN_ON(async_extent->pages[i]->mapping);
page_cache_release(async_extent->pages[i]); page_cache_release(async_extent->pages[i]);
...@@ -679,12 +682,10 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -679,12 +682,10 @@ static noinline int submit_compressed_extents(struct inode *inode,
kfree(async_extent->pages); kfree(async_extent->pages);
async_extent->nr_pages = 0; async_extent->nr_pages = 0;
async_extent->pages = NULL; async_extent->pages = NULL;
unlock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
if (ret == -ENOSPC) if (ret == -ENOSPC)
goto retry; goto retry;
goto out_free; /* JDM: Requeue? */ goto out_free;
} }
/* /*
...@@ -696,7 +697,8 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -696,7 +697,8 @@ static noinline int submit_compressed_extents(struct inode *inode,
async_extent->ram_size - 1, 0); async_extent->ram_size - 1, 0);
em = alloc_extent_map(); em = alloc_extent_map();
BUG_ON(!em); /* -ENOMEM */ if (!em)
goto out_free_reserve;
em->start = async_extent->start; em->start = async_extent->start;
em->len = async_extent->ram_size; em->len = async_extent->ram_size;
em->orig_start = em->start; em->orig_start = em->start;
...@@ -728,6 +730,9 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -728,6 +730,9 @@ static noinline int submit_compressed_extents(struct inode *inode,
async_extent->ram_size - 1, 0); async_extent->ram_size - 1, 0);
} }
if (ret)
goto out_free_reserve;
ret = btrfs_add_ordered_extent_compress(inode, ret = btrfs_add_ordered_extent_compress(inode,
async_extent->start, async_extent->start,
ins.objectid, ins.objectid,
...@@ -735,7 +740,8 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -735,7 +740,8 @@ static noinline int submit_compressed_extents(struct inode *inode,
ins.offset, ins.offset,
BTRFS_ORDERED_COMPRESSED, BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type); async_extent->compress_type);
BUG_ON(ret); /* -ENOMEM */ if (ret)
goto out_free_reserve;
/* /*
* clear dirty, set writeback and unlock the pages. * clear dirty, set writeback and unlock the pages.
...@@ -756,18 +762,30 @@ static noinline int submit_compressed_extents(struct inode *inode, ...@@ -756,18 +762,30 @@ static noinline int submit_compressed_extents(struct inode *inode,
ins.objectid, ins.objectid,
ins.offset, async_extent->pages, ins.offset, async_extent->pages,
async_extent->nr_pages); async_extent->nr_pages);
BUG_ON(ret); /* -ENOMEM */
alloc_hint = ins.objectid + ins.offset; alloc_hint = ins.objectid + ins.offset;
kfree(async_extent); kfree(async_extent);
if (ret)
goto out;
cond_resched(); cond_resched();
} }
ret = 0; ret = 0;
out: out:
return ret; return ret;
out_free_reserve:
btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free: out_free:
extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_CLEAR_UNLOCK_PAGE |
EXTENT_CLEAR_UNLOCK |
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
kfree(async_extent); kfree(async_extent);
goto out; goto again;
} }
static u64 get_extent_allocation_hint(struct inode *inode, u64 start, static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.