Commit 07b5c5ad authored by Dave Chinner's avatar Dave Chinner Committed by Dave Chinner

xfs: use xfs_buf_alloc_pages for uncached buffers

Use the newly factored out page allocation code. This adds
automatic buffer zeroing for non-read uncached buffers.

This also allows us to greatly simplify the error handling in
xfs_buf_get_uncached(). Because xfs_buf_alloc_pages() cleans up
partial allocation failure, we can just call xfs_buf_free() in all
error cases now to clean up after failures.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent 0a683794
...@@ -43,7 +43,6 @@ xfs_get_aghdr_buf( ...@@ -43,7 +43,6 @@ xfs_get_aghdr_buf(
if (error) if (error)
return error; return error;
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
bp->b_bn = blkno; bp->b_bn = blkno;
bp->b_maps[0].bm_bn = blkno; bp->b_maps[0].bm_bn = blkno;
bp->b_ops = ops; bp->b_ops = ops;
......
...@@ -973,7 +973,7 @@ xfs_buf_get_uncached( ...@@ -973,7 +973,7 @@ xfs_buf_get_uncached(
struct xfs_buf **bpp) struct xfs_buf **bpp)
{ {
unsigned long page_count; unsigned long page_count;
int error, i; int error;
struct xfs_buf *bp; struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
...@@ -982,41 +982,26 @@ xfs_buf_get_uncached( ...@@ -982,41 +982,26 @@ xfs_buf_get_uncached(
/* flags might contain irrelevant bits, pass only what we care about */ /* flags might contain irrelevant bits, pass only what we care about */
error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp); error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
if (error) if (error)
goto fail; return error;
page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
error = _xfs_buf_get_pages(bp, page_count); error = xfs_buf_alloc_pages(bp, page_count, flags);
if (error) if (error)
goto fail_free_buf; goto fail_free_buf;
for (i = 0; i < page_count; i++) {
bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
if (!bp->b_pages[i]) {
error = -ENOMEM;
goto fail_free_mem;
}
}
bp->b_flags |= _XBF_PAGES;
error = _xfs_buf_map_pages(bp, 0); error = _xfs_buf_map_pages(bp, 0);
if (unlikely(error)) { if (unlikely(error)) {
xfs_warn(target->bt_mount, xfs_warn(target->bt_mount,
"%s: failed to map pages", __func__); "%s: failed to map pages", __func__);
goto fail_free_mem; goto fail_free_buf;
} }
trace_xfs_buf_get_uncached(bp, _RET_IP_); trace_xfs_buf_get_uncached(bp, _RET_IP_);
*bpp = bp; *bpp = bp;
return 0; return 0;
fail_free_mem: fail_free_buf:
while (--i >= 0) xfs_buf_free(bp);
__free_page(bp->b_pages[i]);
_xfs_buf_free_pages(bp);
fail_free_buf:
xfs_buf_free_maps(bp);
kmem_cache_free(xfs_buf_zone, bp);
fail:
return error; return error;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment