Commit c9fa5630 authored by Dave Chinner, committed by Dave Chinner

xfs: use alloc_pages_bulk_array() for buffers

Because it's more efficient than allocating pages one at a time in a
loop.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent 07b5c5ad
...@@ -386,10 +386,7 @@ xfs_buf_alloc_pages( ...@@ -386,10 +386,7 @@ xfs_buf_alloc_pages(
xfs_buf_flags_t flags) xfs_buf_flags_t flags)
{ {
gfp_t gfp_mask = xb_to_gfp(flags); gfp_t gfp_mask = xb_to_gfp(flags);
size_t size; long filled = 0;
size_t offset;
size_t nbytes;
int i;
int error; int error;
/* Assure zeroed buffer for non-read cases. */ /* Assure zeroed buffer for non-read cases. */
...@@ -400,50 +397,39 @@ xfs_buf_alloc_pages( ...@@ -400,50 +397,39 @@ xfs_buf_alloc_pages(
if (unlikely(error)) if (unlikely(error))
return error; return error;
offset = bp->b_offset;
bp->b_flags |= _XBF_PAGES; bp->b_flags |= _XBF_PAGES;
for (i = 0; i < bp->b_page_count; i++) { /*
struct page *page; * Bulk filling of pages can take multiple calls. Not filling the entire
uint retries = 0; * array is not an allocation failure, so don't back off if we get at
retry: * least one extra page.
page = alloc_page(gfp_mask); */
if (unlikely(page == NULL)) { for (;;) {
if (flags & XBF_READ_AHEAD) { long last = filled;
bp->b_page_count = i;
error = -ENOMEM;
goto out_free_pages;
}
/* filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
* This could deadlock. bp->b_pages);
* if (filled == bp->b_page_count) {
* But until all the XFS lowlevel code is revamped to XFS_STATS_INC(bp->b_mount, xb_page_found);
* handle buffer allocation failures we can't do much. break;
*/
if (!(++retries % 100))
xfs_err(NULL,
"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
current->comm, current->pid,
__func__, gfp_mask);
XFS_STATS_INC(bp->b_mount, xb_page_retries);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
} }
XFS_STATS_INC(bp->b_mount, xb_page_found); if (filled != last)
continue;
nbytes = min_t(size_t, size, PAGE_SIZE - offset); if (flags & XBF_READ_AHEAD) {
size -= nbytes; error = -ENOMEM;
bp->b_pages[i] = page; goto out_free_pages;
offset = 0; }
XFS_STATS_INC(bp->b_mount, xb_page_retries);
congestion_wait(BLK_RW_ASYNC, HZ / 50);
} }
return 0; return 0;
out_free_pages: out_free_pages:
for (i = 0; i < bp->b_page_count; i++) while (--filled >= 0)
__free_page(bp->b_pages[i]); __free_page(bp->b_pages[filled]);
bp->b_flags &= ~_XBF_PAGES; bp->b_flags &= ~_XBF_PAGES;
return error; return error;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment