Commit 95f8e302 authored by Nick Piggin, committed by Lachlan McIlroy

[XFS] use scalable vmap API

Implement XFS's large buffer support with the new vmap APIs. See the vmap
rewrite (db64fe02) for some numbers. The biggest improvement from the new
APIs is avoiding the global kernel virtual address (KVA) allocation lock on
every call.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
parent d2859751
@@ -264,7 +264,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		vunmap(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -386,8 +386,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
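For context, a minimal sketch of the vm_map_ram()/vm_unmap_ram() pairing the
diff adopts, using the signatures introduced by the vmap rewrite (db64fe02).
The helper names here are hypothetical, not taken from xfs_buf.c: the mapper
passes -1 for the NUMA node to let the allocator choose, and the unmap side
must be given the same page count that was mapped.

/*
 * Illustrative sketch only -- helper names are hypothetical, not from
 * xfs_buf.c. It shows the vm_map_ram()/vm_unmap_ram() contract: the
 * caller keeps the page count and repeats it at unmap time, since the
 * mapping carries no size of its own.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_map_pages(struct page **pages, unsigned int count)
{
	/* -1 selects any NUMA node; returns NULL on failure. */
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

static void example_unmap_pages(void *addr, unsigned int count)
{
	/* Must pass the same count that was handed to vm_map_ram(). */
	vm_unmap_ram(addr, count);
}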