Commit 0a195b91 authored by Matthew Wilcox (Oracle), committed by Darrick J. Wong

iomap: Support arbitrarily many blocks per page

Size the uptodate array dynamically to support larger pages in the
page cache.  With a 64kB page, we're only saving 8 bytes per page today,
but with a 2MB maximum page size, we'd have to allocate more than 4kB
per page.  Add a few debugging assertions.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent b21866f5
#include "../internal.h"
/* /*
* Structure allocated for each page when block size < PAGE_SIZE to track * Structure allocated for each page or THP when block size < page size
* sub-page uptodate status and I/O completions. * to track sub-page uptodate status and I/O completions.
*/ */
struct iomap_page { struct iomap_page {
atomic_t read_count; atomic_t read_count;
atomic_t write_count; atomic_t write_count;
spinlock_t uptodate_lock; spinlock_t uptodate_lock;
DECLARE_BITMAP(uptodate, PAGE_SIZE / 512); unsigned long uptodate[];
}; };
static inline struct iomap_page *to_iomap_page(struct page *page) static inline struct iomap_page *to_iomap_page(struct page *page)
{ {
/*
* per-block data is stored in the head page. Callers should
* not be dealing with tail pages (and if they are, they can
* call thp_head() first.
*/
VM_BUG_ON_PGFLAGS(PageTail(page), page);
if (page_has_private(page)) if (page_has_private(page))
return (struct iomap_page *)page_private(page); return (struct iomap_page *)page_private(page);
return NULL; return NULL;
...@@ -45,11 +52,13 @@ static struct iomap_page * ...@@ -45,11 +52,13 @@ static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page) iomap_page_create(struct inode *inode, struct page *page)
{ {
struct iomap_page *iop = to_iomap_page(page); struct iomap_page *iop = to_iomap_page(page);
unsigned int nr_blocks = i_blocks_per_page(inode, page);
if (iop || i_blocks_per_page(inode, page) <= 1) if (iop || nr_blocks <= 1)
return iop; return iop;
iop = kzalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
GFP_NOFS | __GFP_NOFAIL);
spin_lock_init(&iop->uptodate_lock); spin_lock_init(&iop->uptodate_lock);
attach_page_private(page, iop); attach_page_private(page, iop);
return iop; return iop;
...@@ -59,11 +68,14 @@ static void ...@@ -59,11 +68,14 @@ static void
iomap_page_release(struct page *page) iomap_page_release(struct page *page)
{ {
struct iomap_page *iop = detach_page_private(page); struct iomap_page *iop = detach_page_private(page);
unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
if (!iop) if (!iop)
return; return;
WARN_ON_ONCE(atomic_read(&iop->read_count)); WARN_ON_ONCE(atomic_read(&iop->read_count));
WARN_ON_ONCE(atomic_read(&iop->write_count)); WARN_ON_ONCE(atomic_read(&iop->write_count));
WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
PageUptodate(page));
kfree(iop); kfree(iop);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment