Commit 1cea335d authored by Christoph Hellwig, committed by Darrick J. Wong

iomap: fix sub-page uptodate handling

bio completions can race when a page spans more than one file system
block.  Add a spinlock to synchronize marking the page uptodate.

Fixes: 9dc55f13 ("iomap: add support for sub-pagesize buffered I/O without buffer heads")
Reported-by: Jan Stancek <jstancek@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 88cfd30e
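
For illustration, a minimal user-space sketch of the lost-update race described in the commit message above, not kernel code: two "completion" threads each mark their own block uptodate and then scan the per-block bitmap; without a shared lock, both scans can observe a partially updated bitmap, so neither thread ever marks the page uptodate. The block count, the helper names (set_block_uptodate, completion), and the pthread mutex standing in for the new uptodate_lock are all illustrative assumptions, not part of the patch.

/*
 * Minimal user-space sketch of the race this patch closes -- NOT kernel code.
 * Build with: cc -pthread race_sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BLOCKS_PER_PAGE 2

static bool block_uptodate[BLOCKS_PER_PAGE];	/* per-block uptodate bits */
static bool page_uptodate;			/* stands in for PageUptodate */
static pthread_mutex_t uptodate_lock = PTHREAD_MUTEX_INITIALIZER;

/* Rough analogue of the locked iomap_iop_set_range_uptodate(). */
static void set_block_uptodate(int block)
{
	bool all = true;
	int i;

	/* Serialize "set my bit" and "scan all bits" against other completions. */
	pthread_mutex_lock(&uptodate_lock);
	block_uptodate[block] = true;
	for (i = 0; i < BLOCKS_PER_PAGE; i++)
		if (!block_uptodate[i])
			all = false;
	if (all)
		page_uptodate = true;
	pthread_mutex_unlock(&uptodate_lock);
}

/* One thread per completed block, mimicking concurrent bio completions. */
static void *completion(void *arg)
{
	set_block_uptodate((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[BLOCKS_PER_PAGE];
	int i;

	for (i = 0; i < BLOCKS_PER_PAGE; i++)
		pthread_create(&t[i], NULL, completion, (void *)(long)i);
	for (i = 0; i < BLOCKS_PER_PAGE; i++)
		pthread_join(t[i], NULL);

	/*
	 * With the lock this always prints "uptodate".  Without it, the two
	 * set-then-scan sequences can interleave so that each thread misses
	 * the other's bit and neither marks the page uptodate.
	 */
	printf("page is %s\n", page_uptodate ? "uptodate" : "NOT uptodate");
	return 0;
}

The kernel patch uses spin_lock_irqsave() rather than a plain spinlock or mutex, presumably because the bitmap is also updated from the read completion path, which can run in interrupt context; the pthread mutex here is only a user-space stand-in.
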
@@ -28,6 +28,7 @@
 struct iomap_page {
         atomic_t                read_count;
         atomic_t                write_count;
+        spinlock_t              uptodate_lock;
         DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
 };

@@ -51,6 +52,7 @@ iomap_page_create(struct inode *inode, struct page *page)
         iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
         atomic_set(&iop->read_count, 0);
         atomic_set(&iop->write_count, 0);
+        spin_lock_init(&iop->uptodate_lock);
         bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

         /*
@@ -139,25 +141,38 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 }

 static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 {
         struct iomap_page *iop = to_iomap_page(page);
         struct inode *inode = page->mapping->host;
         unsigned first = off >> inode->i_blkbits;
         unsigned last = (off + len - 1) >> inode->i_blkbits;
-        unsigned int i;
         bool uptodate = true;
+        unsigned long flags;
+        unsigned int i;

-        if (iop) {
-                for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
-                        if (i >= first && i <= last)
-                                set_bit(i, iop->uptodate);
-                        else if (!test_bit(i, iop->uptodate))
-                                uptodate = false;
-                }
+        spin_lock_irqsave(&iop->uptodate_lock, flags);
+        for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
+                if (i >= first && i <= last)
+                        set_bit(i, iop->uptodate);
+                else if (!test_bit(i, iop->uptodate))
+                        uptodate = false;
         }

-        if (uptodate && !PageError(page))
-                SetPageUptodate(page);
+        if (uptodate)
+                SetPageUptodate(page);
+        spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+}
+
+static void
+iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+{
+        if (PageError(page))
+                return;
+
+        if (page_has_private(page))
+                iomap_iop_set_range_uptodate(page, off, len);
+        else
+                SetPageUptodate(page);
 }