Commit ac8ee546 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: allow writeback on pages without buffer heads

Disable the IOMAP_F_BUFFER_HEAD flag on file systems with a block size
equal to the page size, and deal with pages without buffer heads in
writeback.  Thanks to the previous refactoring, this is basically
trivial now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 8e1f065b
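
For orientation before the diff, here is a small, self-contained user-space model of the two decisions the patch encodes: IOMAP_F_BUFFER_HEAD is only requested when blocks are smaller than a page, and ioend completion finishes a page directly when it carries no buffer heads. This is a hedged sketch, not XFS code; struct model_inode, wants_buffer_heads() and finish_page() are invented stand-ins.

/*
 * Hypothetical model of the decision this patch encodes: sub-page state
 * tracking via buffer heads is only needed when a page spans more than one
 * filesystem block; otherwise the page's own flags are enough.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE	4096u

struct model_inode {
	unsigned int	blocksize;	/* what i_blocksize(inode) returns */
};

/* Mirrors the new gating added to xfs_file_iomap_begin() below. */
static bool wants_buffer_heads(const struct model_inode *ip)
{
	return ip->blocksize < MODEL_PAGE_SIZE;
}

/* Mirrors the completion dispatch added to xfs_destroy_ioend() below. */
static void finish_page(const struct model_inode *ip, bool error)
{
	if (wants_buffer_heads(ip))
		printf("per-buffer completion: %u blocks per page\n",
		       MODEL_PAGE_SIZE / ip->blocksize);
	else
		printf("per-page completion%s\n",
		       error ? " (error recorded on the page and mapping)" : "");
}

int main(void)
{
	struct model_inode small = { .blocksize = 1024 };	/* 4 blocks per page */
	struct model_inode full  = { .blocksize = 4096 };	/* 1 block per page  */

	finish_page(&small, false);
	finish_page(&full, true);
	return 0;
}
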
@@ -79,6 +79,19 @@ xfs_find_daxdev_for_inode(
 	return mp->m_ddev_targp->bt_daxdev;
 }
 
+static void
+xfs_finish_page_writeback(
+	struct inode		*inode,
+	struct bio_vec		*bvec,
+	int			error)
+{
+	if (error) {
+		SetPageError(bvec->bv_page);
+		mapping_set_error(inode->i_mapping, -EIO);
+	}
+	end_page_writeback(bvec->bv_page);
+}
+
 /*
  * We're now finished for good with this page.  Update the page state via the
  * associated buffer_heads, paying attention to the start and end offsets that
@@ -91,7 +104,7 @@ xfs_find_daxdev_for_inode(
  * and buffers potentially freed after every call to end_buffer_async_write.
  */
 static void
-xfs_finish_page_writeback(
+xfs_finish_buffer_writeback(
 	struct inode		*inode,
 	struct bio_vec		*bvec,
 	int			error)
@@ -166,9 +179,12 @@ xfs_destroy_ioend(
 		next = bio->bi_private;
 
 		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bvec, bio, i)
-			xfs_finish_page_writeback(inode, bvec, error);
-
+		bio_for_each_segment_all(bvec, bio, i) {
+			if (page_has_buffers(bvec->bv_page))
+				xfs_finish_buffer_writeback(inode, bvec, error);
+			else
+				xfs_finish_page_writeback(inode, bvec, error);
+		}
 		bio_put(bio);
 	}
@@ -792,13 +808,16 @@ xfs_writepage_map(
 {
 	LIST_HEAD(submit_list);
 	struct xfs_ioend	*ioend, *next;
-	struct buffer_head	*bh;
+	struct buffer_head	*bh = NULL;
 	ssize_t			len = i_blocksize(inode);
 	uint64_t		file_offset;	/* file offset of page */
 	unsigned		poffset;	/* offset into page */
 	int			error = 0;
 	int			count = 0;
 
+	if (page_has_buffers(page))
+		bh = page_buffers(page);
+
 	/*
 	 * Walk the blocks on the page, and if we run off the end of the current
 	 * map or find the current map invalid, grab a new one. We only use
@@ -806,28 +825,34 @@ xfs_writepage_map(
 	 * the iteration through the page. This allows us to replace the
 	 * bufferhead with some other state tracking mechanism in future.
 	 */
-	file_offset = page_offset(page);
-	bh = page_buffers(page);
-	for (poffset = 0;
+	for (poffset = 0, file_offset = page_offset(page);
 	     poffset < PAGE_SIZE;
-	     poffset += len, file_offset += len, bh = bh->b_this_page) {
+	     poffset += len, file_offset += len) {
 		/* past the range we are writing, so nothing more to write. */
 		if (file_offset >= end_offset)
 			break;
 
-		if (!buffer_uptodate(bh)) {
+		if (bh && !buffer_uptodate(bh)) {
 			if (PageUptodate(page))
 				ASSERT(buffer_mapped(bh));
+			bh = bh->b_this_page;
 			continue;
 		}
 
 		error = xfs_map_blocks(wpc, inode, file_offset);
 		if (error)
 			break;
-		if (wpc->io_type == XFS_IO_HOLE)
+
+		if (wpc->io_type == XFS_IO_HOLE) {
+			if (bh)
+				bh = bh->b_this_page;
 			continue;
+		}
 
-		xfs_map_at_offset(inode, bh, &wpc->imap, file_offset);
+		if (bh) {
+			xfs_map_at_offset(inode, bh, &wpc->imap, file_offset);
+			bh = bh->b_this_page;
+		}
 		xfs_add_to_ioend(inode, file_offset, page, wpc, wbc,
 				 &submit_list);
 		count++;
@@ -925,8 +950,6 @@ xfs_do_writepage(
 	trace_xfs_writepage(inode, page, 0, 0);
 
-	ASSERT(page_has_buffers(page));
-
 	/*
 	 * Refuse to write the page out if we are called from reclaim context.
 	 *
......
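
The xfs_writepage_map() hunks above rework the per-page walk so iteration is driven by file offset alone, and the buffer-head cursor is advanced inside the loop body only when the page actually has buffers. The following is a hedged, self-contained user-space model of that loop shape, not XFS code; struct blk, is_hole() and walk_page() are invented stand-ins.

/*
 * Hypothetical model of the reworked walk: the optional per-block cursor
 * (bh in the kernel) may be NULL and is stepped explicitly when present.
 */
#include <stddef.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE	4096u

struct blk {			/* stand-in for struct buffer_head */
	struct blk	*next;	/* b_this_page analogue */
	int		uptodate;
};

static int is_hole(unsigned long long off)
{
	return (off / 1024) % 4 == 3;	/* pretend every fourth 1k block is a hole */
}

static void walk_page(unsigned long long page_off, unsigned int blocksize,
		      struct blk *bh)	/* bh == NULL: page has no buffer heads */
{
	unsigned int poffset;
	unsigned long long file_offset;

	for (poffset = 0, file_offset = page_off;
	     poffset < MODEL_PAGE_SIZE;
	     poffset += blocksize, file_offset += blocksize) {
		if (bh && !bh->uptodate) {
			bh = bh->next;		/* skip stale block, keep cursor in step */
			continue;
		}
		if (is_hole(file_offset)) {
			if (bh)
				bh = bh->next;	/* nothing to write here */
			continue;
		}
		if (bh)
			bh = bh->next;		/* block queued; advance cursor */
		printf("queue block at offset %llu\n", file_offset);
	}
}

int main(void)
{
	struct blk bufs[4] = {
		{ &bufs[1], 1 }, { &bufs[2], 1 }, { &bufs[3], 0 }, { NULL, 1 },
	};

	walk_page(0, 1024, bufs);	/* 1k blocks: four buffers per page */
	walk_page(4096, 4096, NULL);	/* block size == page size: no buffers */
	return 0;
}
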
@@ -1032,6 +1032,7 @@ xfs_file_iomap_begin(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	iomap->flags |= IOMAP_F_BUFFER_HEAD;
+	if (i_blocksize(inode) < PAGE_SIZE)
+		iomap->flags |= IOMAP_F_BUFFER_HEAD;
 
 	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
......