Commit afd9d6a1 authored by Christoph Hellwig, committed by Darrick J. Wong

fs: use ->is_partially_uptodate in page_cache_seek_hole_data

This way the implementation doesn't depend on buffer_head internals.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent bd56b3e1
......@@ -595,34 +595,53 @@ EXPORT_SYMBOL_GPL(iomap_fiemap);
/*
* Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
*
* Returns the offset within the file on success, and -ENOENT otherwise.
* Returns true if found and updates @lastoff to the offset in file.
*/
static loff_t
page_seek_hole_data(struct page *page, loff_t lastoff, int whence)
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
int whence)
{
loff_t offset = page_offset(page);
struct buffer_head *bh, *head;
const struct address_space_operations *ops = inode->i_mapping->a_ops;
unsigned int bsize = i_blocksize(inode), off;
bool seek_data = whence == SEEK_DATA;
loff_t poff = page_offset(page);
if (lastoff < offset)
lastoff = offset;
if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
return false;
bh = head = page_buffers(page);
do {
offset += bh->b_size;
if (lastoff >= offset)
continue;
if (*lastoff < poff) {
/*
* Last offset smaller than the start of the page means we found
* a hole:
*/
if (whence == SEEK_HOLE)
return true;
*lastoff = poff;
}
/*
* Any buffer with valid data in it should have BH_Uptodate set.
* Just check the page unless we can and should check block ranges:
*/
if (buffer_uptodate(bh) == seek_data)
return lastoff;
if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
return PageUptodate(page) == seek_data;
lastoff = offset;
} while ((bh = bh->b_this_page) != head);
return -ENOENT;
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping))
goto out_unlock_not_found;
for (off = 0; off < PAGE_SIZE; off += bsize) {
if ((*lastoff & ~PAGE_MASK) >= off + bsize)
continue;
if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
unlock_page(page);
return true;
}
*lastoff = poff + off + bsize;
}
out_unlock_not_found:
unlock_page(page);
return false;
}
/*
......@@ -659,30 +678,8 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/*
* At this point, the page may be truncated or
* invalidated (changing page->mapping to NULL), or
* even swizzled back from swapper_space to tmpfs file
* mapping. However, page->index will not change
* because we have a reference on the page.
*
* If current page offset is beyond where we've ended,
* we've found a hole.
*/
if (whence == SEEK_HOLE &&
lastoff < page_offset(page))
goto check_range;
lock_page(page);
if (likely(page->mapping == inode->i_mapping) &&
page_has_buffers(page)) {
lastoff = page_seek_hole_data(page, lastoff, whence);
if (lastoff >= 0) {
unlock_page(page);
if (page_seek_hole_data(inode, page, &lastoff, whence))
goto check_range;
}
}
unlock_page(page);
lastoff = page_offset(page) + PAGE_SIZE;
}
pagevec_release(&pvec);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment