Commit b96af1fd authored by Matthew Wilcox (Oracle), committed by Richard Weinberger

ubifs: Convert do_readpage() to take a folio

All the callers now have a folio, so pass it in, and convert do_readpage()
to use folios directly.  Includes unifying the exit paths from this
function and using kmap_local instead of plain kmap.  This function
should now work with large folios, but this is not tested.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
parent ffdff813
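
The diff below moves do_readpage() from the old global kmap()/kunmap() pair to the local-mapping API. As a quick reference, here is a minimal sketch of that API's contract (illustrative only, not part of the patch): kmap_local_folio() returns a cheap, CPU-local mapping released by kunmap_local(), and on HIGHMEM builds it maps exactly one page of the folio at the requested offset, never the whole folio.

#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/string.h>

/*
 * Illustrative sketch, not from the patch: copy the first @len bytes
 * of @folio while honouring the one-page-at-a-time guarantee that
 * kmap_local_folio() gives on HIGHMEM kernels.
 */
static void copy_folio_start(struct folio *folio, void *dst, size_t len)
{
	void *addr = kmap_local_folio(folio, 0);	/* maps page 0 only */

	memcpy(dst, addr, min_t(size_t, len, PAGE_SIZE));
	kunmap_local(addr);	/* release; unmap in reverse order if nested */
}

Unlike kmap(), these mappings are private to the mapping context and do not consume a slot in the small global kmap pool, which is what makes the per-block walk in the patch affordable.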
@@ -96,36 +96,36 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 	return -EINVAL;
 }
 
-static int do_readpage(struct page *page)
+static int do_readpage(struct folio *folio)
 {
 	void *addr;
 	int err = 0, i;
 	unsigned int block, beyond;
-	struct ubifs_data_node *dn;
-	struct inode *inode = page->mapping->host;
+	struct ubifs_data_node *dn = NULL;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	loff_t i_size = i_size_read(inode);
 
 	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
-		inode->i_ino, page->index, i_size, page->flags);
-	ubifs_assert(c, !PageChecked(page));
-	ubifs_assert(c, !PagePrivate(page));
+		inode->i_ino, folio->index, i_size, folio->flags);
+	ubifs_assert(c, !folio_test_checked(folio));
+	ubifs_assert(c, !folio->private);
 
-	addr = kmap(page);
+	addr = kmap_local_folio(folio, 0);
 
-	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
 	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
 	if (block >= beyond) {
 		/* Reading beyond inode */
-		SetPageChecked(page);
-		memset(addr, 0, PAGE_SIZE);
+		folio_set_checked(folio);
+		addr = folio_zero_tail(folio, 0, addr);
 		goto out;
 	}
 
 	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
 	if (!dn) {
 		err = -ENOMEM;
-		goto error;
+		goto out;
 	}
 
 	i = 0;
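
A note on the hunk above: with large folios, the old memset(addr, 0, PAGE_SIZE) would only clear the first page, so it becomes folio_zero_tail(folio, 0, addr), which zeroes from the given offset to the end of the folio and returns an address still valid for kunmap_local(). Roughly, and only as a sketch of the semantics rather than the kernel's implementation:

/*
 * Sketch of folio_zero_tail() semantics (assumption: @kaddr is the
 * local mapping for @offset, as in the caller above).  On HIGHMEM the
 * folio is mapped one page at a time, so the zeroing loop remaps at
 * every page boundary; on !HIGHMEM one memset covers everything.
 */
static void *zero_tail_sketch(struct folio *folio, size_t offset, void *kaddr)
{
	size_t len = folio_size(folio) - offset;

	if (folio_test_highmem(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memset(kaddr, 0, max);
			kunmap_local(kaddr);
			len -= max;
			offset += max;
			max = PAGE_SIZE;
			kaddr = kmap_local_folio(folio, offset);
		}
	}
	memset(kaddr, 0, len);
	return kaddr;	/* caller still does kunmap_local() and dcache flush */
}

The same hunk initialises dn to NULL: both early exits now share the single out: label, which calls kfree(dn), and kfree(NULL) is a defined no-op.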
@@ -150,39 +150,35 @@ static int do_readpage(struct page *page)
 				memset(addr + ilen, 0, dlen - ilen);
 			}
 		}
-		if (++i >= UBIFS_BLOCKS_PER_PAGE)
+		if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
 			break;
 		block += 1;
 		addr += UBIFS_BLOCK_SIZE;
+		if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
+			kunmap_local(addr - UBIFS_BLOCK_SIZE);
+			addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
+		}
 	}
 
 	if (err) {
 		struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 		if (err == -ENOENT) {
 			/* Not found, so it must be a hole */
-			SetPageChecked(page);
+			folio_set_checked(folio);
 			dbg_gen("hole");
-			goto out_free;
+			err = 0;
+		} else {
+			ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
+				  folio->index, inode->i_ino, err);
 		}
-		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
-			  page->index, inode->i_ino, err);
-		goto error;
 	}
 
-out_free:
-	kfree(dn);
 out:
-	SetPageUptodate(page);
-	ClearPageError(page);
-	flush_dcache_page(page);
-	kunmap(page);
-	return 0;
-error:
 	kfree(dn);
-	ClearPageUptodate(page);
-	SetPageError(page);
-	flush_dcache_page(page);
-	kunmap(page);
+	if (!err)
+		folio_mark_uptodate(folio);
+	flush_dcache_folio(folio);
+	kunmap_local(addr);
 	return err;
 }
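
Two details here are easy to miss. First, the loop bound scales with the folio: an order-N folio contains UBIFS_BLOCKS_PER_PAGE << N blocks. Second, because kmap_local_folio() maps a single page of a highmem folio, the walk must drop and re-take the mapping whenever addr crosses a page boundary; on !HIGHMEM kernels the folio is contiguously addressable and the branch never fires. The pattern in isolation (hypothetical helper, name invented for illustration):

/*
 * Hypothetical helper, illustration only: visit each UBIFS_BLOCK_SIZE
 * chunk of a possibly-large, possibly-highmem folio through local
 * mappings, remapping at every page boundary.
 */
static void for_each_ubifs_block(struct folio *folio)
{
	size_t off = 0;
	void *addr = kmap_local_folio(folio, 0);

	for (;;) {
		/* ... consume UBIFS_BLOCK_SIZE bytes at addr ... */
		off += UBIFS_BLOCK_SIZE;
		if (off >= folio_size(folio))
			break;
		addr += UBIFS_BLOCK_SIZE;
		if (folio_test_highmem(folio) && offset_in_page(addr) == 0) {
			kunmap_local(addr - UBIFS_BLOCK_SIZE);	/* old page */
			addr = kmap_local_folio(folio, off);	/* next page */
		}
	}
	kunmap_local(addr);
}

Since UBIFS_BLOCK_SIZE is 4096, the remap fires on every iteration when PAGE_SIZE is 4KiB; with 64KiB pages it fires once per sixteen blocks.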
@@ -254,7 +250,7 @@ static int write_begin_slow(struct address_space *mapping,
 	if (pos == folio_pos(folio) && len >= folio_size(folio))
 		folio_set_checked(folio);
 	else {
-		err = do_readpage(&folio->page);
+		err = do_readpage(folio);
 		if (err) {
 			folio_unlock(folio);
 			folio_put(folio);
@@ -455,7 +451,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 		folio_set_checked(folio);
 		skipped_read = 1;
 	} else {
-		err = do_readpage(&folio->page);
+		err = do_readpage(folio);
 		if (err) {
 			folio_unlock(folio);
 			folio_put(folio);
@@ -559,7 +555,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 		 * Return 0 to force VFS to repeat the whole operation, or the
 		 * error code if 'do_readpage()' fails.
 		 */
-		copied = do_readpage(&folio->page);
+		copied = do_readpage(folio);
 		goto out;
 	}
 
@@ -895,7 +891,7 @@ static int ubifs_read_folio(struct file *file, struct folio *folio)
 
 	if (ubifs_bulk_read(page))
 		return 0;
-	do_readpage(page);
+	do_readpage(folio);
 	folio_unlock(folio);
 	return 0;
 }
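
The remaining hunks are mechanical: every caller already holds the folio, so the &folio->page detour disappears (ubifs_read_folio() keeps a page pointer only for ubifs_bulk_read()). For illustration only, here are the page/folio bridges that a folio-native signature lets callers drop; the helper name is invented:

#include <linux/mm.h>

/* Illustration only: converting between a folio and its head page. */
static void folio_page_bridges(struct folio *folio)
{
	struct page *head = &folio->page;	/* folio -> head page   */
	struct folio *back = page_folio(head);	/* page -> owning folio */

	WARN_ON(back != folio);			/* round trip is identity */
}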