Commit 452a8f40 authored by Christoph Hellwig, committed by Andrew Morton

mm,jfs: move write_one_page/folio_write_one to jfs

The last remaining user of folio_write_one through the write_one_page
wrapper is jfs, so move the functionality there and hard code the call to
metapage_writepage.

Note that the use of the pagecache by the JFS 'metapage' buffer cache is a
bit odd, and we could probably do without VM-level dirty tracking at all,
but that's a change for another time.
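For illustration, after this change a jfs call site drives a synchronous write of a metapage's backing page roughly as in the sketch below (distilled from the force_metapage hunk further down; the helper name flush_one_metapage_page and its parameters are placeholders, not part of this patch):

        /*
         * Minimal sketch, not part of this patch: synchronously flush the
         * page backing a metapage, mirroring the force_metapage() hunk below.
         */
        static void flush_one_metapage_page(struct super_block *sb, struct page *page)
        {
                get_page(page);
                lock_page(page);
                set_page_dirty(page);
                /*
                 * metapage_write_one() writes the page out if it is dirty,
                 * waits for the I/O and unlocks the page before returning.
                 */
                if (metapage_write_one(page))
                        jfs_error(sb, "metapage_write_one() failed\n");
                put_page(page);
        }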

Link: https://lkml.kernel.org/r/20230307143125.27778-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Dave Kleikamp <dave.kleikamp@oracle.com>
Cc: Changwei Ge <gechangwei@live.cn>
Cc: Evgeniy Dushistov <dushistov@mail.ru>
Cc: Gang He <ghe@suse.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jan Kara via Ocfs2-devel <ocfs2-devel@oss.oracle.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Joseph Qi <jiangqi903@gmail.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Jun Piao <piaojun@huawei.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Mark Fasheh <mark@fasheh.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a0d50b11
@@ -691,6 +691,35 @@ void grab_metapage(struct metapage * mp)
         unlock_page(mp->page);
 }
 
+static int metapage_write_one(struct page *page)
+{
+        struct folio *folio = page_folio(page);
+        struct address_space *mapping = folio->mapping;
+        struct writeback_control wbc = {
+                .sync_mode = WB_SYNC_ALL,
+                .nr_to_write = folio_nr_pages(folio),
+        };
+        int ret = 0;
+
+        BUG_ON(!folio_test_locked(folio));
+
+        folio_wait_writeback(folio);
+
+        if (folio_clear_dirty_for_io(folio)) {
+                folio_get(folio);
+                ret = metapage_writepage(page, &wbc);
+                if (ret == 0)
+                        folio_wait_writeback(folio);
+                folio_put(folio);
+        } else {
+                folio_unlock(folio);
+        }
+
+        if (!ret)
+                ret = filemap_check_errors(mapping);
+        return ret;
+}
+
 void force_metapage(struct metapage *mp)
 {
         struct page *page = mp->page;
@@ -700,8 +729,8 @@ void force_metapage(struct metapage *mp)
         get_page(page);
         lock_page(page);
         set_page_dirty(page);
-        if (write_one_page(page))
-                jfs_error(mp->sb, "write_one_page() failed\n");
+        if (metapage_write_one(page))
+                jfs_error(mp->sb, "metapage_write_one() failed\n");
         clear_bit(META_forcewrite, &mp->flag);
         put_page(page);
 }
@@ -746,9 +775,9 @@ void release_metapage(struct metapage * mp)
                 set_page_dirty(page);
                 if (test_bit(META_sync, &mp->flag)) {
                         clear_bit(META_sync, &mp->flag);
-                        if (write_one_page(page))
-                                jfs_error(mp->sb, "write_one_page() failed\n");
-                        lock_page(page); /* write_one_page unlocks the page */
+                        if (metapage_write_one(page))
+                                jfs_error(mp->sb, "metapage_write_one() failed\n");
+                        lock_page(page);
                 }
         } else if (mp->lsn)     /* discard_metapage doesn't remove it */
                 remove_from_logsync(mp);
@@ -1066,12 +1066,6 @@ static inline void folio_cancel_dirty(struct folio *folio)
 bool folio_clear_dirty_for_io(struct folio *folio);
 bool clear_page_dirty_for_io(struct page *page);
 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
-int __must_check folio_write_one(struct folio *folio);
-static inline int __must_check write_one_page(struct page *page)
-{
-        return folio_write_one(page_folio(page));
-}
-
 int __set_page_dirty_nobuffers(struct page *page);
 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
@@ -2583,46 +2583,6 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
         return ret;
 }
 
-/**
- * folio_write_one - write out a single folio and wait on I/O.
- * @folio: The folio to write.
- *
- * The folio must be locked by the caller and will be unlocked upon return.
- *
- * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
- * function returns.
- *
- * Return: %0 on success, negative error code otherwise
- */
-int folio_write_one(struct folio *folio)
-{
-        struct address_space *mapping = folio->mapping;
-        int ret = 0;
-        struct writeback_control wbc = {
-                .sync_mode = WB_SYNC_ALL,
-                .nr_to_write = folio_nr_pages(folio),
-        };
-
-        BUG_ON(!folio_test_locked(folio));
-
-        folio_wait_writeback(folio);
-
-        if (folio_clear_dirty_for_io(folio)) {
-                folio_get(folio);
-                ret = mapping->a_ops->writepage(&folio->page, &wbc);
-                if (ret == 0)
-                        folio_wait_writeback(folio);
-                folio_put(folio);
-        } else {
-                folio_unlock(folio);
-        }
-
-        if (!ret)
-                ret = filemap_check_errors(mapping);
-        return ret;
-}
-EXPORT_SYMBOL(folio_write_one);
-
 /*
  * For address_spaces which do not use buffers nor write back.
  */