Commit 4093602d authored by Matthew Wilcox (Oracle), committed by Andrew Morton

nilfs2: convert nilfs_copy_page() to nilfs_copy_folio()

Both callers already have a folio, so pass it in and use it directly. 
Removes a lot of hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231016201114.1928083-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c5521c76
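The "hidden calls to compound_head()" mentioned in the commit message come from the legacy page-flag helpers, which must resolve a possible tail page to its head page on every call, while the folio helpers operate on the folio directly. A minimal sketch of the difference, with simplified illustrative bodies (the real helpers are generated by the PAGEFLAG() macros in include/linux/page-flags.h; the names below are made up for illustration):

/* Legacy helper sketch: every call pays for a compound_head() lookup,
 * because @page may be a tail page of a compound page. */
static inline bool page_uptodate_sketch(struct page *page)
{
	return test_bit(PG_uptodate, &compound_head(page)->flags);
}

/* Folio helper sketch: a folio is never a tail page, so the flag word
 * can be tested directly and the compound_head() call disappears. */
static inline bool folio_uptodate_sketch(struct folio *folio)
{
	return test_bit(PG_uptodate, &folio->flags);
}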
@@ -184,30 +184,32 @@ void nilfs_page_bug(struct page *page)
 }
 
 /**
- * nilfs_copy_page -- copy the page with buffers
- * @dst: destination page
- * @src: source page
- * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
+ * nilfs_copy_folio -- copy the folio with buffers
+ * @dst: destination folio
+ * @src: source folio
+ * @copy_dirty: flag whether to copy dirty states on the folio's buffer heads.
  *
- * This function is for both data pages and btnode pages. The dirty flag
- * should be treated by caller. The page must not be under i/o.
- * Both src and dst page must be locked
+ * This function is for both data folios and btnode folios. The dirty flag
+ * should be treated by caller. The folio must not be under i/o.
+ * Both src and dst folio must be locked
  */
-static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
+static void nilfs_copy_folio(struct folio *dst, struct folio *src,
+		bool copy_dirty)
 {
 	struct buffer_head *dbh, *dbufs, *sbh;
 	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;
 
-	BUG_ON(PageWriteback(dst));
+	BUG_ON(folio_test_writeback(dst));
 
-	sbh = page_buffers(src);
-	if (!page_has_buffers(dst))
-		create_empty_buffers(dst, sbh->b_size, 0);
+	sbh = folio_buffers(src);
+	dbh = folio_buffers(dst);
+	if (!dbh)
+		dbh = folio_create_empty_buffers(dst, sbh->b_size, 0);
 
 	if (copy_dirty)
 		mask |= BIT(BH_Dirty);
 
-	dbh = dbufs = page_buffers(dst);
+	dbufs = dbh;
 	do {
 		lock_buffer(sbh);
 		lock_buffer(dbh);
@@ -218,16 +220,16 @@ static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
 		dbh = dbh->b_this_page;
 	} while (dbh != dbufs);
 
-	copy_highpage(dst, src);
+	folio_copy(dst, src);
 
-	if (PageUptodate(src) && !PageUptodate(dst))
-		SetPageUptodate(dst);
-	else if (!PageUptodate(src) && PageUptodate(dst))
-		ClearPageUptodate(dst);
-	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
-		SetPageMappedToDisk(dst);
-	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
-		ClearPageMappedToDisk(dst);
+	if (folio_test_uptodate(src) && !folio_test_uptodate(dst))
+		folio_mark_uptodate(dst);
+	else if (!folio_test_uptodate(src) && folio_test_uptodate(dst))
+		folio_clear_uptodate(dst);
+	if (folio_test_mappedtodisk(src) && !folio_test_mappedtodisk(dst))
+		folio_set_mappedtodisk(dst);
+	else if (!folio_test_mappedtodisk(src) && folio_test_mappedtodisk(dst))
+		folio_clear_mappedtodisk(dst);
 
 	do {
 		unlock_buffer(sbh);
@@ -269,7 +271,7 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
 			NILFS_PAGE_BUG(&folio->page,
 				       "found empty page in dat page cache");
-		nilfs_copy_page(&dfolio->page, &folio->page, 1);
+		nilfs_copy_folio(dfolio, folio, true);
 		filemap_dirty_folio(folio_mapping(dfolio), dfolio);
 		folio_unlock(dfolio);
@@ -314,7 +316,7 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 		if (!IS_ERR(dfolio)) {
 			/* overwrite existing folio in the destination cache */
 			WARN_ON(folio_test_dirty(dfolio));
-			nilfs_copy_page(&dfolio->page, &folio->page, 0);
+			nilfs_copy_folio(dfolio, folio, false);
 			folio_unlock(dfolio);
 			folio_put(dfolio);
 			/* Do we not need to remove folio from smap here? */
...
@@ -799,6 +799,7 @@ void folio_copy(struct folio *dst, struct folio *src)
 		cond_resched();
 	}
 }
+EXPORT_SYMBOL(folio_copy);
 
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 int sysctl_overcommit_ratio __read_mostly = 50;
...
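Note that nilfs_copy_folio() replaces the single-page copy_highpage(dst, src) with folio_copy(dst, src), which copies every page of a possibly multi-page folio, and the last hunk above adds EXPORT_SYMBOL(folio_copy) (in mm/util.c), presumably so the function stays reachable when nilfs2 is built as a module. A rough sketch of folio_copy(), reconstructed around the context lines shown in that hunk; see mm/util.c in the tree for the authoritative body:

void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		/* copy one page of data, mapping highmem pages as needed */
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		/* a large folio can span many pages; yield between copies */
		cond_resched();
	}
}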