Commit b99b4e0d authored by Matthew Wilcox (Oracle)'s avatar Matthew Wilcox (Oracle) Committed by Andrew Morton

mm: pass a folio to __swap_writepage()

Both callers now have a folio, so pass that in instead of the page. 
Removes a few hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231213215842.671461-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 96c7b0b4
...@@ -201,7 +201,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) ...@@ -201,7 +201,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_end_writeback(folio); folio_end_writeback(folio);
return 0; return 0;
} }
__swap_writepage(&folio->page, wbc); __swap_writepage(folio, wbc);
return 0; return 0;
} }
...@@ -368,22 +368,22 @@ static void swap_writepage_bdev_async(struct page *page, ...@@ -368,22 +368,22 @@ static void swap_writepage_bdev_async(struct page *page,
submit_bio(bio); submit_bio(bio);
} }
void __swap_writepage(struct page *page, struct writeback_control *wbc) void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{ {
struct swap_info_struct *sis = page_swap_info(page); struct swap_info_struct *sis = swp_swap_info(folio->swap);
VM_BUG_ON_PAGE(!PageSwapCache(page), page); VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
/* /*
* ->flags can be updated non-atomicially (scan_swap_map_slots), * ->flags can be updated non-atomicially (scan_swap_map_slots),
* but that will never affect SWP_FS_OPS, so the data_race * but that will never affect SWP_FS_OPS, so the data_race
* is safe. * is safe.
*/ */
if (data_race(sis->flags & SWP_FS_OPS)) if (data_race(sis->flags & SWP_FS_OPS))
swap_writepage_fs(page, wbc); swap_writepage_fs(&folio->page, wbc);
else if (sis->flags & SWP_SYNCHRONOUS_IO) else if (sis->flags & SWP_SYNCHRONOUS_IO)
swap_writepage_bdev_sync(page, wbc, sis); swap_writepage_bdev_sync(&folio->page, wbc, sis);
else else
swap_writepage_bdev_async(page, wbc, sis); swap_writepage_bdev_async(&folio->page, wbc, sis);
} }
void swap_write_unplug(struct swap_iocb *sio) void swap_write_unplug(struct swap_iocb *sio)
......
...@@ -19,7 +19,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug) ...@@ -19,7 +19,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
} }
void swap_write_unplug(struct swap_iocb *sio); void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc); int swap_writepage(struct page *page, struct writeback_control *wbc);
void __swap_writepage(struct page *page, struct writeback_control *wbc); void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
/* linux/mm/swap_state.c */ /* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */ /* One swap address space for each 64M swap space */
......
...@@ -1446,7 +1446,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, ...@@ -1446,7 +1446,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
folio_set_reclaim(folio); folio_set_reclaim(folio);
/* start writeback */ /* start writeback */
__swap_writepage(&folio->page, &wbc); __swap_writepage(folio, &wbc);
folio_put(folio); folio_put(folio);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment