mm/swap: Add folio_rotate_reclaimable()

Convert rotate_reclaimable_page() to folio_rotate_reclaimable().  This
eliminates all five of the calls to compound_head() in this function,
saving 75 bytes at the cost of adding 15 bytes to its one caller,
end_page_writeback().  We also save 36 bytes from pagevec_move_tail_fn()
due to using folios there, for a net saving of 96 bytes.

Also move its declaration to mm/internal.h as it's only used by filemap.c.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
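
The byte savings come from how page flag tests are implemented: every
PageFoo() test must call compound_head() first, because the flag bits of a
compound page live only on its head page, whereas a struct folio can never
be a tail page, so folio_test_foo() reads the flags directly.  Below is a
minimal C sketch of that difference; it is simplified and illustrative, not
the kernel's actual definitions (the PG_RECLAIM bit value and the struct
layouts here are stand-ins):

#include <stdbool.h>

/* Tail pages store a tagged pointer to their head page. */
struct page {
        unsigned long flags;
        unsigned long compound_head;    /* head pointer | 1, if tail */
};

/* A folio is defined to never be a tail page. */
struct folio {
        struct page page;
};

#define PG_RECLAIM      (1UL << 4)      /* illustrative bit position */

static struct page *compound_head(struct page *page)
{
        unsigned long head = page->compound_head;

        /* Tail page: strip the tag bit to recover the head pointer. */
        return (head & 1) ? (struct page *)(head - 1) : page;
}

/* The page-based test resolves the head page on every call ... */
static bool PageReclaim(struct page *page)
{
        return compound_head(page)->flags & PG_RECLAIM;
}

/* ... while the folio-based test reads the flags directly. */
static bool folio_test_reclaim(const struct folio *folio)
{
        return folio->page.flags & PG_RECLAIM;
}

Eliminating five such head lookups from the function body is what shrinks
folio_rotate_reclaimable() by 75 bytes.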

--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -371,7 +371,6 @@ extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
-extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1584,8 +1584,9 @@ void end_page_writeback(struct page *page)
         * ever page writeback.
         */
        if (PageReclaim(page)) {
+               struct folio *folio = page_folio(page);
                ClearPageReclaim(page);
-               rotate_reclaimable_page(page);
+               folio_rotate_reclaimable(folio);
        }
 
        /*

--- a/mm/internal.h
+++ b/mm/internal.h
@@ -35,6 +35,7 @@
 void page_writeback_init(void);
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
+void folio_rotate_reclaimable(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long floor, unsigned long ceiling);

--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -38,7 +38,7 @@ void end_swap_bio_write(struct bio *bio)
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
-                * Also clear PG_reclaim to avoid rotate_reclaimable_page()
+                * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
                 */
                set_page_dirty(page);
                pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
@@ -317,7 +317,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
                         * temporary failure if the system has limited
                         * memory for allocating transmit buffers.
                         * Mark the page dirty and avoid
-                        * rotate_reclaimable_page but rate-limit the
+                        * folio_rotate_reclaimable but rate-limit the
                         * messages but do not flag PageError like
                         * the normal direct-to-bio case as it could
                         * be temporary.

--- a/mm/swap.c
+++ b/mm/swap.c
@@ -206,11 +206,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
 {
-       if (!PageUnevictable(page)) {
-               del_page_from_lru_list(page, lruvec);
-               ClearPageActive(page);
-               add_page_to_lru_list_tail(page, lruvec);
-               __count_vm_events(PGROTATED, thp_nr_pages(page));
+       struct folio *folio = page_folio(page);
+
+       if (!folio_test_unevictable(folio)) {
+               lruvec_del_folio(lruvec, folio);
+               folio_clear_active(folio);
+               lruvec_add_folio_tail(lruvec, folio);
+               __count_vm_events(PGROTATED, folio_nr_pages(folio));
        }
 }
 
@@ -227,23 +229,23 @@ static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
 }
 
 /*
- * Writeback is about to end against a page which has been marked for immediate
- * reclaim.  If it still appears to be reclaimable, move it to the tail of the
- * inactive list.
+ * Writeback is about to end against a folio which has been marked for
+ * immediate reclaim.  If it still appears to be reclaimable, move it
+ * to the tail of the inactive list.
  *
- * rotate_reclaimable_page() must disable IRQs, to prevent nasty races.
+ * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
  */
-void rotate_reclaimable_page(struct page *page)
+void folio_rotate_reclaimable(struct folio *folio)
 {
-       if (!PageLocked(page) && !PageDirty(page) &&
-           !PageUnevictable(page) && PageLRU(page)) {
+       if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
+           !folio_test_unevictable(folio) && folio_test_lru(folio)) {
                struct pagevec *pvec;
                unsigned long flags;
 
-               get_page(page);
+               folio_get(folio);
                local_lock_irqsave(&lru_rotate.lock, flags);
                pvec = this_cpu_ptr(&lru_rotate.pvec);
-               if (pagevec_add_and_need_flush(pvec, page))
+               if (pagevec_add_and_need_flush(pvec, &folio->page))
                        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
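
Note that folio_rotate_reclaimable() does not touch the LRU lists directly:
the folio is parked in a per-CPU pagevec under the IRQ-safe lru_rotate lock,
and the real work in pagevec_move_tail_fn() only runs once the vector fills
up, amortising the cost of the LRU manipulation.  A rough userspace analogy
of that batch-and-flush shape (the types and function names here are
hypothetical stand-ins; the kernel's pagevec additionally flushes early when
LRU caches are disabled):

#include <stdbool.h>
#include <stddef.h>

#define BATCH_SIZE 15   /* PAGEVEC_SIZE in the kernel */

/* Hypothetical stand-in for a pagevec of folios. */
struct batch {
        size_t nr;
        void *items[BATCH_SIZE];
};

/* Like pagevec_add_and_need_flush(): returns true once the batch is full. */
static bool batch_add_and_need_flush(struct batch *b, void *item)
{
        b->items[b->nr++] = item;
        return b->nr == BATCH_SIZE;
}

/* Like pagevec_lru_move_fn(): apply move_fn to each entry, then empty. */
static void batch_flush(struct batch *b, void (*move_fn)(void *))
{
        for (size_t i = 0; i < b->nr; i++)
                move_fn(b->items[i]);
        b->nr = 0;
}

Because additions and flushes both happen with interrupts disabled under the
local lock, folio_rotate_reclaimable() is safe to call from the interrupt
context that completes writeback.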