Commit 6a6fe9eb authored by Kefeng Wang, committed by Andrew Morton

mm: swap: convert mark_page_lazyfree() to folio_mark_lazyfree()

mark_page_lazyfree() and its callers are converted to use folios: rename the
function to folio_mark_lazyfree() and make it take a folio argument directly,
instead of calling page_folio() internally.

Link: https://lkml.kernel.org/r/20221209020618.190306-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fc986a38
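
As a quick illustration of the call-site change described above (a
hypothetical example, not part of the patch): a caller that only holds a
struct page now does the page-to-folio conversion itself, at most once, via
page_folio(). lazyfree_example() is an invented name for illustration.

#include <linux/mm.h>	/* page_folio() */
#include <linux/swap.h>	/* folio_mark_lazyfree() after this patch */

/* Hypothetical caller that still has only a struct page in hand. */
static void lazyfree_example(struct page *page)
{
	/* Before: mark_page_lazyfree(page) called page_folio() internally. */
	/* After: the caller converts once and passes the folio. */
	folio_mark_lazyfree(page_folio(page));
}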
include/linux/swap.h
@@ -402,7 +402,7 @@ extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
 extern void deactivate_page(struct page *page);
-extern void mark_page_lazyfree(struct page *page);
+void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
 extern void lru_cache_add_inactive_or_unevictable(struct page *page,
...
mm/huge_memory.c
@@ -1660,7 +1660,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 	}
-	mark_page_lazyfree(&folio->page);
+	folio_mark_lazyfree(folio);
 	ret = true;
 out:
 	spin_unlock(ptl);
...
mm/madvise.c
@@ -728,7 +728,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 			set_pte_at(mm, addr, pte, ptent);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 		}
-		mark_page_lazyfree(&folio->page);
+		folio_mark_lazyfree(folio);
 	}
 out:
 	if (nr_swap) {
...
mm/swap.c
@@ -757,16 +757,14 @@ void deactivate_page(struct page *page)
 }
 
 /**
- * mark_page_lazyfree - make an anon page lazyfree
- * @page: page to deactivate
+ * folio_mark_lazyfree - make an anon folio lazyfree
+ * @folio: folio to deactivate
  *
- * mark_page_lazyfree() moves @page to the inactive file list.
- * This is done to accelerate the reclaim of @page.
+ * folio_mark_lazyfree() moves @folio to the inactive file list.
+ * This is done to accelerate the reclaim of @folio.
  */
-void mark_page_lazyfree(struct page *page)
+void folio_mark_lazyfree(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_lru(folio) && folio_test_anon(folio) &&
 	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
 	    !folio_test_unevictable(folio)) {
...
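
The mm/swap.c hunk is truncated above. As a hedged sketch only (modeled on
mm/swap.c of the same era; cpu_fbatches, folio_batch_add_and_move() and
lru_lazyfree_fn are assumed from that context and are not shown in this
diff), the remainder of the function plausibly batches the folio onto the
per-CPU lazyfree folio batch, which moves it to the inactive file list when
the batch is drained:

void folio_mark_lazyfree(struct folio *folio)
{
	if (folio_test_lru(folio) && folio_test_anon(folio) &&
	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		/* Take a reference while the folio sits in the batch. */
		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lazyfree);
		/* Deferred move to the inactive file list at drain time. */
		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}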