Commit fc986a38 authored by Kefeng Wang, committed by Andrew Morton

mm: huge_memory: convert madvise_free_huge_pmd to use a folio

Using folios instead of pages removes several calls to compound_head().

Link: https://lkml.kernel.org/r/20221207023431.151008-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cb6c33d4
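
For context on the commit message: a struct folio is by definition never a tail page, so folio helpers can skip the tail-to-head resolution that every page-based helper must perform. Below is a minimal user-space sketch of that difference; the struct layouts and helpers are simplified stand-ins for the real mm types, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

struct page {
	unsigned long flags;
	struct page *compound_head;	/* set on tail pages, NULL on head pages */
};

struct folio {
	struct page page;		/* a folio is always a head page */
};

/* Tail pages point back at their head; head pages resolve to themselves. */
static struct page *compound_head(struct page *page)
{
	return page->compound_head ? page->compound_head : page;
}

#define PG_dirty (1UL << 0)

/* Page-based flag test: pays for a compound_head() lookup on every call. */
static bool PageDirty(struct page *page)
{
	return compound_head(page)->flags & PG_dirty;
}

/* Folio-based flag test: the head lookup is gone by construction. */
static bool folio_test_dirty(struct folio *folio)
{
	return folio->page.flags & PG_dirty;
}

int main(void)
{
	struct folio f = { .page = { .flags = PG_dirty } };
	struct page tail = { .compound_head = &f.page };

	printf("PageDirty(tail)        = %d\n", PageDirty(&tail));
	printf("folio_test_dirty(&f)   = %d\n", folio_test_dirty(&f));
	return 0;
}

The real kernel saving has the same shape: page-based accessors such as PageDirty() expand through compound_head(), while their folio counterparts such as folio_test_dirty() operate on the folio directly, which is why each conversion in the diff below drops a hidden compound_head() call.
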
@@ -1603,7 +1603,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 {
 	spinlock_t *ptl;
 	pmd_t orig_pmd;
-	struct page *page;
+	struct folio *folio;
 	struct mm_struct *mm = tlb->mm;
 	bool ret = false;
 
@@ -1623,15 +1623,15 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		goto out;
 	}
 
-	page = pmd_page(orig_pmd);
+	folio = pfn_folio(pmd_pfn(orig_pmd));
 	/*
-	 * If other processes are mapping this page, we couldn't discard
-	 * the page unless they all do MADV_FREE so let's skip the page.
+	 * If other processes are mapping this folio, we couldn't discard
+	 * the folio unless they all do MADV_FREE so let's skip the folio.
 	 */
-	if (total_mapcount(page) != 1)
+	if (folio_mapcount(folio) != 1)
 		goto out;
 
-	if (!trylock_page(page))
+	if (!folio_trylock(folio))
 		goto out;
 
 	/*
@@ -1639,17 +1639,17 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	 * will deactivate only them.
 	 */
 	if (next - addr != HPAGE_PMD_SIZE) {
-		get_page(page);
+		folio_get(folio);
 		spin_unlock(ptl);
-		split_huge_page(page);
-		unlock_page(page);
-		put_page(page);
+		split_folio(folio);
+		folio_unlock(folio);
+		folio_put(folio);
 		goto out_unlocked;
 	}
 
-	if (PageDirty(page))
-		ClearPageDirty(page);
-	unlock_page(page);
+	if (folio_test_dirty(folio))
+		folio_clear_dirty(folio);
+	folio_unlock(folio);
 
 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
 		pmdp_invalidate(vma, addr, pmd);
@@ -1660,7 +1660,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 	}
 
-	mark_page_lazyfree(page);
+	mark_page_lazyfree(&folio->page);
 	ret = true;
 out:
 	spin_unlock(ptl);