Commit e317a8d8 authored by David Hildenbrand, committed by Andrew Morton

mm/ksm: convert break_ksm() from walk_page_range_vma() to folio_walk

Let's simplify by reusing folio_walk.  Keep the existing behavior by
handling migration entries and zeropages.

Link: https://lkml.kernel.org/r/20240802155524.517137-12-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7290840d
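
For reference, here is a minimal usage sketch of the folio_walk pattern that break_ksm() is converted to in the diff below. It assumes the folio_walk_start()/folio_walk_end() API introduced earlier in this series; the helper name is illustrative and not part of this patch:

	#include <linux/mm.h>
	#include <linux/pagewalk.h>
	#include <linux/ksm.h>

	/* Illustrative only: probe whether @addr currently maps a KSM page. */
	static bool addr_maps_ksm_page(struct vm_area_struct *vma, unsigned long addr)
	{
		struct folio_walk fw;
		struct folio *folio;
		bool ret = false;

		/*
		 * The caller must hold the mmap lock. FW_MIGRATION/FW_ZEROPAGE make
		 * the walk also report folios mapped via migration entries and the
		 * shared zeropage, matching the old break_ksm_pmd_entry() behavior.
		 */
		folio = folio_walk_start(&fw, vma, addr, FW_MIGRATION | FW_ZEROPAGE);
		if (folio) {
			/* A small folio implies FW_LEVEL_PTE, so fw.pte is valid. */
			if (!folio_test_large(folio) &&
			    (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
				ret = true;
			/* Drops the page table lock taken by folio_walk_start(). */
			folio_walk_end(&fw, vma);
		}
		return ret;
	}
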
@@ -608,47 +608,6 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
-static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
-			struct mm_walk *walk)
-{
-	struct page *page = NULL;
-	spinlock_t *ptl;
-	pte_t *pte;
-	pte_t ptent;
-	int ret;
-
-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte)
-		return 0;
-	ptent = ptep_get(pte);
-	if (pte_present(ptent)) {
-		page = vm_normal_page(walk->vma, addr, ptent);
-	} else if (!pte_none(ptent)) {
-		swp_entry_t entry = pte_to_swp_entry(ptent);
-
-		/*
-		 * As KSM pages remain KSM pages until freed, no need to wait
-		 * here for migration to end.
-		 */
-		if (is_migration_entry(entry))
-			page = pfn_swap_entry_to_page(entry);
-	}
-	/* return 1 if the page is an normal ksm page or KSM-placed zero page */
-	ret = (page && PageKsm(page)) || is_ksm_zero_pte(ptent);
-	pte_unmap_unlock(pte, ptl);
-	return ret;
-}
-
-static const struct mm_walk_ops break_ksm_ops = {
-	.pmd_entry = break_ksm_pmd_entry,
-	.walk_lock = PGWALK_RDLOCK,
-};
-
-static const struct mm_walk_ops break_ksm_lock_vma_ops = {
-	.pmd_entry = break_ksm_pmd_entry,
-	.walk_lock = PGWALK_WRLOCK,
-};
-
 /*
  * We use break_ksm to break COW on a ksm page by triggering unsharing,
  * such that the ksm page will get replaced by an exclusive anonymous page.
@@ -665,16 +624,26 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
 static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
 {
 	vm_fault_t ret = 0;
-	const struct mm_walk_ops *ops = lock_vma ?
-				&break_ksm_lock_vma_ops : &break_ksm_ops;
+
+	if (lock_vma)
+		vma_start_write(vma);
 
 	do {
-		int ksm_page;
+		bool ksm_page = false;
+		struct folio_walk fw;
+		struct folio *folio;
 
 		cond_resched();
-		ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
-		if (WARN_ON_ONCE(ksm_page < 0))
-			return ksm_page;
+		folio = folio_walk_start(&fw, vma, addr,
+					 FW_MIGRATION | FW_ZEROPAGE);
+		if (folio) {
+			/* Small folio implies FW_LEVEL_PTE. */
+			if (!folio_test_large(folio) &&
+			    (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
+				ksm_page = true;
+			folio_walk_end(&fw, vma);
+		}
+
 		if (!ksm_page)
 			return 0;
 		ret = handle_mm_fault(vma, addr,