Commit d8ea7cc8 authored by Zach O'Keefe, committed by Andrew Morton

mm/khugepaged: add flag to predicate khugepaged-only behavior

Add .is_khugepaged flag to struct collapse_control so khugepaged-specific
behavior can be elided by MADV_COLLAPSE context.

Start by gating khugepaged-specific heuristics behind this flag.  In
MADV_COLLAPSE context, the user presumably has reason to believe the
collapse will be beneficial, and khugepaged's heuristics shouldn't prevent
the user from doing so (a short sketch of the gating pattern follows the
list):

1) sysfs-controlled knobs khugepaged_max_ptes_[none|swap|shared]

2) the requirement that some pages in the region being collapsed be young
   or referenced
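
For illustration, here is a minimal, standalone userspace sketch of the
gating pattern (not kernel code): the struct and knob names mirror the
kernel's, but the default limit value and the helper none_or_zero_ok() are
assumptions made up for this example.

/* Minimal userspace model of the cc->is_khugepaged gate; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_NR 512	/* ptes covered by one pmd-sized huge page on x86-64 */

/* Assumed default; the real knob is sysfs-controlled and khugepaged-only. */
static unsigned int khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;

struct collapse_control {
	bool is_khugepaged;	/* true: khugepaged; false: MADV_COLLAPSE */
};

/*
 * Hypothetical helper mirroring the checks in this patch: the
 * max_ptes_none limit only applies when khugepaged initiated the collapse.
 */
static bool none_or_zero_ok(const struct collapse_control *cc,
			    unsigned int none_or_zero)
{
	return !cc->is_khugepaged || none_or_zero <= khugepaged_max_ptes_none;
}

int main(void)
{
	struct collapse_control khugepaged_cc = { .is_khugepaged = true };
	struct collapse_control madvise_cc = { .is_khugepaged = false };
	unsigned int none_or_zero = HPAGE_PMD_NR;	/* over the khugepaged limit */

	printf("khugepaged:    %s\n",
	       none_or_zero_ok(&khugepaged_cc, none_or_zero) ? "collapse" : "skip");
	printf("MADV_COLLAPSE: %s\n",
	       none_or_zero_ok(&madvise_cc, none_or_zero) ? "collapse" : "skip");
	return 0;
}

The MADV_COLLAPSE side is expected to set up its own collapse_control with
.is_khugepaged = false in a later patch of this series.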

[zokeefe@google.com: consistently order cc->is_khugepaged and pte_* checks]
  Link: https://lkml.kernel.org/r/20220720140603.1958773-3-zokeefe@google.com
  Link: https://lore.kernel.org/linux-mm/Ys2qJm6FaOQcxkha@google.com/
Link: https://lkml.kernel.org/r/20220706235936.2197195-7-zokeefe@google.com
Signed-off-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Chris Kennelly <ckennelly@google.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rongwei Wang <rongwei.wang@linux.alibaba.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Souptick Joarder (HPE)" <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 50ad2f24
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -73,6 +73,8 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  * default collapse hugepages if there is at least one pte mapped like
  * it would have happened if the vma was large enough during page
  * fault.
+ *
+ * Note that these are only respected if collapse was initiated by khugepaged.
  */
 static unsigned int khugepaged_max_ptes_none __read_mostly;
 static unsigned int khugepaged_max_ptes_swap __read_mostly;
@@ -86,6 +88,8 @@ static struct kmem_cache *mm_slot_cache __read_mostly;
 #define MAX_PTE_MAPPED_THP 8
 
 struct collapse_control {
+	bool is_khugepaged;
+
 	/* Num pages scanned per node */
 	u32 node_load[MAX_NUMNODES];
 
@@ -554,6 +558,7 @@ static bool is_refcount_suitable(struct page *page)
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 					unsigned long address,
 					pte_t *pte,
+					struct collapse_control *cc,
 					struct list_head *compound_pagelist)
 {
 	struct page *page = NULL;
@@ -566,8 +571,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		pte_t pteval = *_pte;
 		if (pte_none(pteval) || (pte_present(pteval) &&
 				is_zero_pfn(pte_pfn(pteval)))) {
+			++none_or_zero;
 			if (!userfaultfd_armed(vma) &&
-			    ++none_or_zero <= khugepaged_max_ptes_none) {
+			    (!cc->is_khugepaged ||
+			     none_or_zero <= khugepaged_max_ptes_none)) {
 				continue;
 			} else {
 				result = SCAN_EXCEED_NONE_PTE;
@@ -587,12 +594,15 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
 		VM_BUG_ON_PAGE(!PageAnon(page), page);
 
-		if (page_mapcount(page) > 1 &&
-				++shared > khugepaged_max_ptes_shared) {
-			result = SCAN_EXCEED_SHARED_PTE;
-			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
-			goto out;
+		if (page_mapcount(page) > 1) {
+			++shared;
+			if (cc->is_khugepaged &&
+			    shared > khugepaged_max_ptes_shared) {
+				result = SCAN_EXCEED_SHARED_PTE;
+				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
+				goto out;
+			}
 		}
 
 		if (PageCompound(page)) {
 			struct page *p;
@@ -654,10 +664,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		if (PageCompound(page))
 			list_add_tail(&page->lru, compound_pagelist);
 next:
-		/* There should be enough young pte to collapse the page */
-		if (pte_young(pteval) ||
-		    page_is_young(page) || PageReferenced(page) ||
-		    mmu_notifier_test_young(vma->vm_mm, address))
+		/*
+		 * If collapse was initiated by khugepaged, check that there is
+		 * enough young pte to justify collapsing the page
+		 */
+		if (cc->is_khugepaged &&
+		    (pte_young(pteval) || page_is_young(page) ||
+		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+								     address)))
 			referenced++;
 
 		if (pte_write(pteval))
@@ -666,7 +680,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
 	if (unlikely(!writable)) {
 		result = SCAN_PAGE_RO;
-	} else if (unlikely(!referenced)) {
+	} else if (unlikely(cc->is_khugepaged && !referenced)) {
 		result = SCAN_LACK_REFERENCED_PAGE;
 	} else {
 		result = SCAN_SUCCEED;
@@ -745,6 +759,7 @@ static void khugepaged_alloc_sleep(void)
 
 struct collapse_control khugepaged_collapse_control = {
+	.is_khugepaged = true,
 	.last_target_node = NUMA_NO_NODE,
 };
 
@@ -1025,7 +1040,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	mmu_notifier_invalidate_range_end(&range);
 
 	spin_lock(pte_ptl);
-	result = __collapse_huge_page_isolate(vma, address, pte,
+	result = __collapse_huge_page_isolate(vma, address, pte, cc,
 					      &compound_pagelist);
 	spin_unlock(pte_ptl);
 
@@ -1116,7 +1131,9 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	     _pte++, _address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
 		if (is_swap_pte(pteval)) {
-			if (++unmapped <= khugepaged_max_ptes_swap) {
+			++unmapped;
+			if (!cc->is_khugepaged ||
+			    unmapped <= khugepaged_max_ptes_swap) {
 				/*
 				 * Always be strict with uffd-wp
 				 * enabled swap entries.  Please see
@@ -1134,8 +1151,10 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 			}
 		}
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+			++none_or_zero;
 			if (!userfaultfd_armed(vma) &&
-			    ++none_or_zero <= khugepaged_max_ptes_none) {
+			    (!cc->is_khugepaged ||
+			     none_or_zero <= khugepaged_max_ptes_none)) {
 				continue;
 			} else {
 				result = SCAN_EXCEED_NONE_PTE;
@@ -1165,12 +1184,15 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out_unmap;
 		}
 
-		if (page_mapcount(page) > 1 &&
-				++shared > khugepaged_max_ptes_shared) {
-			result = SCAN_EXCEED_SHARED_PTE;
-			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
-			goto out_unmap;
+		if (page_mapcount(page) > 1) {
+			++shared;
+			if (cc->is_khugepaged &&
+			    shared > khugepaged_max_ptes_shared) {
+				result = SCAN_EXCEED_SHARED_PTE;
+				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
+				goto out_unmap;
+			}
 		}
 
 		page = compound_head(page);
@@ -1220,14 +1242,22 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 			result = SCAN_PAGE_COUNT;
 			goto out_unmap;
 		}
-		if (pte_young(pteval) ||
-		    page_is_young(page) || PageReferenced(page) ||
-		    mmu_notifier_test_young(vma->vm_mm, address))
+
+		/*
+		 * If collapse was initiated by khugepaged, check that there is
+		 * enough young pte to justify collapsing the page
+		 */
+		if (cc->is_khugepaged &&
+		    (pte_young(pteval) || page_is_young(page) ||
+		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+								     address)))
 			referenced++;
 	}
 	if (!writable) {
 		result = SCAN_PAGE_RO;
-	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
+	} else if (cc->is_khugepaged &&
+		   (!referenced ||
+		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
 		result = SCAN_LACK_REFERENCED_PAGE;
 	} else {
 		result = SCAN_SUCCEED;
@@ -1896,7 +1926,9 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
 			continue;
 
 		if (xa_is_value(page)) {
-			if (++swap > khugepaged_max_ptes_swap) {
+			++swap;
+			if (cc->is_khugepaged &&
+			    swap > khugepaged_max_ptes_swap) {
 				result = SCAN_EXCEED_SWAP_PTE;
 				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
 				break;
@@ -1947,7 +1979,8 @@ static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
 	rcu_read_unlock();
 
 	if (result == SCAN_SUCCEED) {
-		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+		if (cc->is_khugepaged &&
+		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
 			result = SCAN_EXCEED_NONE_PTE;
 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
 		} else {
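
For context, and not part of this patch: once the rest of the series lands,
userspace is expected to trigger a collapse roughly as sketched below.  The
MADV_COLLAPSE fallback define and the allocation details are assumptions
for illustration only.

/* Hedged usage sketch for MADV_COLLAPSE; requires the full series. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* assumed uapi value if headers lack it */
#endif

int main(void)
{
	size_t len = 2UL << 20;			/* one 2 MiB PMD-sized region */
	char *buf = aligned_alloc(len, len);	/* PMD-aligned backing memory */

	if (!buf)
		return 1;
	buf[0] = 1;	/* touch the region so there is something to collapse */

	/*
	 * Ask the kernel to collapse the range; the khugepaged-only
	 * heuristics gated by cc->is_khugepaged do not apply here.
	 */
	if (madvise(buf, len, MADV_COLLAPSE))
		perror("madvise(MADV_COLLAPSE)");

	free(buf);
	return 0;
}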