Commit a7f4e6e4 authored by Zach O'Keefe, committed by Andrew Morton

mm/thp: add flag to enforce sysfs THP in hugepage_vma_check()

MADV_COLLAPSE is not coupled to the kernel-oriented sysfs THP settings[1].

hugepage_vma_check() is the authority on determining if a VMA is eligible
for THP allocation/collapse, and it currently enforces the sysfs THP
settings.  Add a flag to disable these checks.  For now, only apply this
flag to anon and file, which use /sys/kernel/mm/transparent_hugepage/enabled.
We can expand this to shmem, which uses
/sys/kernel/mm/transparent_hugepage/shmem_enabled, later.
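
As a rough sketch of the new semantics (a minimal userspace model, not the
kernel code: the VM_HUGEPAGE bit value and the sysfs helpers are stubbed
here for illustration), with sysfs THP set to "madvise" the gate only
rejects a VMA when the caller asks for enforcement:

#include <stdbool.h>
#include <stdio.h>

#define VM_HUGEPAGE 0x1UL	/* illustrative bit, not the kernel's value */

/* Stubbed sysfs state: transparent_hugepage/enabled == "madvise". */
static bool hugepage_flags_enabled(void) { return true;  }
static bool hugepage_flags_always(void)  { return false; }

/* Same shape as the new sysfs gate inside hugepage_vma_check(). */
static bool passes_sysfs_gate(unsigned long vm_flags, bool enforce_sysfs)
{
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;	/* rejected by sysfs settings */
	return true;
}

int main(void)
{
	/* khugepaged/fault path (enforce_sysfs == true): a VMA without
	 * VM_HUGEPAGE is rejected in "madvise" mode (prints 0)... */
	printf("enforce, no VM_HUGEPAGE: %d\n", passes_sysfs_gate(0, true));
	/* ...while a caller that opts out of enforcement proceeds (prints 1). */
	printf("no enforce:              %d\n", passes_sysfs_gate(0, false));
	return 0;
}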

Use this flag in collapse_pte_mapped_thp(), where previously the VMA flags
passed to hugepage_vma_check() were OR'd with VM_HUGEPAGE to elide the
VM_HUGEPAGE check in "madvise" THP mode.  Prior to "mm: khugepaged: check
THP flag in hugepage_vma_check()", that check also ignored "never" THP
mode.  As such, this restores the previous behavior of
collapse_pte_mapped_thp(), where sysfs THP settings are ignored.  See the
comment in the code for justification of why this is OK.
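
To make the restored behavior concrete, the same toy gate (a userspace
sketch as above, with stubbed helpers, not kernel code) run with sysfs THP
set to "never" shows why the old VM_HUGEPAGE OR trick no longer sufficed
while enforce_sysfs == false does:

#include <stdbool.h>
#include <stdio.h>

#define VM_HUGEPAGE 0x1UL	/* illustrative bit, not the kernel's value */

/* Stubbed sysfs state: transparent_hugepage/enabled == "never". */
static bool hugepage_flags_enabled(void) { return false; }
static bool hugepage_flags_always(void)  { return false; }

static bool passes_sysfs_gate(unsigned long vm_flags, bool enforce_sysfs)
{
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;
	return true;
}

int main(void)
{
	/* Old workaround: OR VM_HUGEPAGE into vm_flags. That elided the
	 * "madvise" requirement, but "never" still rejects it (prints 0). */
	printf("vm_flags | VM_HUGEPAGE: %d\n",
	       passes_sysfs_gate(VM_HUGEPAGE, true));
	/* New flag: skip the sysfs checks outright, as
	 * collapse_pte_mapped_thp() now does (prints 1). */
	printf("enforce_sysfs == false: %d\n", passes_sysfs_gate(0, false));
	return 0;
}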

[1] https://lore.kernel.org/linux-mm/CAAa6QmQxay1_=Pmt8oCX2-Va18t44FV-Vs-WsQt_6+qBks4nZA@mail.gmail.com/

Link: https://lkml.kernel.org/r/20220706235936.2197195-8-zokeefe@google.com
Signed-off-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Chris Kennelly <ckennelly@google.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rongwei Wang <rongwei.wang@linux.alibaba.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Souptick Joarder (HPE)" <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d8ea7cc8
fs/proc/task_mmu.c
@@ -864,7 +864,7 @@ static int show_smap(struct seq_file *m, void *v)
 	__show_smap(m, &mss, false);
 
 	seq_printf(m, "THPeligible: %d\n",
-		   hugepage_vma_check(vma, vma->vm_flags, true, false));
+		   hugepage_vma_check(vma, vma->vm_flags, true, false, true));
 
 	if (arch_pkeys_enabled())
 		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));

include/linux/huge_mm.h
@@ -168,9 +168,8 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
 	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
 }
 
-bool hugepage_vma_check(struct vm_area_struct *vma,
-			unsigned long vm_flags,
-			bool smaps, bool in_pf);
+bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
+			bool smaps, bool in_pf, bool enforce_sysfs);
 
 #define transparent_hugepage_use_zero_page()				\
 	(transparent_hugepage_flags &					\
@@ -321,8 +320,8 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 }
 
 static inline bool hugepage_vma_check(struct vm_area_struct *vma,
-				      unsigned long vm_flags,
-				      bool smaps, bool in_pf)
+				      unsigned long vm_flags, bool smaps,
+				      bool in_pf, bool enforce_sysfs)
 {
 	return false;
 }

mm/huge_memory.c
@@ -70,9 +70,8 @@ static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
 unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
-bool hugepage_vma_check(struct vm_area_struct *vma,
-			unsigned long vm_flags,
-			bool smaps, bool in_pf)
+bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
+			bool smaps, bool in_pf, bool enforce_sysfs)
 {
 	if (!vma->vm_mm)		/* vdso */
 		return false;
@@ -121,11 +120,10 @@ bool hugepage_vma_check(struct vm_area_struct *vma,
 	if (!in_pf && shmem_file(vma->vm_file))
 		return shmem_huge_enabled(vma);
 
-	if (!hugepage_flags_enabled())
-		return false;
-
-	/* THP settings require madvise. */
-	if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always())
+	/* Enforce sysfs THP requirements as necessary */
+	if (enforce_sysfs &&
+	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
+					   !hugepage_flags_always())))
 		return false;
 
 	/* Only regular file is valid */

mm/khugepaged.c
@@ -478,7 +478,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
 	    hugepage_flags_enabled()) {
-		if (hugepage_vma_check(vma, vm_flags, false, false))
+		if (hugepage_vma_check(vma, vm_flags, false, false, true))
 			__khugepaged_enter(vma->vm_mm);
 	}
 }
@@ -848,7 +848,8 @@ static bool khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
  */
 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
-				   struct vm_area_struct **vmap)
+				   struct vm_area_struct **vmap,
+				   struct collapse_control *cc)
 {
 	struct vm_area_struct *vma;
@@ -861,7 +862,8 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 	if (!transhuge_vma_suitable(vma, address))
 		return SCAN_ADDRESS_RANGE;
-	if (!hugepage_vma_check(vma, vma->vm_flags, false, false))
+	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
+				cc->is_khugepaged))
 		return SCAN_VMA_CHECK;
 	/*
 	 * Anon VMA expected, the address may be unmapped then
@@ -980,7 +982,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 		goto out_nolock;
 
 	mmap_read_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, &vma);
+	result = hugepage_vma_revalidate(mm, address, &vma, cc);
 	if (result != SCAN_SUCCEED) {
 		mmap_read_unlock(mm);
 		goto out_nolock;
@@ -1012,7 +1014,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	mmap_write_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, &vma);
+	result = hugepage_vma_revalidate(mm, address, &vma, cc);
 	if (result != SCAN_SUCCEED)
 		goto out_up_write;
 	/* check if the pmd is still valid */
@@ -1360,12 +1362,13 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 		return;
 
 	/*
-	 * This vm_flags may not have VM_HUGEPAGE if the page was not
-	 * collapsed by this mm. But we can still collapse if the page is
-	 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
-	 * will not fail the vma for missing VM_HUGEPAGE
+	 * If we are here, we've succeeded in replacing all the native pages
+	 * in the page cache with a single hugepage. If a mm were to fault-in
+	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
+	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
+	 * analogously elide sysfs THP settings here.
 	 */
-	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false))
+	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
 		return;
 
 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2048,7 +2051,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			progress++;
 			break;
 		}
-		if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) {
+		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
 skip:
 			progress++;
 			continue;

mm/memory.c
@@ -4985,7 +4985,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 retry_pud:
 	if (pud_none(*vmf.pud) &&
-	    hugepage_vma_check(vma, vm_flags, false, true)) {
+	    hugepage_vma_check(vma, vm_flags, false, true, true)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
@@ -5019,7 +5019,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		goto retry_pud;
 
 	if (pmd_none(*vmf.pmd) &&
-	    hugepage_vma_check(vma, vm_flags, false, true)) {
+	    hugepage_vma_check(vma, vm_flags, false, true, true)) {
 		ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;