Commit 895f5ee4 authored by Hugh Dickins, committed by Andrew Morton

mm/khugepaged: allow pte_offset_map[_lock]() to fail

__collapse_huge_page_swapin(): don't drop the map after every pte (it only
has to be dropped by do_swap_page()); give up if pte_offset_map() fails;
trace_mm_collapse_huge_page_swapin() at the end, with result; fix comment
on returned result; fix vmf.pgoff, though it's not used.
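
As a condensed sketch, the resulting loop shape looks roughly like the
following (drawn from the hunks below; vmf setup and the error paths are
abbreviated, so treat this as an outline rather than the exact code):

	pte_t *pte = NULL;

	for (address = haddr; address < end; address += PAGE_SIZE) {
		/*
		 * !pte++ steps to the next entry; remap when the map was
		 * never taken, or was dropped by do_swap_page().
		 */
		if (!pte++) {
			pte = pte_offset_map(pmd, address);
			if (!pte) {
				result = SCAN_PMD_NULL;	/* give up */
				goto out;
			}
		}
		vmf.orig_pte = *pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;	/* keep the map for the next pte */
		vmf.pte = pte;
		ret = do_swap_page(&vmf);	/* which unmaps vmf.pte */
		pte = NULL;
	}
	if (pte)
		pte_unmap(pte);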

collapse_huge_page(): use pte_offset_map_lock() on the _pmd returned from
clearing; allow failure, but it should be impossible there. 
hpage_collapse_scan_pmd() and collapse_pte_mapped_thp() allow for
pte_offset_map_lock() failure.
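
In those two callers the new pattern is, in outline (a minimal sketch drawn
from the hunks below; the actual result code and unwind label differ per
caller):

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte) {
		/* page table was freed or replaced under us: back out */
		result = SCAN_PMD_NULL;
		goto out;
	}
	/* ... examine the ptes ... */
	pte_unmap_unlock(pte, ptl);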

Link: https://lkml.kernel.org/r/6513e85-d798-34ec-3762-7c24ffb9329@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c9c1ee20
@@ -991,9 +991,8 @@ static int check_pmd_still_valid(struct mm_struct *mm,
  * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
  *
  * Called and returns without pte mapped or spinlocks held.
- * Note that if false is returned, mmap_lock will be released.
+ * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
  */
 static int __collapse_huge_page_swapin(struct mm_struct *mm,
 				       struct vm_area_struct *vma,
 				       unsigned long haddr, pmd_t *pmd,
@@ -1002,23 +1001,35 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 	int swapped_in = 0;
 	vm_fault_t ret = 0;
 	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+	int result;
+	pte_t *pte = NULL;
 
 	for (address = haddr; address < end; address += PAGE_SIZE) {
 		struct vm_fault vmf = {
 			.vma = vma,
 			.address = address,
-			.pgoff = linear_page_index(vma, haddr),
+			.pgoff = linear_page_index(vma, address),
 			.flags = FAULT_FLAG_ALLOW_RETRY,
 			.pmd = pmd,
 		};
 
-		vmf.pte = pte_offset_map(pmd, address);
-		vmf.orig_pte = *vmf.pte;
-		if (!is_swap_pte(vmf.orig_pte)) {
-			pte_unmap(vmf.pte);
-			continue;
+		if (!pte++) {
+			pte = pte_offset_map(pmd, address);
+			if (!pte) {
+				mmap_read_unlock(mm);
+				result = SCAN_PMD_NULL;
+				goto out;
+			}
 		}
+
+		vmf.orig_pte = *pte;
+		if (!is_swap_pte(vmf.orig_pte))
+			continue;
+
+		vmf.pte = pte;
 		ret = do_swap_page(&vmf);
+		/* Which unmaps pte (after perhaps re-checking the entry) */
+		pte = NULL;
 
 		/*
 		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
@@ -1027,24 +1038,29 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 		 * resulting in later failure.
 		 */
 		if (ret & VM_FAULT_RETRY) {
-			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 			/* Likely, but not guaranteed, that page lock failed */
-			return SCAN_PAGE_LOCK;
+			result = SCAN_PAGE_LOCK;
+			goto out;
 		}
 		if (ret & VM_FAULT_ERROR) {
 			mmap_read_unlock(mm);
-			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
-			return SCAN_FAIL;
+			result = SCAN_FAIL;
+			goto out;
 		}
 		swapped_in++;
 	}
 
+	if (pte)
+		pte_unmap(pte);
+
 	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
 	if (swapped_in)
 		lru_add_drain();
 
-	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
-	return SCAN_SUCCEED;
+	result = SCAN_SUCCEED;
+out:
+	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
+	return result;
 }
 
 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
@@ -1144,9 +1160,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 				address + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 
-	pte = pte_offset_map(pmd, address);
-	pte_ptl = pte_lockptr(mm, pmd);
-
 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
 	/*
 	 * This removes any huge TLB entry from the CPU so we won't allow
@@ -1161,12 +1174,17 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_remove_table_sync_one();
 
-	spin_lock(pte_ptl);
-	result = __collapse_huge_page_isolate(vma, address, pte, cc,
-			&compound_pagelist);
-	spin_unlock(pte_ptl);
+	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
+	if (pte) {
+		result = __collapse_huge_page_isolate(vma, address, pte, cc,
+						      &compound_pagelist);
+		spin_unlock(pte_ptl);
+	} else {
+		result = SCAN_PMD_NULL;
+	}
 
 	if (unlikely(result != SCAN_SUCCEED)) {
-		pte_unmap(pte);
+		if (pte)
+			pte_unmap(pte);
 		spin_lock(pmd_ptl);
 		BUG_ON(!pmd_none(*pmd));
@@ -1251,6 +1269,11 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!pte) {
+		result = SCAN_PMD_NULL;
+		goto out;
+	}
+
 	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, _address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
@@ -1620,8 +1643,10 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	 * lockless_pages_from_mm() and the hardware page walker can access page
 	 * tables while all the high-level locks are held in write mode.
 	 */
-	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
 	result = SCAN_FAIL;
+	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
+	if (!start_pte)
+		goto drop_immap;
 
 	/* step 1: check all mapped PTEs are to the right huge page */
 	for (i = 0, addr = haddr, pte = start_pte;
@@ -1695,6 +1720,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 abort:
 	pte_unmap_unlock(start_pte, ptl);
+drop_immap:
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	goto drop_hpage;
 }