Commit a2e17cc2 authored by David Stevens, committed by Andrew Morton

mm/khugepaged: maintain page cache uptodate flag

Make sure that collapse_file doesn't interfere with checking the uptodate
flag in the page cache by only inserting hpage into the page cache after
it has been updated and marked uptodate.  This is achieved by simply not
replacing present pages with hpage when iterating over the target range.
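
To illustrate the resulting order of operations, here is a condensed
sketch of the success path (simplified from the diff below; locking,
statistics and rollback handling are elided):

	/* ... copy data from the old pages into hpage while the old
	 * pages are still present and locked in the page cache ... */

	/* publish the contents before publishing the page */
	folio = page_folio(hpage);
	folio_mark_uptodate(folio);
	folio_ref_add(folio, HPAGE_PMD_NR - 1);

	/* only now make hpage visible in the page cache */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, hpage);
	xas_unlock_irq(&xas);

A page cache lookup therefore sees either the old, uptodate small
pages or the fully initialized hpage, never a !uptodate hpage.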

The present pages are already locked, so replacing them with the locked
hpage before the collapse is finalized is unnecessary.  However, it is
necessary to stop freezing the present pages after validating them, since
leaving long-term frozen pages in the page cache can lead to deadlocks. 
Simply checking the reference count is sufficient to ensure that there are
no long-term references hanging around that the collapse would break.
Similar to hpage, there is no reason that the present pages
actually need to be frozen in addition to being locked.
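
Concretely, the validation in collapse_file changes from freezing the
refcount to a plain count check; condensed from the hunk at line 2090
below (the error path is abbreviated in this sketch):

	/* before: the page sat frozen (page_count() == 0) in the page
	 * cache until the collapse finished or was rolled back */
	if (!page_ref_freeze(page, 3)) {
		result = SCAN_PAGE_COUNT;
		/* ... error path, see diff ... */
	}

	/* after: the three known references (our pin, the page cache,
	 * isolate_lru_page) are only counted.  The page lock, which any
	 * new page cache user must take to cope with truncation, keeps
	 * the count stable until we unlock the page. */
	if (page_count(page) != 3) {
		result = SCAN_PAGE_COUNT;
		/* ... error path, see diff ... */
	}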

This fixes a race where folio_seek_hole_data would mistake hpage for a
fallocated but unwritten page.  This race is visible to userspace via data
temporarily disappearing from SEEK_DATA/SEEK_HOLE.  This also fixes a
similar race where pages could temporarily disappear from mincore.
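
A hypothetical userspace probe for the SEEK_DATA symptom could look
like the following (illustrative only, not part of the patch; it
assumes a tmpfs file whose first page holds written data while
khugepaged concurrently collapses it):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int fd = open(argv[1], O_RDONLY);

		(void)argc;
		if (fd < 0)
			return 1;
		for (;;) {
			/* Before this fix, lseek could transiently land
			 * past offset 0 (or fail with ENXIO), because
			 * folio_seek_hole_data saw the not-yet-uptodate
			 * hpage in the page cache and treated it as a
			 * hole. */
			off_t off = lseek(fd, 0, SEEK_DATA);

			if (off != 0)
				fprintf(stderr, "data vanished: %ld\n",
					(long)off);
		}
	}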

Link: https://lkml.kernel.org/r/20230404120117.2562166-5-stevensd@google.com
Fixes: f3f0e1d2 ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: David Stevens <stevensd@chromium.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jiaqi Yan <jiaqiyan@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ac492b9c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1859,17 +1859,18 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and lock a new huge page;
- *  - scan page cache replacing old pages with the new one
+ *  - scan page cache, locking old pages
  *    + swap/gup in pages if necessary;
- *    + keep old pages around in case rollback is required;
+ *  - copy data to new page
+ *  - handle shmem holes
+ *    + re-validate that holes weren't filled by someone else
+ *    + check for userfaultfd
  *  - finalize updates to the page cache;
  *  - if replacing succeeds:
- *    + copy data over;
- *    + free old pages;
  *    + unlock huge page;
+ *    + free old pages;
  *  - if replacing failed;
- *    + put all pages back and unfreeze them;
- *    + restore gaps in the page cache;
+ *    + unlock old pages
  *    + unlock and free huge page;
  */
 static int collapse_file(struct mm_struct *mm, unsigned long addr,
@@ -1917,12 +1918,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		}
 	} while (1);
 
-	/*
-	 * At this point the hpage is locked and not up-to-date.
-	 * It's safe to insert it into the page cache, because nobody would
-	 * be able to map it or use it in another way until we unlock it.
-	 */
-
 	xas_set(&xas, start);
 	for (index = start; index < end; index++) {
 		page = xas_next(&xas);
@@ -2090,12 +2085,16 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
 
 		/*
-		 * The page is expected to have page_count() == 3:
+		 * We control three references to the page:
 		 *  - we hold a pin on it;
 		 *  - one reference from page cache;
 		 *  - one from isolate_lru_page;
+		 * If those are the only references, then any new usage of the
+		 * page will have to fetch it from the page cache. That requires
+		 * locking the page to handle truncate, so any new usage will be
+		 * blocked until we unlock page after collapse/during rollback.
 		 */
-		if (!page_ref_freeze(page, 3)) {
+		if (page_count(page) != 3) {
 			result = SCAN_PAGE_COUNT;
 			xas_unlock_irq(&xas);
 			putback_lru_page(page);
@@ -2103,16 +2102,14 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		}
 
 		/*
-		 * Add the page to the list to be able to undo the collapse if
-		 * something go wrong.
+		 * Accumulate the pages that are being collapsed.
 		 */
 		list_add_tail(&page->lru, &pagelist);
 
-		/* Finally, replace with the new page. */
-		xas_store(&xas, hpage);
-
-		/* We can't get an ENOMEM here (because the allocation happened before)
-		 * but let's check for errors (XArray implementation can be
-		 * changed in the future)
+		/*
+		 * We can't get an ENOMEM here (because the allocation happened
+		 * before) but let's check for errors (XArray implementation
+		 * can be changed in the future)
 		 */
 		WARN_ON_ONCE(xas_error(&xas));
 		continue;
@@ -2157,8 +2154,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		goto rollback;
 
 	/*
-	 * Replacing old pages with new one has succeeded, now we
-	 * attempt to copy the contents.
+	 * The old pages are locked, so they won't change anymore.
 	 */
 	index = start;
 	list_for_each_entry(page, &pagelist, lru) {
@@ -2247,11 +2243,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		/* nr_none is always 0 for non-shmem. */
 		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
 	}
 
-	/* Join all the small entries into a single multi-index entry. */
-	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-	xas_store(&xas, hpage);
-	xas_unlock_irq(&xas);
+	/*
+	 * Mark hpage as uptodate before inserting it into the page cache so
+	 * that it isn't mistaken for an fallocated but unwritten page.
+	 */
 	folio = page_folio(hpage);
 	folio_mark_uptodate(folio);
 	folio_ref_add(folio, HPAGE_PMD_NR - 1);
@@ -2260,6 +2256,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	folio_mark_dirty(folio);
 	folio_add_lru(folio);
 
+	/* Join all the small entries into a single multi-index entry. */
+	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+	xas_store(&xas, hpage);
+	xas_unlock_irq(&xas);
+
 	/*
 	 * Remove pte page tables, so we can re-fault the page as huge.
 	 */
@@ -2273,47 +2274,29 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
 		list_del(&page->lru);
 		page->mapping = NULL;
-		page_ref_unfreeze(page, 1);
 		ClearPageActive(page);
 		ClearPageUnevictable(page);
 		unlock_page(page);
-		put_page(page);
+		folio_put_refs(page_folio(page), 3);
 	}
 
 	goto out;
 
rollback:
 	/* Something went wrong: roll back page cache changes */
-	xas_lock_irq(&xas);
 	if (nr_none) {
+		xas_lock_irq(&xas);
 		mapping->nrpages -= nr_none;
 		shmem_uncharge(mapping->host, nr_none);
+		xas_unlock_irq(&xas);
 	}
 
-	xas_set(&xas, start);
-	end = index;
-	for (index = start; index < end; index++) {
-		xas_next(&xas);
-		page = list_first_entry_or_null(&pagelist,
-				struct page, lru);
-		if (!page || xas.xa_index < page->index) {
-			nr_none--;
-			continue;
-		}
-
-		VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
-
-		/* Unfreeze the page. */
+	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
 		list_del(&page->lru);
-		page_ref_unfreeze(page, 2);
-		xas_store(&xas, page);
-		xas_pause(&xas);
-		xas_unlock_irq(&xas);
-
 		unlock_page(page);
 		putback_lru_page(page);
-		xas_lock_irq(&xas);
+		put_page(page);
 	}
-	VM_BUG_ON(nr_none);
 
 	/*
 	 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
 	 * file only. This undo is not needed unless failure is
@@ -2328,8 +2311,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		smp_mb();
 	}
 
-	xas_unlock_irq(&xas);
-
 	hpage->mapping = NULL;
 
 	unlock_page(hpage);