Commit 27a83a60 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm/filemap: fix filemap_map_pages for THP

We dereference page->mapping and page->index directly after calling
find_subpage() and these fields are not valid for tail pages.  While
commit 4101196b ("mm: page cache: store only head pages in i_pages")
introduced the call to find_subpage(), the problem existed prior to that
commit; I suspect it goes all the way back to when THPs first existed.

The user-visible effects of this are almost negligible.  To hit it, you
have to mmap a tmpfs file at an unaligned address and then it's only a
disabled optimisation causing page faults to happen more frequently than
they otherwise would.

Fix this by keeping both head and page pointers and checking the
appropriate one.  We could use page_mapping() and page_to_index(), but
that's higher overhead.
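
To make the head/tail distinction concrete, the following is a minimal,
self-contained userspace sketch rather than kernel code; toy_page,
toy_mapping, toy_find_subpage and NR_SUBPAGES are invented names.  It
models the invariant the fix relies on: i_pages stores only head pages,
and only the head of a compound (THP) page carries valid ->mapping and
->index, so those fields have to be checked on the head rather than on
the subpage returned by find_subpage().

/*
 * Toy userspace model, not kernel code.  All names (toy_page,
 * toy_mapping, toy_find_subpage, NR_SUBPAGES) are invented for
 * illustration only.
 */
#include <assert.h>
#include <stdio.h>

#define NR_SUBPAGES 512			/* 2MB THP / 4kB base pages */

struct toy_mapping { const char *name; };

struct toy_page {
	struct toy_mapping *mapping;	/* valid only on the head page */
	unsigned long index;		/* valid only on the head page */
};

/* Analogue of find_subpage(): pick the tail page for a given index. */
static struct toy_page *toy_find_subpage(struct toy_page *head,
					 unsigned long index)
{
	return head + (index % NR_SUBPAGES);
}

int main(void)
{
	struct toy_mapping file = { "tmpfs file" };
	static struct toy_page thp[NR_SUBPAGES];	/* thp[0] is the head */
	unsigned long fault_index = 5;			/* an unaligned fault */

	/* Only the head page is given valid mapping/index fields. */
	thp[0].mapping = &file;
	thp[0].index = 0;

	struct toy_page *head = &thp[0];
	struct toy_page *page = toy_find_subpage(head, fault_index);

	/* Buggy check (what the old code did): a tail page's ->mapping is
	 * not valid, so the comparison spuriously fails and the
	 * fault-around optimisation is skipped. */
	printf("page->mapping matches? %s\n",
	       page->mapping == &file ? "yes" : "no (bug)");

	/* Fixed check: consult the head page instead. */
	printf("head->mapping matches? %s\n",
	       head->mapping == &file ? "yes" : "no");

	/* Likewise ->index: the real pagecache index comes from the head
	 * (or from xas.xa_index in the kernel), not from the tail page. */
	printf("tail ->index = %lu, real index = %lu\n",
	       page->index, head->index + (unsigned long)(page - head));

	assert(head->mapping == &file);
	return 0;
}

Running it prints "no (bug)" for the tail-page check and "yes" for the
head check, which is also why the diff below switches the max_idx
comparison from page->index to xas.xa_index.
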
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Link: https://lkml.kernel.org/r/20200911012532.24761-1-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a8cf7f27
@@ -2793,42 +2793,42 @@ void filemap_map_pages(struct vm_fault *vmf,
         pgoff_t last_pgoff = start_pgoff;
         unsigned long max_idx;
         XA_STATE(xas, &mapping->i_pages, start_pgoff);
-        struct page *page;
+        struct page *head, *page;
         unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
         rcu_read_lock();
-        xas_for_each(&xas, page, end_pgoff) {
-                if (xas_retry(&xas, page))
+        xas_for_each(&xas, head, end_pgoff) {
+                if (xas_retry(&xas, head))
                         continue;
-                if (xa_is_value(page))
+                if (xa_is_value(head))
                         goto next;
                 /*
                  * Check for a locked page first, as a speculative
                  * reference may adversely influence page migration.
                  */
-                if (PageLocked(page))
+                if (PageLocked(head))
                         goto next;
-                if (!page_cache_get_speculative(page))
+                if (!page_cache_get_speculative(head))
                         goto next;
                 /* Has the page moved or been split? */
-                if (unlikely(page != xas_reload(&xas)))
+                if (unlikely(head != xas_reload(&xas)))
                         goto skip;
-                page = find_subpage(page, xas.xa_index);
-                if (!PageUptodate(page) ||
+                page = find_subpage(head, xas.xa_index);
+                if (!PageUptodate(head) ||
                                 PageReadahead(page) ||
                                 PageHWPoison(page))
                         goto skip;
-                if (!trylock_page(page))
+                if (!trylock_page(head))
                         goto skip;
-                if (page->mapping != mapping || !PageUptodate(page))
+                if (head->mapping != mapping || !PageUptodate(head))
                         goto unlock;
                 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
-                if (page->index >= max_idx)
+                if (xas.xa_index >= max_idx)
                         goto unlock;
                 if (mmap_miss > 0)
@@ -2840,12 +2840,12 @@ void filemap_map_pages(struct vm_fault *vmf,
                 last_pgoff = xas.xa_index;
                 if (alloc_set_pte(vmf, page))
                         goto unlock;
-                unlock_page(page);
+                unlock_page(head);
                 goto next;
 unlock:
-                unlock_page(page);
+                unlock_page(head);
 skip:
-                put_page(page);
+                put_page(head);
 next:
                 /* Huge page is mapped? No need to proceed. */
                 if (pmd_trans_huge(*vmf->pmd))