Commit 070e807c authored by Matthew Wilcox

page cache: Convert filemap_map_pages to XArray

Slight change of strategy here; if we have trouble getting hold of a
page for whatever reason (eg a compound page is split underneath us),
don't spin to stabilise the page, just continue the iteration, like we
would if we failed to trylock the page.  Since this is a speculative
optimisation, it feels like we should allow the process to take an extra
fault if it turns out to need this page instead of spending time to pin
down a page it may not need.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent c1901cd3
...@@ -2516,45 +2516,31 @@ EXPORT_SYMBOL(filemap_fault); ...@@ -2516,45 +2516,31 @@ EXPORT_SYMBOL(filemap_fault);
void filemap_map_pages(struct vm_fault *vmf, void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff) pgoff_t start_pgoff, pgoff_t end_pgoff)
{ {
struct radix_tree_iter iter;
void **slot;
struct file *file = vmf->vma->vm_file; struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping; struct address_space *mapping = file->f_mapping;
pgoff_t last_pgoff = start_pgoff; pgoff_t last_pgoff = start_pgoff;
unsigned long max_idx; unsigned long max_idx;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct page *head, *page; struct page *head, *page;
rcu_read_lock(); rcu_read_lock();
radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start_pgoff) { xas_for_each(&xas, page, end_pgoff) {
if (iter.index > end_pgoff) if (xas_retry(&xas, page))
break; continue;
repeat: if (xa_is_value(page))
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
goto next;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
slot = radix_tree_iter_retry(&iter);
continue;
}
goto next; goto next;
}
head = compound_head(page); head = compound_head(page);
if (!page_cache_get_speculative(head)) if (!page_cache_get_speculative(head))
goto repeat; goto next;
/* The page was split under us? */ /* The page was split under us? */
if (compound_head(page) != head) { if (compound_head(page) != head)
put_page(head); goto skip;
goto repeat;
}
/* Has the page moved? */ /* Has the page moved? */
if (unlikely(page != *slot)) { if (unlikely(page != xas_reload(&xas)))
put_page(head); goto skip;
goto repeat;
}
if (!PageUptodate(page) || if (!PageUptodate(page) ||
PageReadahead(page) || PageReadahead(page) ||
...@@ -2573,10 +2559,10 @@ void filemap_map_pages(struct vm_fault *vmf, ...@@ -2573,10 +2559,10 @@ void filemap_map_pages(struct vm_fault *vmf,
if (file->f_ra.mmap_miss > 0) if (file->f_ra.mmap_miss > 0)
file->f_ra.mmap_miss--; file->f_ra.mmap_miss--;
vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT; vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
if (vmf->pte) if (vmf->pte)
vmf->pte += iter.index - last_pgoff; vmf->pte += xas.xa_index - last_pgoff;
last_pgoff = iter.index; last_pgoff = xas.xa_index;
if (alloc_set_pte(vmf, NULL, page)) if (alloc_set_pte(vmf, NULL, page))
goto unlock; goto unlock;
unlock_page(page); unlock_page(page);
...@@ -2589,8 +2575,6 @@ void filemap_map_pages(struct vm_fault *vmf, ...@@ -2589,8 +2575,6 @@ void filemap_map_pages(struct vm_fault *vmf,
/* Huge page is mapped? No need to proceed. */ /* Huge page is mapped? No need to proceed. */
if (pmd_trans_huge(*vmf->pmd)) if (pmd_trans_huge(*vmf->pmd))
break; break;
if (iter.index == end_pgoff)
break;
} }
rcu_read_unlock(); rcu_read_unlock();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment