Commit a6906972 authored by Matthew Wilcox

page cache: Convert find_get_pages_range_tag to XArray

The 'end' parameter of the xas_for_each iterator avoids a useless
iteration at the end of the range.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 3ece58a2
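
The effect is easiest to see in the loop setup. The radix tree iterator had no notion of an upper bound, so every pass had to compare the index against 'end' by hand; xas_for_each_marked() takes the bound as a parameter and stops there itself. A minimal side-by-side sketch, using the identifiers from the diff below:

	/* Before: no upper bound, so each iteration compares against 'end'. */
	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) {
		if (iter.index > end)
			break;
		/* ... */
	}

	/* After: the iterator itself terminates once it passes 'end'. */
	xas_for_each_marked(&xas, page, end, tag) {
		/* ... */
	}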
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -363,10 +363,10 @@ static inline unsigned find_get_pages(struct address_space *mapping,
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
 			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
-			pgoff_t end, int tag, unsigned int nr_pages,
+			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
 			struct page **pages);
 static inline unsigned find_get_pages_tag(struct address_space *mapping,
-			pgoff_t *index, int tag, unsigned int nr_pages,
+			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
 			struct page **pages)
 {
 	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
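
With the prototypes converted, the tag argument is type-checked: callers now pass an xa_mark_t such as PAGECACHE_TAG_DIRTY rather than a bare int. A hypothetical caller sketch, not part of this commit, assuming a struct address_space *mapping the caller already holds:

	/* Collect up to 16 dirty pages starting at 'index'. */
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned i, nr;

	nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
				16, pages);
	for (i = 0; i < nr; i++) {
		/* ... work on pages[i] ... */
		put_page(pages[i]);	/* each returned page carries a reference */
	}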
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1789,74 +1789,58 @@ EXPORT_SYMBOL(find_get_pages_contig);
  * @tag.  We update @index to index the next page for the traversal.
  */
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
-			pgoff_t end, int tag, unsigned int nr_pages,
+			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
 			struct page **pages)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, *index);
+	struct page *page;
 	unsigned ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) {
-		struct page *head, *page;
-
-		if (iter.index > end)
-			break;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
-			continue;
-
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-
-			/*
-			 * A shadow entry of a recently evicted page.
-			 *
-			 * Those entries should never be tagged, but
-			 * this tree walk is lockless and the tags are
-			 * looked up in bulk, one radix tree node at a
-			 * time, so there is a sizable window for page
-			 * reclaim to evict a page we saw tagged.
-			 *
-			 * Skip over it.
-			 */
-			continue;
-		}
+	xas_for_each_marked(&xas, page, end, tag) {
+		struct page *head;
+
+		if (xas_retry(&xas, page))
+			continue;
+		/*
+		 * Shadow entries should never be tagged, but this iteration
+		 * is lockless so there is a window for page reclaim to evict
+		 * a page we saw tagged.  Skip over it.
+		 */
+		if (xa_is_value(page))
+			continue;
 
 		head = compound_head(page);
 		if (!page_cache_get_speculative(head))
-			goto repeat;
+			goto retry;
 
 		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+		if (compound_head(page) != head)
+			goto put_page;
 
 		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
 
 		pages[ret] = page;
 		if (++ret == nr_pages) {
-			*index = pages[ret - 1]->index + 1;
+			*index = page->index + 1;
 			goto out;
 		}
+		continue;
+put_page:
+		put_page(head);
+retry:
+		xas_reset(&xas);
 	}
 
 	/*
-	 * We come here when we got at @end. We take care to not overflow the
+	 * We come here when we got to @end. We take care to not overflow the
 	 * index @index as it confuses some of the callers. This breaks the
-	 * iteration when there is page at index -1 but that is already broken
-	 * anyway.
+	 * iteration when there is a page at index -1 but that is already
+	 * broken anyway.
 	 */
 	if (end == (pgoff_t)-1)
 		*index = (pgoff_t)-1;
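
Distilled from the new loop body, this is the general shape of a lockless marked lookup under RCU. A sketch only: the compound-head handling from the real function is omitted for brevity, and 'mapping', 'start', and 'end' are assumed to be in scope.

	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;

	rcu_read_lock();
	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
		if (xas_retry(&xas, page))	/* transient internal entry */
			continue;
		if (xa_is_value(page))		/* shadow entry: skip it */
			continue;
		if (!page_cache_get_speculative(page)) {
			xas_reset(&xas);	/* lost a race: walk from the root again */
			continue;
		}
		if (unlikely(page != xas_reload(&xas))) {
			put_page(page);		/* entry changed under us: drop and retry */
			xas_reset(&xas);
			continue;
		}
		/* ... use 'page' while holding the reference ... */
		put_page(page);
	}
	rcu_read_unlock();

The xas_reset() calls mirror the retry label in the committed code: after a failed speculative get or a reload mismatch, the walk restarts from the XArray root at the current index.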