Commit 084a8990 authored by Matthew Wilcox

dax: Convert dax_layout_busy_page to XArray

Instead of using a pagevec, just use the XArray iterators.  Add a
conditional rescheduling point which probably should have been there in
the original.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent cfc93c6c
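
The conditional rescheduling point mentioned in the commit message follows a common XArray idiom: count entries as you iterate and, every XA_CHECK_SCHED entries, pause the walk, drop the lock, and call cond_resched(). A minimal sketch of that idiom, separate from the dax code in the diff below (the function name and the xarray it walks are placeholders, not kernel code), might look like:

#include <linux/xarray.h>
#include <linux/sched.h>
#include <linux/kernel.h>

/*
 * Illustrative only: walk every entry in an XArray under xa_lock_irq,
 * pausing to reschedule every XA_CHECK_SCHED entries.
 */
static void scan_all_entries(struct xarray *pages)
{
	XA_STATE(xas, pages, 0);
	unsigned int scanned = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* ... inspect 'entry' here ... */

		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);	/* remember the position across the unlock */
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}

xas_pause() is what makes dropping the lock safe: it records the current index so the next iteration restarts from the right place after the lock is retaken.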
@@ -722,11 +722,10 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
  */
 struct page *dax_layout_busy_page(struct address_space *mapping)
 {
-	pgoff_t	indices[PAGEVEC_SIZE];
+	XA_STATE(xas, &mapping->i_pages, 0);
+	void *entry;
+	unsigned int scanned = 0;
 	struct page *page = NULL;
-	struct pagevec pvec;
-	pgoff_t	index, end;
-	unsigned i;
 
 	/*
 	 * In the 'limited' case get_user_pages() for dax is disabled.
@@ -737,13 +736,9 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
 		return NULL;
 
-	pagevec_init(&pvec);
-	index = 0;
-	end = -1;
-
 	/*
 	 * If we race get_user_pages_fast() here either we'll see the
-	 * elevated page count in the pagevec_lookup and wait, or
+	 * elevated page count in the iteration and wait, or
 	 * get_user_pages_fast() will see that the page it took a reference
 	 * against is no longer mapped in the page tables and bail to the
 	 * get_user_pages() slow path.  The slow path is protected by
@@ -755,43 +750,26 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 */
 	unmap_mapping_range(mapping, 0, 0, 1);
 
-	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
-				min(end - index, (pgoff_t)PAGEVEC_SIZE),
-				indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *pvec_ent = pvec.pages[i];
-			void *entry;
-
-			index = indices[i];
-			if (index >= end)
-				break;
-
-			if (WARN_ON_ONCE(!xa_is_value(pvec_ent)))
-				continue;
-
-			xa_lock_irq(&mapping->i_pages);
-			entry = get_unlocked_mapping_entry(mapping, index, NULL);
-			if (entry)
-				page = dax_busy_page(entry);
-			put_unlocked_mapping_entry(mapping, index, entry);
-			xa_unlock_irq(&mapping->i_pages);
-			if (page)
-				break;
-		}
-
-		/*
-		 * We don't expect normal struct page entries to exist in our
-		 * tree, but we keep these pagevec calls so that this code is
-		 * consistent with the common pattern for handling pagevecs
-		 * throughout the kernel.
-		 */
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
-		index++;
-
+	xas_lock_irq(&xas);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		if (WARN_ON_ONCE(!xa_is_value(entry)))
+			continue;
+		if (unlikely(dax_is_locked(entry)))
+			entry = get_unlocked_entry(&xas);
+		if (entry)
+			page = dax_busy_page(entry);
+		put_unlocked_entry(&xas, entry);
 		if (page)
 			break;
+		if (++scanned % XA_CHECK_SCHED)
+			continue;
+
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
+		cond_resched();
+		xas_lock_irq(&xas);
 	}
+	xas_unlock_irq(&xas);
 	return page;
 }
 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
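
For context on how the return value is consumed: dax_layout_busy_page() hands back the first page in the mapping that still carries extra references (for example from get_user_pages()), and callers such as XFS wait for that page's reference count to drop before changing the file's layout. A rough, hypothetical caller sketch follows; real filesystems also drop and retake their locks while waiting, which is omitted here, and the helper name is made up for illustration:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/page_ref.h>
#include <linux/wait_bit.h>

/* Hypothetical helper, not taken from any filesystem as-is. */
static void wait_for_dax_pages_idle(struct address_space *mapping)
{
	struct page *page;

	while ((page = dax_layout_busy_page(mapping))) {
		/* An idle fsdax page is left holding a single reference. */
		wait_var_event(&page->_refcount,
			       page_ref_count(page) == 1);
	}
}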