Commit ef3038a5 authored by Matthew Wilcox

memfd: Convert memfd_tag_pins to XArray

Switch to a batch-processing model like memfd_wait_for_pins() and
use the xa_state previously set up by memfd_wait_for_pins().
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
parent 2313216f
@@ -28,37 +28,29 @@
#define MEMFD_TAG_PINNED PAGECACHE_TAG_TOWRITE
#define LAST_SCAN 4 /* about 150ms max */
static void memfd_tag_pins(struct address_space *mapping)
static void memfd_tag_pins(struct xa_state *xas)
{
struct radix_tree_iter iter;
void __rcu **slot;
pgoff_t start;
struct page *page;
unsigned int tagged = 0;
lru_add_drain();
start = 0;
rcu_read_lock();
radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
page = radix_tree_deref_slot(slot);
if (!page || radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
slot = radix_tree_iter_retry(&iter);
continue;
}
} else if (page_count(page) - page_mapcount(page) > 1) {
xa_lock_irq(&mapping->i_pages);
radix_tree_tag_set(&mapping->i_pages, iter.index,
MEMFD_TAG_PINNED);
xa_unlock_irq(&mapping->i_pages);
}
if (need_resched()) {
slot = radix_tree_iter_resume(slot, &iter);
cond_resched_rcu();
}
xas_lock_irq(xas);
xas_for_each(xas, page, ULONG_MAX) {
if (xa_is_value(page))
continue;
if (page_count(page) - page_mapcount(page) > 1)
xas_set_mark(xas, MEMFD_TAG_PINNED);
if (++tagged % XA_CHECK_SCHED)
continue;
xas_pause(xas);
xas_unlock_irq(xas);
cond_resched();
xas_lock_irq(xas);
}
rcu_read_unlock();
xas_unlock_irq(xas);
}
/*
@@ -76,7 +68,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
struct page *page;
int error, scan;
memfd_tag_pins(mapping);
memfd_tag_pins(&xas);
error = 0;
for (scan = 0; scan <= LAST_SCAN; scan++) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment