Commit 5c024e6a authored by Matthew Wilcox

page cache: Convert page deletion to XArray

The code is slightly shorter and simpler.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 74d60958
......@@ -111,31 +111,26 @@
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
static void page_cache_tree_delete(struct address_space *mapping,
static void page_cache_delete(struct address_space *mapping,
struct page *page, void *shadow)
{
int i, nr;
XA_STATE(xas, &mapping->i_pages, page->index);
unsigned int nr = 1;
/* hugetlb pages are represented by one entry in the radix tree */
nr = PageHuge(page) ? 1 : hpage_nr_pages(page);
mapping_set_update(&xas, mapping);
/* hugetlb pages are represented by a single entry in the xarray */
if (!PageHuge(page)) {
xas_set_order(&xas, page->index, compound_order(page));
nr = 1U << compound_order(page);
}
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(nr != 1 && shadow, page);
for (i = 0; i < nr; i++) {
struct radix_tree_node *node;
void **slot;
__radix_tree_lookup(&mapping->i_pages, page->index + i,
&node, &slot);
VM_BUG_ON_PAGE(!node && nr != 1, page);
radix_tree_clear_tags(&mapping->i_pages, node, slot);
__radix_tree_replace(&mapping->i_pages, node, slot, shadow,
workingset_lookup_update(mapping));
}
xas_store(&xas, shadow);
xas_init_marks(&xas);
page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
......@@ -234,7 +229,7 @@ void __delete_from_page_cache(struct page *page, void *shadow)
trace_mm_filemap_delete_from_page_cache(page);
unaccount_page_cache_page(mapping, page);
page_cache_tree_delete(mapping, page, shadow);
page_cache_delete(mapping, page, shadow);
}
static void page_cache_free_page(struct address_space *mapping,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment