Commit 6dbaf22c authored by Johannes Weiner, committed by Linus Torvalds

mm: shmem: save one radix tree lookup when truncating swapped pages

Page cache radix tree slots are usually stabilized by the page lock, but
shmem's swap cookies have no such thing.  Because the overall truncation
loop is lockless, the swap entry is currently confirmed by a tree lookup
and then deleted by another tree lookup under the same tree lock region.
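
For illustration, the removed code (shown in the diff below) boils down to
this two-walk pattern; a simplified sketch, not a verbatim excerpt:

	spin_lock_irq(&mapping->tree_lock);
	/* walk #1: find the slot and confirm it still holds the swap cookie */
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot && radix_tree_deref_slot_protected(pslot,
					&mapping->tree_lock) == radswap)
		/* walk #2: radix_tree_delete() descends from the root again */
		radix_tree_delete(&mapping->page_tree, index);
	spin_unlock_irq(&mapping->tree_lock);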

Use radix_tree_delete_item() instead, which does the verification and
deletion with only one lookup.  This also allows removing the
delete-only special case from shmem_radix_tree_replace().
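
radix_tree_delete_item(), added by the parent commit, deletes the entry at
@index only if that entry still equals the item passed in (a NULL item
matches anything) and returns whatever it deleted, or NULL if nothing
matched.  A sketch of the resulting single-walk pattern, mirroring the new
shmem_free_swap() below:

	spin_lock_irq(&mapping->tree_lock);
	/* one walk: verification and deletion in the same descent */
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;	/* the slot changed under us */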

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Bob Liu <bob.liu@oracle.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Metin Doslu <metin@citusdata.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ozgun Erdogan <ozgun@citusdata.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <klamm@yandex-team.ru>
Cc: Ryan Mallon <rmallon@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 53c59f26
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -242,19 +242,17 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
 			pgoff_t index, void *expected, void *replacement)
 {
 	void **pslot;
-	void *item = NULL;
+	void *item;
 
 	VM_BUG_ON(!expected);
+	VM_BUG_ON(!replacement);
 	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
-	if (pslot)
-		item = radix_tree_deref_slot_protected(pslot,
-						&mapping->tree_lock);
+	if (!pslot)
+		return -ENOENT;
+	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
 	if (item != expected)
 		return -ENOENT;
-	if (replacement)
-		radix_tree_replace_slot(pslot, replacement);
-	else
-		radix_tree_delete(&mapping->page_tree, index);
+	radix_tree_replace_slot(pslot, replacement);
 	return 0;
 }
 
@@ -386,14 +384,15 @@ static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
 static int shmem_free_swap(struct address_space *mapping,
 			   pgoff_t index, void *radswap)
 {
-	int error;
+	void *old;
 
 	spin_lock_irq(&mapping->tree_lock);
-	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
+	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
 	spin_unlock_irq(&mapping->tree_lock);
-	if (!error)
-		free_swap_and_cache(radix_to_swp_entry(radswap));
-	return error;
+	if (old != radswap)
+		return -ENOENT;
+	free_swap_and_cache(radix_to_swp_entry(radswap));
+	return 0;
 }
 
 /*
...