Commit 7165092f authored by Matthew Wilcox, committed by Linus Torvalds

radix-tree,shmem: introduce radix_tree_iter_next()

shmem likes to occasionally drop the lock, schedule, then reacquire the
lock and continue with the iteration from the last place it left off.
This is currently done with a pretty ugly goto.  Introduce
radix_tree_iter_next() and use it throughout shmem.c.

[koct9i@gmail.com: fix bug in radix_tree_iter_next() for tagged iteration]
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2cf938aa
...@@ -402,6 +402,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter) ...@@ -402,6 +402,22 @@ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
return NULL; return NULL;
} }
/**
 * radix_tree_iter_next - resume iterating when the chunk may be invalid
 * @iter: iterator state
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * to continue the iteration from the next index.
 *
 * Returns NULL so the caller can assign it to the slot pointer, which
 * makes the enclosing radix_tree_for_each_* loop advance via the lookup
 * path rather than the in-chunk fast path.
 */
static inline __must_check
void **radix_tree_iter_next(struct radix_tree_iter *iter)
{
	/* Drop any cached tag bits; the next chunk lookup recomputes them. */
	iter->tags = 0;
	/* Restart the walk one slot past where we stopped. */
	iter->next_index = iter->index + 1;
	return NULL;
}
/** /**
* radix_tree_chunk_size - get current chunk size * radix_tree_chunk_size - get current chunk size
* *
......
...@@ -376,7 +376,6 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, ...@@ -376,7 +376,6 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
rcu_read_lock(); rcu_read_lock();
restart:
radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
if (iter.index >= end) if (iter.index >= end)
break; break;
...@@ -393,8 +392,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, ...@@ -393,8 +392,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
if (need_resched()) { if (need_resched()) {
cond_resched_rcu(); cond_resched_rcu();
start = iter.index + 1; slot = radix_tree_iter_next(&iter);
goto restart;
} }
} }
...@@ -1944,7 +1942,6 @@ static void shmem_tag_pins(struct address_space *mapping) ...@@ -1944,7 +1942,6 @@ static void shmem_tag_pins(struct address_space *mapping)
start = 0; start = 0;
rcu_read_lock(); rcu_read_lock();
restart:
radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
page = radix_tree_deref_slot(slot); page = radix_tree_deref_slot(slot);
if (!page || radix_tree_exception(page)) { if (!page || radix_tree_exception(page)) {
...@@ -1961,8 +1958,7 @@ static void shmem_tag_pins(struct address_space *mapping) ...@@ -1961,8 +1958,7 @@ static void shmem_tag_pins(struct address_space *mapping)
if (need_resched()) { if (need_resched()) {
cond_resched_rcu(); cond_resched_rcu();
start = iter.index + 1; slot = radix_tree_iter_next(&iter);
goto restart;
} }
} }
rcu_read_unlock(); rcu_read_unlock();
...@@ -1999,7 +1995,6 @@ static int shmem_wait_for_pins(struct address_space *mapping) ...@@ -1999,7 +1995,6 @@ static int shmem_wait_for_pins(struct address_space *mapping)
start = 0; start = 0;
rcu_read_lock(); rcu_read_lock();
restart:
radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
start, SHMEM_TAG_PINNED) { start, SHMEM_TAG_PINNED) {
...@@ -2033,8 +2028,7 @@ static int shmem_wait_for_pins(struct address_space *mapping) ...@@ -2033,8 +2028,7 @@ static int shmem_wait_for_pins(struct address_space *mapping)
continue_resched: continue_resched:
if (need_resched()) { if (need_resched()) {
cond_resched_rcu(); cond_resched_rcu();
start = iter.index + 1; slot = radix_tree_iter_next(&iter);
goto restart;
} }
} }
rcu_read_unlock(); rcu_read_unlock();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment