Commit b6c418dc authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Make swapper_space tree_lock irq-safe

->tree_lock is supposed to be IRQ-safe.  Hugh worked out that with his
changes, we never actually take it from interrupt context, so spin_lock() is
sufficient.

Apart from kinda freaking me out, the analysis which led to this decision
becomes untrue with later patches.  So make it irq-safe.
parent a20a9dee
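For context, the hazard that irq-safety prevents: if a CPU takes ->tree_lock with interrupts enabled and an interrupt arriving on that same CPU tries to take the lock again, the CPU deadlocks spinning on itself. A minimal sketch of the locking discipline, with illustrative names that are not part of this patch:

	#include <linux/spinlock.h>

	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical lock */

	/* Process context, IRQs known enabled: disable them across the hold. */
	static void process_context_user(void)
	{
		spin_lock_irq(&example_lock);	/* disable local IRQs, then lock */
		/* ... touch the structure the lock protects ... */
		spin_unlock_irq(&example_lock);	/* unlock, re-enable local IRQs */
	}

	/* Interrupt context: local IRQs are already off, a plain lock suffices. */
	static void interrupt_context_user(void)
	{
		spin_lock(&example_lock);
		/* ... */
		spin_unlock(&example_lock);
	}

This is why each process-context taker below moves from spin_lock() to spin_lock_irq(): once later patches take the lock from interrupt context, holding it with IRQs enabled becomes a deadlock window.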
@@ -69,7 +69,7 @@ static int __add_to_swap_cache(struct page *page,
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
 		page_cache_get(page);
-		spin_lock(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		error = radix_tree_insert(&swapper_space.page_tree,
 						entry.val, page);
 		if (!error) {
@@ -80,7 +80,7 @@ static int __add_to_swap_cache(struct page *page,
 			pagecache_acct(1);
 		} else
 			page_cache_release(page);
-		spin_unlock(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
 	}
 	return error;
@@ -207,9 +207,9 @@ void delete_from_swap_cache(struct page *page)
 
 	entry.val = page->private;
 
-	spin_lock(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
-	spin_unlock(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
@@ -308,13 +308,13 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 {
 	struct page *page;
 
-	spin_lock(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
 	if (page) {
 		page_cache_get(page);
 		INC_CACHE_INFO(find_success);
 	}
-	spin_unlock(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 	INC_CACHE_INFO(find_total);
 	return page;
 }
@@ -336,12 +336,12 @@ struct page * read_swap_cache_async(swp_entry_t entry)
 		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
-		spin_lock(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
		found_page = radix_tree_lookup(&swapper_space.page_tree,
						entry.val);
		if (found_page)
			page_cache_get(found_page);
-		spin_unlock(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
		if (found_page)
			break;
...
@@ -289,10 +289,10 @@ static int exclusive_swap_page(struct page *page)
 		/* Is the only swap cache user the cache itself? */
 		if (p->swap_map[swp_offset(entry)] == 1) {
 			/* Recheck the page count with the swapcache lock held.. */
-			spin_lock(&swapper_space.tree_lock);
+			spin_lock_irq(&swapper_space.tree_lock);
 			if (page_count(page) == 2)
 				retval = 1;
-			spin_unlock(&swapper_space.tree_lock);
+			spin_unlock_irq(&swapper_space.tree_lock);
 		}
 		swap_info_put(p);
 	}
@@ -360,13 +360,13 @@ int remove_exclusive_swap_page(struct page *page)
 	retval = 0;
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the swapcache lock held.. */
-		spin_lock(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		if ((page_count(page) == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
 		}
-		spin_unlock(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 	}
 	swap_info_put(p);
@@ -390,12 +390,12 @@ void free_swap_and_cache(swp_entry_t entry)
 	p = swap_info_get(entry);
 	if (p) {
 		if (swap_entry_free(p, swp_offset(entry)) == 1) {
-			spin_lock(&swapper_space.tree_lock);
+			spin_lock_irq(&swapper_space.tree_lock);
 			page = radix_tree_lookup(&swapper_space.page_tree,
 						entry.val);
 			if (page && TestSetPageLocked(page))
 				page = NULL;
-			spin_unlock(&swapper_space.tree_lock);
+			spin_unlock_irq(&swapper_space.tree_lock);
 		}
 		swap_info_put(p);
 	}
...
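A note on the variant used: spin_lock_irq()/spin_unlock_irq() unconditionally re-enable interrupts on unlock, which is correct here because every changed path is process context that runs with IRQs enabled. A caller whose IRQ state is unknown would need the save/restore form instead; a minimal sketch, not part of this patch:

	unsigned long flags;

	spin_lock_irqsave(&swapper_space.tree_lock, flags);	/* save IRQ state, disable, lock */
	/* ... */
	spin_unlock_irqrestore(&swapper_space.tree_lock, flags);	/* unlock, restore saved state */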