Commit 35c754d7 authored by Balbir Singh, committed by Linus Torvalds

memory controller BUG_ON()

Move mem_cgroup_cache_charge() above radix_tree_preload().
radix_tree_preload() disables preemption, so even though the gfp_mask
passed in contains __GFP_WAIT, we cannot actually perform a __GFP_WAIT
allocation inside the preload section, and we hit a BUG_ON() in
kmem_cache_alloc().

This patch moves mem_cgroup_cache_charge() above radix_tree_preload()
in the cache-charging paths, so that the charge, which may sleep, is
made before preemption is disabled.
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 044d66c1
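
For illustration, a minimal sketch of the pre-patch ordering that triggers the
BUG_ON() (a sketch of the failure mode, not the literal pre-patch code; the
real call sites are add_to_page_cache() and add_to_swap_cache() in the diff
below):

	/*
	 * radix_tree_preload() returns with preemption disabled on
	 * success; it stays disabled until radix_tree_preload_end().
	 */
	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		/*
		 * BAD: mem_cgroup_cache_charge() allocates with the
		 * caller's gfp_mask. With __GFP_WAIT set the allocation
		 * may sleep, and sleeping with preemption disabled hits
		 * the BUG_ON() in kmem_cache_alloc().
		 */
		error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
		...
		radix_tree_preload_end();
	}

The fix charges first, while sleeping is still allowed, and backs the charge
out with mem_cgroup_uncharge_page() if radix_tree_preload() then fails.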
mm/filemap.c:

@@ -460,14 +460,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 int add_to_page_cache(struct page *page, struct address_space *mapping,
 		pgoff_t offset, gfp_t gfp_mask)
 {
-	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	int error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
 
-	if (error == 0) {
-		error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
-		if (error)
-			goto out;
+	if (error)
+		goto out;
 
+	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+	if (error == 0) {
 		write_lock_irq(&mapping->tree_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
 		if (!error) {
@@ -482,7 +480,8 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 		}
 		write_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
-	}
+	} else
+		mem_cgroup_uncharge_page(page);
 out:
 	return error;
 }
mm/swap_state.c:

@@ -75,13 +75,13 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageSwapCache(page));
 	BUG_ON(PagePrivate(page));
-	error = radix_tree_preload(gfp_mask);
-	if (!error) {
-		error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
-		if (error)
-			goto out;
+	error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
+	if (error)
+		goto out;
 
+	error = radix_tree_preload(gfp_mask);
+	if (!error) {
 		write_lock_irq(&swapper_space.tree_lock);
 		error = radix_tree_insert(&swapper_space.page_tree,
 						entry.val, page);
@@ -97,7 +97,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 		}
 		write_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
-	}
+	} else
+		mem_cgroup_uncharge_page(page);
 out:
 	return error;
 }