Commit fdf1cdb9 authored by Johannes Weiner, committed by Linus Torvalds

mm: remove unnecessary uses of lock_page_memcg()

There are several users that nest lock_page_memcg() inside lock_page()
to prevent page->mem_cgroup from changing.  But the page lock prevents
pages from moving between cgroups, so that is unnecessary overhead.

Remove lock_page_memcg() in contexts with locked pages and fix the
debug code in the page stat functions to be okay with the page lock.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 62cccb8c
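
To make the pattern concrete before the diff, here is a minimal sketch of the kind of caller this commit simplifies. The helper name is invented for illustration and is not part of the patch; the point is that holding the page lock already pins page->mem_cgroup, so no lock_page_memcg()/unlock_page_memcg() pair is needed around the stat update:

#include <linux/pagemap.h>	/* lock_page(), unlock_page() */
#include <linux/page-flags.h>	/* TestClearPageDirty() */
#include <linux/memcontrol.h>	/* mem_cgroup_dec_page_stat() */

/* Hypothetical caller: the page lock alone provides the exclusion. */
static void example_clear_dirty_stat(struct page *page)
{
	lock_page(page);		/* pins page->mem_cgroup */
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
	unlock_page(page);
}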
@@ -28,6 +28,7 @@
 #include <linux/eventfd.h>
 #include <linux/mmzone.h>
 #include <linux/writeback.h>
+#include <linux/page-flags.h>

 struct mem_cgroup;
 struct page;
@@ -464,18 +465,19 @@ void unlock_page_memcg(struct page *page);
  * @idx: page state item to account
  * @val: number of pages (positive or negative)
  *
- * Callers must use lock_page_memcg() to prevent double accounting
- * when the page is concurrently being moved to another memcg:
+ * The @page must be locked or the caller must use lock_page_memcg()
+ * to prevent double accounting when the page is concurrently being
+ * moved to another memcg:
  *
- *   lock_page_memcg(page);
+ *   lock_page(page) or lock_page_memcg(page)
  *   if (TestClearPageState(page))
  *     mem_cgroup_update_page_stat(page, state, -1);
- *   unlock_page_memcg(page);
+ *   unlock_page(page) or unlock_page_memcg(page)
  */
 static inline void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx, int val)
 {
-	VM_BUG_ON(!rcu_read_lock_held());
+	VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));

	if (page->mem_cgroup)
		this_cpu_add(page->mem_cgroup->stat->count[idx], val);
...
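For completeness, callers that do not hold the page lock still need lock_page_memcg(), which takes rcu_read_lock() and thereby satisfies the relaxed VM_BUG_ON() above. A minimal sketch with an invented helper name, loosely modeled on the file-rmap accounting pattern:

#include <linux/types.h>	/* bool */
#include <linux/memcontrol.h>	/* lock_page_memcg(), mem_cgroup_inc_page_stat() */

/* Hypothetical caller without the page lock. */
static void example_account_mapped(struct page *page, bool mapped)
{
	lock_page_memcg(page);		/* pins page->mem_cgroup under RCU */
	if (mapped)
		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
	else
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
	unlock_page_memcg(page);
}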
@@ -176,8 +176,7 @@ static void page_cache_tree_delete(struct address_space *mapping,
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe. The caller must hold the mapping's tree_lock and
- * lock_page_memcg().
+ * is safe. The caller must hold the mapping's tree_lock.
  */
 void __delete_from_page_cache(struct page *page, void *shadow)
 {
@@ -260,11 +259,9 @@ void delete_from_page_cache(struct page *page)

	freepage = mapping->a_ops->freepage;

-	lock_page_memcg(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(page);

	if (freepage)
		freepage(page);
@@ -557,7 +554,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
		new->mapping = mapping;
		new->index = offset;

-		lock_page_memcg(old);
		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
@@ -572,7 +568,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(old);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
...
@@ -2700,7 +2700,6 @@ int clear_page_dirty_for_io(struct page *page)
		 * always locked coming in here, so we get the desired
		 * exclusion.
		 */
-		lock_page_memcg(page);
		wb = unlocked_inode_to_wb_begin(inode, &locked);
		if (TestClearPageDirty(page)) {
			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
@@ -2709,7 +2708,6 @@ int clear_page_dirty_for_io(struct page *page)
			ret = 1;
		}
		unlocked_inode_to_wb_end(inode, locked);
-		unlock_page_memcg(page);
		return ret;
	}
	return TestClearPageDirty(page);
...
@@ -527,7 +527,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

-	lock_page_memcg(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;
@@ -535,7 +534,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);
@@ -544,7 +542,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
	return 1;
 failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(page);
	return 0;
 }
...
@@ -607,7 +607,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

-	lock_page_memcg(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	/*
	 * The non racy check for a busy page.
@@ -647,7 +646,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
		mem_cgroup_swapout(page, swap);
		__delete_from_swap_cache(page);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(page);
		swapcache_free(swap);
	} else {
		void (*freepage)(struct page *);
@@ -675,7 +673,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
			shadow = workingset_eviction(mapping, page);
		__delete_from_page_cache(page, shadow);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		unlock_page_memcg(page);

		if (freepage != NULL)
			freepage(page);
@@ -685,7 +682,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,

 cannot_free:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	unlock_page_memcg(page);
	return 0;
 }
...