Commit 6de22619 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: track move_lock state internally

The complexity of memcg page stat synchronization is currently leaking
into the callsites, forcing them to keep track of the move_lock state and
the IRQ flags.  Simplify the API by tracking it in the memcg.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 93aa7d95
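
For orientation, here is a minimal before/after sketch of what the API change means at a call site, modeled on the page_add_file_rmap() hunk in the diff below. The example_* wrapper names are made up for illustration and the "before" variant uses the pre-patch prototypes; the memcg calls, the mapcount test and the zone-stat update are the ones that appear in the diff.

	#include <linux/memcontrol.h>	/* mem_cgroup_begin/end_page_stat(), stat items */
	#include <linux/mm.h>		/* struct page, page->_mapcount */
	#include <linux/vmstat.h>	/* __inc_zone_page_state(), NR_FILE_MAPPED */

	/* Before: every caller had to carry the move_lock state and IRQ flags. */
	static void example_file_rmap_old(struct page *page)
	{
		struct mem_cgroup *memcg;
		unsigned long flags;
		bool locked;

		memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
		if (atomic_inc_and_test(&page->_mapcount)) {
			__inc_zone_page_state(page, NR_FILE_MAPPED);
			mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
		}
		mem_cgroup_end_page_stat(memcg, &locked, &flags);
	}

	/*
	 * After: the memcg records move_lock_task/move_lock_flags internally,
	 * so the caller only keeps the memcg pointer for the transaction.
	 */
	static void example_file_rmap_new(struct page *page)
	{
		struct mem_cgroup *memcg;

		memcg = mem_cgroup_begin_page_stat(page);
		if (atomic_inc_and_test(&page->_mapcount)) {
			__inc_zone_page_state(page, NR_FILE_MAPPED);
			mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
		}
		mem_cgroup_end_page_stat(memcg);
	}

mem_cgroup_end_page_stat() can tell whether the slow path was taken by checking whether move_lock_task is the current task, which is why callers no longer pass the locked/flags state back in.
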
@@ -138,12 +138,10 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
-					      unsigned long *flags);
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
-			      unsigned long *flags);
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
 				 enum mem_cgroup_stat_index idx, int val);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
 
 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
 					    enum mem_cgroup_stat_index idx)
@@ -285,14 +283,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
-					bool *locked, unsigned long *flags)
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
 {
 	return NULL;
 }
 
-static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
-					bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
 {
 }
...
@@ -325,9 +325,11 @@ struct mem_cgroup {
 	/*
 	 * set > 0 if pages under this cgroup are moving to other cgroup.
 	 */
 	atomic_t		moving_account;
 	/* taken only while moving_account > 0 */
 	spinlock_t		move_lock;
+	struct task_struct	*move_lock_task;
+	unsigned long		move_lock_flags;
 	/*
 	 * percpu counter.
 	 */
@@ -1977,34 +1979,33 @@ bool mem_cgroup_oom_synchronize(bool handle)
 /**
  * mem_cgroup_begin_page_stat - begin a page state statistics transaction
  * @page: page that is going to change accounted state
- * @locked: &memcg->move_lock slowpath was taken
- * @flags: IRQ-state flags for &memcg->move_lock
  *
  * This function must mark the beginning of an accounted page state
  * change to prevent double accounting when the page is concurrently
  * being moved to another memcg:
  *
- *   memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ *   memcg = mem_cgroup_begin_page_stat(page);
  *   if (TestClearPageState(page))
 *     mem_cgroup_update_page_stat(memcg, state, -1);
- *   mem_cgroup_end_page_stat(memcg, locked, flags);
- *
- * The RCU lock is held throughout the transaction. The fast path can
- * get away without acquiring the memcg->move_lock (@locked is false)
- * because page moving starts with an RCU grace period.
- *
- * The RCU lock also protects the memcg from being freed when the page
- * state that is going to change is the only thing preventing the page
- * from being uncharged. E.g. end-writeback clearing PageWriteback(),
- * which allows migration to go ahead and uncharge the page before the
- * account transaction might be complete.
+ *   mem_cgroup_end_page_stat(memcg);
  */
-struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
-					      bool *locked,
-					      unsigned long *flags)
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
 {
 	struct mem_cgroup *memcg;
+	unsigned long flags;
 
+	/*
+	 * The RCU lock is held throughout the transaction. The fast
+	 * path can get away without acquiring the memcg->move_lock
+	 * because page moving starts with an RCU grace period.
+	 *
+	 * The RCU lock also protects the memcg from being freed when
+	 * the page state that is going to change is the only thing
+	 * preventing the page from being uncharged.
+	 * E.g. end-writeback clearing PageWriteback(), which allows
+	 * migration to go ahead and uncharge the page before the
+	 * account transaction might be complete.
+	 */
 	rcu_read_lock();
 
 	if (mem_cgroup_disabled())
@@ -2014,16 +2015,22 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 	if (unlikely(!memcg))
 		return NULL;
 
-	*locked = false;
 	if (atomic_read(&memcg->moving_account) <= 0)
 		return memcg;
 
-	spin_lock_irqsave(&memcg->move_lock, *flags);
+	spin_lock_irqsave(&memcg->move_lock, flags);
 	if (memcg != page->mem_cgroup) {
-		spin_unlock_irqrestore(&memcg->move_lock, *flags);
+		spin_unlock_irqrestore(&memcg->move_lock, flags);
 		goto again;
 	}
-	*locked = true;
+
+	/*
+	 * When charge migration first begins, we can have locked and
+	 * unlocked page stat updates happening concurrently. Track
+	 * the task who has the lock for mem_cgroup_end_page_stat().
+	 */
+	memcg->move_lock_task = current;
+	memcg->move_lock_flags = flags;
 
 	return memcg;
 }
@@ -2031,14 +2038,17 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 /**
  * mem_cgroup_end_page_stat - finish a page state statistics transaction
  * @memcg: the memcg that was accounted against
- * @locked: value received from mem_cgroup_begin_page_stat()
- * @flags: value received from mem_cgroup_begin_page_stat()
  */
-void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
-			      unsigned long *flags)
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
 {
-	if (memcg && *locked)
-		spin_unlock_irqrestore(&memcg->move_lock, *flags);
+	if (memcg && memcg->move_lock_task == current) {
+		unsigned long flags = memcg->move_lock_flags;
+
+		memcg->move_lock_task = NULL;
+		memcg->move_lock_flags = 0;
+
+		spin_unlock_irqrestore(&memcg->move_lock, flags);
+	}
 
 	rcu_read_unlock();
 }
...
@@ -2308,12 +2308,10 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	unsigned long memcg_flags;
 	struct mem_cgroup *memcg;
-	bool locked;
 	int ret;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2338,19 +2336,17 @@ int test_clear_page_writeback(struct page *page)
 		dec_zone_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
-	mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags);
+	mem_cgroup_end_page_stat(memcg);
 	return ret;
 }
 
 int __test_set_page_writeback(struct page *page, bool keep_write)
 {
 	struct address_space *mapping = page_mapping(page);
-	unsigned long memcg_flags;
 	struct mem_cgroup *memcg;
-	bool locked;
 	int ret;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2380,7 +2376,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITEBACK);
 	}
-	mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags);
+	mem_cgroup_end_page_stat(memcg);
 	return ret;
 
 }
...
@@ -1085,24 +1085,20 @@ void page_add_new_anon_rmap(struct page *page,
 void page_add_file_rmap(struct page *page)
 {
 	struct mem_cgroup *memcg;
-	unsigned long flags;
-	bool locked;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_page_stat(memcg, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg);
 }
 
 static void page_remove_file_rmap(struct page *page)
 {
 	struct mem_cgroup *memcg;
-	unsigned long flags;
-	bool locked;
 
-	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page);
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
@@ -1123,7 +1119,7 @@ static void page_remove_file_rmap(struct page *page)
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
 out:
-	mem_cgroup_end_page_stat(memcg, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg);
 }
 
 /**