Commit 1c824a68 authored by Johannes Weiner, committed by Linus Torvalds

mm: page-writeback: simplify memcg handling in test_clear_page_writeback()

Page writeback doesn't hold a page reference, which allows truncate to
free a page the second PageWriteback is cleared.  This used to require
special attention in test_clear_page_writeback(), where we had to be
careful not to rely on the unstable page->memcg binding and instead look
up all the necessary information before clearing the writeback flag.

Since commit 073861ed ("mm: fix VM_BUG_ON(PageTail) and
BUG_ON(PageWriteback)") test_clear_page_writeback() is called with an
explicit reference on the page, and this dance is no longer needed.

Use unlock_page_memcg() and dec_lruvec_page_state() directly.

This removes the last user of the lock_page_memcg() return value, so
change it to void and touch up the comments in there as well.  It also
removes the last extern user of __unlock_page_memcg(), so make it
static.  Finally, it removes the last user of dec_lruvec_state(), so
delete it along with a few other unused helpers.

Link: https://lkml.kernel.org/r/YCQbYAWg4nvBFL6h@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79e3094c
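
For readers skimming the diff below, here is a condensed, illustrative sketch of
the caller pattern being removed versus the simplified one.  It is distilled from
the hunks that follow and is not a complete function:

	/* Before: the page could be freed as soon as PG_writeback was
	 * cleared, so the lruvec had to be looked up up front and the
	 * memcg handle, not the page, was used for stats and unlocking. */
	memcg = lock_page_memcg(page);
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	ret = TestClearPageWriteback(page);
	if (ret)
		dec_lruvec_state(lruvec, NR_WRITEBACK);
	__unlock_page_memcg(memcg);

	/* After: since commit 073861ed callers hold a page reference across
	 * test_clear_page_writeback(), so the page stays valid and the
	 * ordinary page-based helpers suffice. */
	lock_page_memcg(page);
	ret = TestClearPageWriteback(page);
	if (ret)
		dec_lruvec_page_state(page, NR_WRITEBACK);
	unlock_page_memcg(page);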
@@ -867,8 +867,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 extern bool cgroup_memory_noswap;
 #endif
 
-struct mem_cgroup *lock_page_memcg(struct page *page);
-void __unlock_page_memcg(struct mem_cgroup *memcg);
+void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
 /*
@@ -1289,12 +1288,7 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
 }
 
-static inline struct mem_cgroup *lock_page_memcg(struct page *page)
-{
-	return NULL;
-}
-
-static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
+static inline void lock_page_memcg(struct page *page)
 {
 }
 
...
@@ -512,16 +512,10 @@ static inline void mod_lruvec_page_state(struct page *page,
 
 #endif /* CONFIG_MEMCG */
 
-static inline void __inc_lruvec_state(struct lruvec *lruvec,
-				      enum node_stat_item idx)
-{
-	__mod_lruvec_state(lruvec, idx, 1);
-}
-
-static inline void __dec_lruvec_state(struct lruvec *lruvec,
-				      enum node_stat_item idx)
+static inline void inc_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx)
 {
-	__mod_lruvec_state(lruvec, idx, -1);
+	mod_lruvec_state(lruvec, idx, 1);
 }
 
 static inline void __inc_lruvec_page_state(struct page *page,
@@ -536,18 +530,6 @@ static inline void __dec_lruvec_page_state(struct page *page,
 	__mod_lruvec_page_state(page, idx, -1);
 }
 
-static inline void inc_lruvec_state(struct lruvec *lruvec,
-				    enum node_stat_item idx)
-{
-	mod_lruvec_state(lruvec, idx, 1);
-}
-
-static inline void dec_lruvec_state(struct lruvec *lruvec,
-				    enum node_stat_item idx)
-{
-	mod_lruvec_state(lruvec, idx, -1);
-}
-
 static inline void inc_lruvec_page_state(struct page *page,
 					 enum node_stat_item idx)
 {
...
@@ -2118,11 +2118,10 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
  * This function protects unlocked LRU pages from being moved to
  * another cgroup.
  *
- * It ensures lifetime of the returned memcg. Caller is responsible
- * for the lifetime of the page; __unlock_page_memcg() is available
- * when @page might get freed inside the locked section.
+ * It ensures lifetime of the locked memcg. Caller is responsible
+ * for the lifetime of the page.
  */
-struct mem_cgroup *lock_page_memcg(struct page *page)
+void lock_page_memcg(struct page *page)
 {
 	struct page *head = compound_head(page);	/* rmap on tail pages */
 	struct mem_cgroup *memcg;
@@ -2132,21 +2131,15 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
 	 * The RCU lock is held throughout the transaction. The fast
 	 * path can get away without acquiring the memcg->move_lock
 	 * because page moving starts with an RCU grace period.
-	 *
-	 * The RCU lock also protects the memcg from being freed when
-	 * the page state that is going to change is the only thing
-	 * preventing the page itself from being freed. E.g. writeback
-	 * doesn't hold a page reference and relies on PG_writeback to
-	 * keep off truncation, migration and so forth.
 	 */
 	rcu_read_lock();
 
 	if (mem_cgroup_disabled())
-		return NULL;
+		return;
 again:
 	memcg = page_memcg(head);
 	if (unlikely(!memcg))
-		return NULL;
+		return;
 
 #ifdef CONFIG_PROVE_LOCKING
 	local_irq_save(flags);
@@ -2155,7 +2148,7 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
 #endif
 
 	if (atomic_read(&memcg->moving_account) <= 0)
-		return memcg;
+		return;
 
 	spin_lock_irqsave(&memcg->move_lock, flags);
 	if (memcg != page_memcg(head)) {
@@ -2164,24 +2157,17 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
 	}
 
 	/*
-	 * When charge migration first begins, we can have locked and
-	 * unlocked page stat updates happening concurrently. Track
-	 * the task who has the lock for unlock_page_memcg().
+	 * When charge migration first begins, we can have multiple
+	 * critical sections holding the fast-path RCU lock and one
+	 * holding the slowpath move_lock. Track the task who has the
+	 * move_lock for unlock_page_memcg().
 	 */
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
-
-	return memcg;
 }
 EXPORT_SYMBOL(lock_page_memcg);
 
-/**
- * __unlock_page_memcg - unlock and unpin a memcg
- * @memcg: the memcg
- *
- * Unlock and unpin a memcg returned by lock_page_memcg().
- */
-void __unlock_page_memcg(struct mem_cgroup *memcg)
+static void __unlock_page_memcg(struct mem_cgroup *memcg)
 {
 	if (memcg && memcg->move_lock_task == current) {
 		unsigned long flags = memcg->move_lock_flags;
...
@@ -2722,12 +2722,9 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	struct mem_cgroup *memcg;
-	struct lruvec *lruvec;
 	int ret;
 
-	memcg = lock_page_memcg(page);
-	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+	lock_page_memcg(page);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -2755,11 +2752,11 @@ int test_clear_page_writeback(struct page *page)
 		ret = TestClearPageWriteback(page);
 	}
 	if (ret) {
-		dec_lruvec_state(lruvec, NR_WRITEBACK);
+		dec_lruvec_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_node_page_state(page, NR_WRITTEN);
 	}
-	__unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	return ret;
 }
...