Commit 713735b4 authored by Johannes Weiner, committed by Linus Torvalds

memcg: correctly order reading PCG_USED and pc->mem_cgroup

The placement of the read-side barrier is confused: the writer first
sets pc->mem_cgroup, then PCG_USED.  The read-side barrier has to be
between testing PCG_USED and reading pc->mem_cgroup.
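
For context, a minimal userspace sketch of the publish/consume pairing this fix relies on. It uses C11 atomics in place of the kernel's smp_wmb()/smp_rmb(), and the struct and function names are illustrative placeholders, not the actual memcg code:

/*
 * Illustrative analogue of the ordering enforced by this commit.
 * Writer: store the payload, then publish the flag (release).
 * Reader: test the flag first, then read the payload (acquire).
 */
#include <stdatomic.h>
#include <stdio.h>

struct page_cgroup_example {
	void *mem_cgroup;   /* payload: must be written before the flag */
	atomic_int used;    /* stands in for the PCG_USED bit */
};

/* Write side: pc->mem_cgroup first, then PCG_USED. */
static void commit_charge(struct page_cgroup_example *pc, void *memcg)
{
	pc->mem_cgroup = memcg;
	atomic_store_explicit(&pc->used, 1, memory_order_release);
}

/* Read side: test PCG_USED, order the read, then read pc->mem_cgroup. */
static void *lookup_charge(struct page_cgroup_example *pc)
{
	if (!atomic_load_explicit(&pc->used, memory_order_acquire))
		return NULL;        /* unused: pc->mem_cgroup is meaningless */
	return pc->mem_cgroup;      /* ordered after the flag test */
}

int main(void)
{
	struct page_cgroup_example pc = { .used = 0 };
	int dummy_memcg;

	commit_charge(&pc, &dummy_memcg);
	printf("charged: %p\n", lookup_charge(&pc));
	return 0;
}

Here the release store stands in for "set pc->mem_cgroup, smp_wmb(), set PCG_USED" on the write side, and the acquire load for "test PCG_USED, smp_rmb(), read pc->mem_cgroup" on the read side, which is exactly the ordering the patch restores.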
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2550326a
@@ -836,13 +836,12 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 		return;
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	/* unused or root page is not rotated. */
-	if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
+	if (!PageCgroupUsed(pc))
+		return;
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
+	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
 	mz = page_cgroup_zoneinfo(pc);
 	list_move(&pc->lru, &mz->lists[lru]);
@@ -857,14 +856,10 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 		return;
 	pc = lookup_page_cgroup(page);
 	VM_BUG_ON(PageCgroupAcctLRU(pc));
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return;
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -1031,14 +1026,10 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 		return NULL;
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return NULL;
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
 	if (!mz)
 		return NULL;
...