Commit 7f0f1546 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcg: fix css_id() RCU locking for real

Commit ad4ba375 ("memcg: css_id() must be
called under rcu_read_lock()") modified memcontrol.c to quiet the RCU
lockdep check message.  But Andrew Morton pointed out that the fix isn't
sane and merely hides the lockdep messages.

This patch does the proper thing.  Checking again, all of the places that
commit fixed were accessing css_id() without rcu_read_lock() intentionally:
every caller of css_id() holds a reference count on the css, so it does not
need to be under rcu_read_lock().

Considering this again, we can use rcu_dereference_check() for css_id().  We
know css->id is valid whenever css->refcnt > 0 (css->id never changes and is
only freed after css->refcnt has dropped to 0).

This patch uses rcu_dereference_check() in css_id()/css_depth() and removes
the unnecessary rcu_read_lock() calls added by that commit.
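For reference, a condensed sketch of how css_id() reads after this change
(all identifiers are the existing kernel ones from the hunks below; the
trailing return 0 is the unchanged surrounding code, not part of this patch):

    /* Sketch only: css_id() in kernel/cgroup.c after applying this patch. */
    unsigned short css_id(struct cgroup_subsys_state *css)
    {
            struct css_id *cssid;

            /*
             * Safe either under rcu_read_lock() or while the caller holds a
             * reference: css->id is assigned once and only freed after
             * css->refcnt has dropped to zero, so lockdep accepts both cases.
             */
            cssid = rcu_dereference_check(css->id,
                            rcu_read_lock_held() || atomic_read(&css->refcnt));

            if (cssid)
                    return cssid->id;
            return 0;       /* no css_id attached */
    }

Any caller that has already done css_get()/mem_cgroup_get() keeps css->refcnt
above zero, which is exactly the condition the check encodes, so the explicit
rcu_read_lock()/rcu_read_unlock() pairs in memcontrol.c can be dropped.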
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 11cad320
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4435,7 +4435,15 @@ __setup("cgroup_disable=", cgroup_disable);
  */
 unsigned short css_id(struct cgroup_subsys_state *css)
 {
-	struct css_id *cssid = rcu_dereference(css->id);
+	struct css_id *cssid;
+
+	/*
+	 * This css_id() can return correct value when somone has refcnt
+	 * on this or this is under rcu_read_lock(). Once css->id is allocated,
+	 * it's unchanged until freed.
+	 */
+	cssid = rcu_dereference_check(css->id,
+			rcu_read_lock_held() || atomic_read(&css->refcnt));
 
 	if (cssid)
 		return cssid->id;
@@ -4445,7 +4453,10 @@ EXPORT_SYMBOL_GPL(css_id);
 
 unsigned short css_depth(struct cgroup_subsys_state *css)
 {
-	struct css_id *cssid = rcu_dereference(css->id);
+	struct css_id *cssid;
+
+	cssid = rcu_dereference_check(css->id,
+			rcu_read_lock_held() || atomic_read(&css->refcnt));
 
 	if (cssid)
 		return cssid->depth;
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2314,9 +2314,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 
 	/* record memcg information */
 	if (do_swap_account && swapout && memcg) {
-		rcu_read_lock();
 		swap_cgroup_record(ent, css_id(&memcg->css));
-		rcu_read_unlock();
 		mem_cgroup_get(memcg);
 	}
 	if (swapout && memcg)
@@ -2373,10 +2371,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
 {
 	unsigned short old_id, new_id;
 
-	rcu_read_lock();
 	old_id = css_id(&from->css);
 	new_id = css_id(&to->css);
-	rcu_read_unlock();
 
 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
 		mem_cgroup_swap_statistics(from, false);
@@ -4044,16 +4040,11 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
 			put_page(page);
 	}
 	/* throught */
-	if (ent.val && do_swap_account && !ret) {
-		unsigned short id;
-		rcu_read_lock();
-		id = css_id(&mc.from->css);
-		rcu_read_unlock();
-		if (id == lookup_swap_cgroup(ent)) {
-			ret = MC_TARGET_SWAP;
-			if (target)
-				target->ent = ent;
-		}
+	if (ent.val && do_swap_account && !ret &&
+			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
+		ret = MC_TARGET_SWAP;
+		if (target)
+			target->ent = ent;
 	}
 	return ret;
 }