Commit b78949eb authored by Mandeep Singh Baines, committed by Tejun Heo

cgroup: simplify double-check locking in cgroup_attach_proc

To keep the complexity of the double-check locking in one place, move
the thread_group_leader check up into attach_task_by_pid().  This
allows us to use a goto instead of returning -EAGAIN.

While at it, convert a couple of returns to gotos and use rcu for the
!pid case also in order to simplify the logic.

Changes in V2:
* https://lkml.org/lkml/2011/12/22/86 (Tejun Heo)
  * Use a goto instead of returning -EAGAIN
Signed-off-by: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: containers@lists.linux-foundation.org
Cc: cgroups@vger.kernel.org
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
parent 24528255
...@@ -2104,19 +2104,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) ...@@ -2104,19 +2104,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
/* prevent changes to the threadgroup list while we take a snapshot. */ /* prevent changes to the threadgroup list while we take a snapshot. */
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
if (!thread_group_leader(leader)) {
/*
* a race with de_thread from another thread's exec() may strip
* us of our leadership, making while_each_thread unsafe to use
* on this task. if this happens, there is no choice but to
* throw this task away and try again (from cgroup_procs_write);
* this is "double-double-toil-and-trouble-check locking".
*/
read_unlock(&tasklist_lock);
retval = -EAGAIN;
goto out_free_group_list;
}
tsk = leader; tsk = leader;
i = 0; i = 0;
do { do {
...@@ -2245,22 +2232,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) ...@@ -2245,22 +2232,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
if (!cgroup_lock_live_group(cgrp)) if (!cgroup_lock_live_group(cgrp))
return -ENODEV; return -ENODEV;
retry_find_task:
rcu_read_lock();
if (pid) { if (pid) {
rcu_read_lock();
tsk = find_task_by_vpid(pid); tsk = find_task_by_vpid(pid);
if (!tsk) { if (!tsk) {
rcu_read_unlock(); rcu_read_unlock();
cgroup_unlock(); ret= -ESRCH;
return -ESRCH; goto out_unlock_cgroup;
}
if (threadgroup) {
/*
* RCU protects this access, since tsk was found in the
* tid map. a race with de_thread may cause group_leader
* to stop being the leader, but cgroup_attach_proc will
* detect it later.
*/
tsk = tsk->group_leader;
} }
/* /*
* even if we're attaching all tasks in the thread group, we * even if we're attaching all tasks in the thread group, we
...@@ -2271,29 +2250,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) ...@@ -2271,29 +2250,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
cred->euid != tcred->uid && cred->euid != tcred->uid &&
cred->euid != tcred->suid) { cred->euid != tcred->suid) {
rcu_read_unlock(); rcu_read_unlock();
cgroup_unlock(); ret = -EACCES;
return -EACCES; goto out_unlock_cgroup;
} }
get_task_struct(tsk); } else
rcu_read_unlock(); tsk = current;
} else {
if (threadgroup)
tsk = current->group_leader;
else
tsk = current;
get_task_struct(tsk);
}
threadgroup_lock(tsk);
if (threadgroup) if (threadgroup)
tsk = tsk->group_leader;
get_task_struct(tsk);
rcu_read_unlock();
threadgroup_lock(tsk);
if (threadgroup) {
if (!thread_group_leader(tsk)) {
/*
* a race with de_thread from another thread's exec()
* may strip us of our leadership, if this happens,
* there is no choice but to throw this task away and
* try again; this is
* "double-double-toil-and-trouble-check locking".
*/
threadgroup_unlock(tsk);
put_task_struct(tsk);
goto retry_find_task;
}
ret = cgroup_attach_proc(cgrp, tsk); ret = cgroup_attach_proc(cgrp, tsk);
else } else
ret = cgroup_attach_task(cgrp, tsk); ret = cgroup_attach_task(cgrp, tsk);
threadgroup_unlock(tsk); threadgroup_unlock(tsk);
put_task_struct(tsk); put_task_struct(tsk);
out_unlock_cgroup:
cgroup_unlock(); cgroup_unlock();
return ret; return ret;
} }
...@@ -2305,16 +2293,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid) ...@@ -2305,16 +2293,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid) static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
{ {
int ret; return attach_task_by_pid(cgrp, tgid, true);
do {
/*
* attach_proc fails with -EAGAIN if threadgroup leadership
* changes in the middle of the operation, in which case we need
* to find the task_struct for the new leader and start over.
*/
ret = attach_task_by_pid(cgrp, tgid, true);
} while (ret == -EAGAIN);
return ret;
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment