Commit be367d09 authored by Ben Blum, committed by Linus Torvalds

cgroups: let ss->can_attach and ss->attach do whole threadgroups at a time

Alter the ss->can_attach and ss->attach functions to be able to deal with
a whole threadgroup at a time, for use in cgroup_attach_proc.  (This is a
pre-patch to cgroup-procs-writable.patch.)

Currently, the new mode of the attach function can only tell the subsystem
about the old cgroup of the threadgroup leader.  No subsystem currently
needs that information for each thread that's being moved, but if one were
to be added (for example, one that counts tasks within a group), this
interface would need to be reworked to tell the subsystem the right
information for each thread (see the illustrative sketch after the commit
metadata below).

[hidave.darkstar@gmail.com: fix build]
Signed-off-by: Ben Blum <bblum@google.com>
Signed-off-by: Paul Menage <menage@google.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Reviewed-by: Matt Helsley <matthltc@us.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Dave Young <hidave.darkstar@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c378369d
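
To make the limitation described in the commit message concrete, below is an
illustrative sketch (not part of this patch) of an attach callback for a
hypothetical task-counting subsystem using the new signature. The
charge_one()/uncharge_one() helpers are invented for the example; the point is
that only the leader's old_cgrp is passed in, so the previous cgroups of the
other threads cannot be uncharged individually without the rework mentioned
above.

#include <linux/cgroup.h>
#include <linux/rculist.h>
#include <linux/sched.h>

/* Hypothetical helpers, not real kernel API. */
static void charge_one(struct cgroup *cgrp);
static void uncharge_one(struct cgroup *cgrp);

static void counter_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup *old_cgrp, struct task_struct *tsk,
			   bool threadgroup)
{
	/* The leader's move is fully described: old and new cgroup. */
	charge_one(cgrp);
	uncharge_one(old_cgrp);

	if (threadgroup) {
		struct task_struct *c;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
			/*
			 * Only the leader's old_cgrp is available, so if this
			 * thread started out in a different cgroup there is no
			 * way to uncharge its old group here -- the rework the
			 * commit message alludes to.
			 */
			charge_one(cgrp);
		}
		rcu_read_unlock();
	}
}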
Documentation/cgroups/cgroups.txt

@@ -521,7 +521,7 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
 called multiple times against a cgroup.

 int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	       struct task_struct *task)
+	       struct task_struct *task, bool threadgroup)
 (cgroup_mutex held by caller)

 Called prior to moving a task into a cgroup; if the subsystem
@@ -529,14 +529,20 @@ returns an error, this will abort the attach operation. If a NULL
 task is passed, then a successful result indicates that *any*
 unspecified task can be moved into the cgroup. Note that this isn't
 called on a fork. If this method returns 0 (success) then this should
-remain valid while the caller holds cgroup_mutex.
+remain valid while the caller holds cgroup_mutex. If threadgroup is
+true, then a successful result indicates that all threads in the given
+thread's threadgroup can be moved together.

 void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	    struct cgroup *old_cgrp, struct task_struct *task)
+	    struct cgroup *old_cgrp, struct task_struct *task,
+	    bool threadgroup)
 (cgroup_mutex held by caller)

 Called after the task has been attached to the cgroup, to allow any
 post-attachment activity that requires memory allocations or blocking.
+If threadgroup is true, the subsystem should take care of all threads
+in the specified thread's threadgroup. Currently does not support any
+subsystem that might need the old_cgrp for every thread in the group.

 void fork(struct cgroup_subsy *ss, struct task_struct *task)
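
Each of the per-subsystem changes below follows the same pattern that this
documentation describes: perform the existing single-task check on the task
that was passed in, and, when threadgroup is true, repeat it for every other
thread in the group under rcu_read_lock(). A condensed sketch of that pattern,
with example_per_task_check() standing in as a hypothetical placeholder for
whatever test a particular subsystem applies:

#include <linux/cgroup.h>
#include <linux/rculist.h>
#include <linux/sched.h>

/* Hypothetical per-task test; each real subsystem has its own. */
static int example_per_task_check(struct cgroup *cgrp, struct task_struct *tsk);

static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct task_struct *tsk, bool threadgroup)
{
	int ret;

	/* Check the task (or threadgroup leader) that was passed in. */
	ret = example_per_task_check(cgrp, tsk);
	if (ret)
		return ret;

	if (threadgroup) {
		struct task_struct *c;

		/* tsk is the leader; walk its thread_group list under RCU. */
		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
			ret = example_per_task_check(cgrp, c);
			if (ret) {
				rcu_read_unlock();
				return ret;
			}
		}
		rcu_read_unlock();
	}
	return 0;
}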
include/linux/cgroup.h

@@ -425,10 +425,11 @@ struct cgroup_subsys {
 			  struct cgroup *cgrp);
 	int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-	int (*can_attach)(struct cgroup_subsys *ss,
-			  struct cgroup *cgrp, struct task_struct *tsk);
+	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			  struct task_struct *tsk, bool threadgroup);
 	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		       struct cgroup *old_cgrp, struct task_struct *tsk);
+		       struct cgroup *old_cgrp, struct task_struct *tsk,
+		       bool threadgroup);
 	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
 	void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
 	int (*populate)(struct cgroup_subsys *ss,
kernel/cgroup.c

@@ -1552,7 +1552,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)

 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, tsk);
+			retval = ss->can_attach(ss, cgrp, tsk, false);
 			if (retval)
 				return retval;
 		}
@@ -1590,7 +1590,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)

 	for_each_subsys(root, ss) {
 		if (ss->attach)
-			ss->attach(ss, cgrp, oldcgrp, tsk);
+			ss->attach(ss, cgrp, oldcgrp, tsk, false);
 	}
 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
 	synchronize_rcu();
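
The two call sites above keep the existing single-task behavior by passing
false. The cgroup_attach_proc() that the commit message mentions is not part
of this patch; purely as an illustration of how the new parameter is meant to
be driven, a much-simplified caller might look like the sketch below (locking,
per-thread css_set switching and error rollback are all omitted, and
example_attach_proc() is a made-up name):

/*
 * Illustrative only, not the real implementation. for_each_subsys() is the
 * kernel/cgroup.c-internal iterator also used by cgroup_attach_task() above;
 * 'oldcgrp' stands for the leader's current cgroup in this hierarchy.
 */
static int example_attach_proc(struct cgroup *cgrp, struct cgroup *oldcgrp,
			       struct task_struct *leader)
{
	struct cgroup_subsys *ss;
	int retval;

	/* Ask every subsystem whether the whole threadgroup may move. */
	for_each_subsys(cgrp->root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, leader, true);
			if (retval)
				return retval;
		}
	}

	/* ... move the leader and every other thread to the new css_set ... */

	/* Let every subsystem finish the move for the whole threadgroup. */
	for_each_subsys(cgrp->root, ss) {
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, leader, true);
	}
	return 0;
}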
kernel/cgroup_freezer.c

@@ -159,7 +159,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
  */
 static int freezer_can_attach(struct cgroup_subsys *ss,
 			      struct cgroup *new_cgroup,
-			      struct task_struct *task)
+			      struct task_struct *task, bool threadgroup)
 {
 	struct freezer *freezer;

@@ -177,6 +177,19 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 	if (freezer->state == CGROUP_FROZEN)
 		return -EBUSY;

+	if (threadgroup) {
+		struct task_struct *c;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+			if (is_task_frozen_enough(c)) {
+				rcu_read_unlock();
+				return -EBUSY;
+			}
+		}
+		rcu_read_unlock();
+	}
+
 	return 0;
 }
kernel/cpuset.c

@@ -1324,9 +1324,10 @@ static int fmeter_getrate(struct fmeter *fmp)
 static cpumask_var_t cpus_attach;

 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss,
-			     struct cgroup *cont, struct task_struct *tsk)
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+			     struct task_struct *tsk, bool threadgroup)
 {
+	int ret;
 	struct cpuset *cs = cgroup_cs(cont);

 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1343,18 +1344,51 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 	if (tsk->flags & PF_THREAD_BOUND)
 		return -EINVAL;

-	return security_task_setscheduler(tsk, 0, NULL);
+	ret = security_task_setscheduler(tsk, 0, NULL);
+	if (ret)
+		return ret;
+	if (threadgroup) {
+		struct task_struct *c;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			ret = security_task_setscheduler(c, 0, NULL);
+			if (ret) {
+				rcu_read_unlock();
+				return ret;
+			}
+		}
+		rcu_read_unlock();
+	}
+	return 0;
+}
+
+static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
+			       struct cpuset *cs)
+{
+	int err;
+	/*
+	 * can_attach beforehand should guarantee that this doesn't fail.
+	 * TODO: have a better way to handle failure here
+	 */
+	err = set_cpus_allowed_ptr(tsk, cpus_attach);
+	WARN_ON_ONCE(err);
+
+	task_lock(tsk);
+	cpuset_change_task_nodemask(tsk, to);
+	task_unlock(tsk);
+	cpuset_update_task_spread_flag(cs, tsk);
 }

-static void cpuset_attach(struct cgroup_subsys *ss,
-			  struct cgroup *cont, struct cgroup *oldcont,
-			  struct task_struct *tsk)
+static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+			  struct cgroup *oldcont, struct task_struct *tsk,
+			  bool threadgroup)
 {
 	nodemask_t from, to;
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
-	int err;

 	if (cs == &top_cpuset) {
 		cpumask_copy(cpus_attach, cpu_possible_mask);
@@ -1363,15 +1397,19 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 		guarantee_online_cpus(cs, cpus_attach);
 		guarantee_online_mems(cs, &to);
 	}
-	err = set_cpus_allowed_ptr(tsk, cpus_attach);
-	if (err)
-		return;

-	task_lock(tsk);
-	cpuset_change_task_nodemask(tsk, &to);
-	task_unlock(tsk);
-	cpuset_update_task_spread_flag(cs, tsk);
+	/* do per-task migration stuff possibly for each in the threadgroup */
+	cpuset_attach_task(tsk, &to, cs);
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			cpuset_attach_task(c, &to, cs);
+		}
+		rcu_read_unlock();
+	}

+	/* change mm; only needs to be done once even if threadgroup */
 	from = oldcs->mems_allowed;
 	to = cs->mems_allowed;
 	mm = get_task_mm(tsk);
kernel/ns_cgroup.c

@@ -42,8 +42,8 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
  *		 (hence either you are in the same cgroup as task, or in an
  *		  ancestor cgroup thereof)
  */
-static int ns_can_attach(struct cgroup_subsys *ss,
-		struct cgroup *new_cgroup, struct task_struct *task)
+static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
+			 struct task_struct *task, bool threadgroup)
 {
 	if (current != task) {
 		if (!capable(CAP_SYS_ADMIN))
@@ -56,6 +56,18 @@ static int ns_can_attach(struct cgroup_subsys *ss,
 	if (!cgroup_is_descendant(new_cgroup, task))
 		return -EPERM;

+	if (threadgroup) {
+		struct task_struct *c;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+			if (!cgroup_is_descendant(new_cgroup, c)) {
+				rcu_read_unlock();
+				return -EPERM;
+			}
+		}
+		rcu_read_unlock();
+	}
+
 	return 0;
 }
kernel/sched.c

@@ -10377,8 +10377,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }

 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		      struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10388,15 +10387,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	if (tsk->sched_class != &fair_sched_class)
 		return -EINVAL;
 #endif
+	return 0;
+}
+
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk, bool threadgroup)
+{
+	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+	if (retval)
+		return retval;
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			retval = cpu_cgroup_can_attach_task(cgrp, c);
+			if (retval) {
+				rcu_read_unlock();
+				return retval;
+			}
+		}
+		rcu_read_unlock();
+	}
 	return 0;
 }

 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		  struct cgroup *old_cont, struct task_struct *tsk)
+		  struct cgroup *old_cont, struct task_struct *tsk,
+		  bool threadgroup)
 {
 	sched_move_task(tsk);
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			sched_move_task(c);
+		}
+		rcu_read_unlock();
+	}
 }

 #ifdef CONFIG_FAIR_GROUP_SCHED
mm/memcontrol.c

@@ -2612,7 +2612,8 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				struct cgroup *cont,
 				struct cgroup *old_cont,
-				struct task_struct *p)
+				struct task_struct *p,
+				bool threadgroup)
 {
 	mutex_lock(&memcg_tasklist);
 	/*
security/device_cgroup.c

@@ -61,7 +61,8 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 struct cgroup_subsys devices_subsys;

 static int devcgroup_can_attach(struct cgroup_subsys *ss,
-		struct cgroup *new_cgroup, struct task_struct *task)
+		struct cgroup *new_cgroup, struct task_struct *task,
+		bool threadgroup)
 {
 	if (current != task && !capable(CAP_SYS_ADMIN))
 		return -EPERM;