Commit bfc2cf6f authored by Tejun Heo

cgroup: call subsys->*attach() only for subsystems which are actually affected by migration

Currently, subsys->*attach() callbacks are called for all subsystems
which are attached to the hierarchy on which the migration is taking
place.

With cgroup_migrate_prepare_dst() filtering out identity migrations,
v1 hierarchies can avoid spurious ->*attach() callback invocations
where the source and destination csses are identical; however, this
isn't enough on v2 as only a subset of the attached controllers can be
affected on controller enable/disable.

While spurious ->*attach() invocations aren't critically broken,
they're unnecessary overhead and can lead to temporary overcharges on
certain controllers.  Fix it by tracking which subsystems are affected
by a migration and invoking ->*attach() callbacks only on those
subsystems.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Zefan Li <lizefan@huawei.com>
parent e595cd70
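
Before the diff itself, here is a rough, self-contained C sketch of the idea described in the commit message: compare the effective css of each subsystem in the source and destination css_set and record, in a bitmask, only the subsystems that actually change. All types, names, and values below are invented for illustration and are not the kernel code.

/*
 * Illustrative sketch only (not kernel code): build a bitmask of the
 * subsystems whose effective css differs between the source and
 * destination css_set, mirroring the idea added to
 * cgroup_migrate_prepare_dst() in the patch below.
 */
#include <stdio.h>

#define EXAMPLE_SUBSYS_COUNT 4          /* pretend there are four controllers */

struct example_css_set {
        const void *subsys[EXAMPLE_SUBSYS_COUNT];  /* effective css per subsystem */
};

int main(void)
{
        /* dummy css objects; only their addresses matter for the comparison */
        static int css_a, css_b, css_c;

        struct example_css_set src = { { &css_a, &css_b, &css_c, &css_a } };
        struct example_css_set dst = { { &css_a, &css_c, &css_c, &css_b } };
        unsigned short ss_mask = 0;     /* u16 in the kernel's cgroup_mgctx */
        int ssid;

        /* set a bit only for subsystems whose css actually changes */
        for (ssid = 0; ssid < EXAMPLE_SUBSYS_COUNT; ssid++)
                if (src.subsys[ssid] != dst.subsys[ssid])
                        ss_mask |= 1 << ssid;

        printf("affected subsystem mask: 0x%x\n", (unsigned int)ss_mask); /* 0xa */
        return 0;
}
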
@@ -62,6 +62,9 @@ struct cgroup_mgctx {
 	/* tasks and csets to migrate */
 	struct cgroup_taskset tset;
+
+	/* subsystems affected by migration */
+	u16 ss_mask;
 };
 
 #define CGROUP_TASKSET_INIT(tset) \
@@ -172,7 +175,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
 			    struct cgroup_mgctx *mgctx);
 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
 int cgroup_migrate(struct task_struct *leader, bool threadgroup,
-		   struct cgroup_mgctx *mgctx, struct cgroup_root *root);
+		   struct cgroup_mgctx *mgctx);
 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 		       bool threadgroup);

@@ -125,7 +125,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 		css_task_iter_end(&it);
 
 		if (task) {
-			ret = cgroup_migrate(task, false, &mgctx, to->root);
+			ret = cgroup_migrate(task, false, &mgctx);
 			if (!ret)
 				trace_cgroup_transfer_tasks(to, task, false);
 			put_task_struct(task);

@@ -2019,15 +2019,13 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
 /**
  * cgroup_taskset_migrate - migrate a taskset
  * @mgctx: migration context
- * @root: cgroup root the migration is taking place on
  *
  * Migrate tasks in @mgctx as setup by migration preparation functions.
  * This function fails iff one of the ->can_attach callbacks fails and
  * guarantees that either all or none of the tasks in @mgctx are migrated.
  * @mgctx is consumed regardless of success.
  */
-static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx,
-				  struct cgroup_root *root)
+static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
 {
 	struct cgroup_taskset *tset = &mgctx->tset;
 	struct cgroup_subsys *ss;
@@ -2040,7 +2038,7 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx,
 		return 0;
 
 	/* check that we can legitimately attach to the cgroup */
-	do_each_subsys_mask(ss, ssid, root->subsys_mask) {
+	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
 		if (ss->can_attach) {
 			tset->ssid = ssid;
 			ret = ss->can_attach(tset);
@@ -2076,7 +2074,7 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx,
 	 */
 	tset->csets = &tset->dst_csets;
 
-	do_each_subsys_mask(ss, ssid, root->subsys_mask) {
+	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
 		if (ss->attach) {
 			tset->ssid = ssid;
 			ss->attach(tset);
@@ -2087,7 +2085,7 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx,
 	goto out_release_tset;
 
 out_cancel_attach:
-	do_each_subsys_mask(ss, ssid, root->subsys_mask) {
+	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
 		if (ssid == failed_ssid)
 			break;
 		if (ss->cancel_attach) {
@@ -2223,6 +2221,8 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
 	list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
 				 mg_preload_node) {
 		struct css_set *dst_cset;
+		struct cgroup_subsys *ss;
+		int ssid;
 
 		dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
 		if (!dst_cset)
@@ -2251,6 +2251,10 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
 				      &mgctx->preloaded_dst_csets);
 		else
 			put_css_set(dst_cset);
+
+		for_each_subsys(ss, ssid)
+			if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
+				mgctx->ss_mask |= 1 << ssid;
 	}
 
 	return 0;
@@ -2263,7 +2267,6 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
 * cgroup_migrate - migrate a process or task to a cgroup
 * @leader: the leader of the process or the task to migrate
 * @threadgroup: whether @leader points to the whole process or a single task
- * @root: cgroup root migration is taking place on
 * @mgctx: migration context
 *
 * Migrate a process or task denoted by @leader. If migrating a process,
@@ -2279,7 +2282,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
 * actually starting migrating.
 */
 int cgroup_migrate(struct task_struct *leader, bool threadgroup,
-		   struct cgroup_mgctx *mgctx, struct cgroup_root *root)
+		   struct cgroup_mgctx *mgctx)
 {
 	struct task_struct *task;
 
@@ -2299,7 +2302,7 @@ int cgroup_migrate(struct task_struct *leader, bool threadgroup,
 	rcu_read_unlock();
 	spin_unlock_irq(&css_set_lock);
 
-	return cgroup_migrate_execute(mgctx, root);
+	return cgroup_migrate_execute(mgctx);
 }
 
 /**
@@ -2335,7 +2338,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 	/* prepare dst csets and commit */
 	ret = cgroup_migrate_prepare_dst(&mgctx);
 	if (!ret)
-		ret = cgroup_migrate(leader, threadgroup, &mgctx, dst_cgrp->root);
+		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 
 	cgroup_migrate_finish(&mgctx);
@@ -2539,7 +2542,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
 	}
 	spin_unlock_irq(&css_set_lock);
 
-	ret = cgroup_migrate_execute(&mgctx, cgrp->root);
+	ret = cgroup_migrate_execute(&mgctx);
 out_finish:
 	cgroup_migrate_finish(&mgctx);
 	percpu_up_write(&cgroup_threadgroup_rwsem);
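
For the consumer side, a similarly hypothetical sketch: once the mask is known, ->attach()-style callbacks run only for the subsystems whose bit is set, in the spirit of the do_each_subsys_mask(ss, ssid, mgctx->ss_mask) loops in the patch. The subsystem table and all names below are made up for the example.

/*
 * Illustrative sketch only: invoke an ->attach()-style callback solely for
 * subsystems whose bit is set in ss_mask, so unaffected controllers never
 * see the migration.
 */
#include <stdio.h>

#define EXAMPLE_SUBSYS_COUNT 4

struct example_subsys {
        const char *name;
        void (*attach)(const struct example_subsys *ss);
};

static void example_attach(const struct example_subsys *ss)
{
        printf("->attach() invoked for %s\n", ss->name);
}

static const struct example_subsys subsys[EXAMPLE_SUBSYS_COUNT] = {
        { "cpu",    example_attach },
        { "memory", example_attach },
        { "io",     example_attach },
        { "pids",   example_attach },
};

int main(void)
{
        unsigned int ss_mask = 0xa;     /* e.g. bits 1 and 3: memory and pids changed */
        int ssid;

        /* only the affected subsystems receive the callback */
        for (ssid = 0; ssid < EXAMPLE_SUBSYS_COUNT; ssid++)
                if ((ss_mask & (1u << ssid)) && subsys[ssid].attach)
                        subsys[ssid].attach(&subsys[ssid]);

        return 0;
}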