Commit a9746d8d authored by Tejun Heo

cgroup: factor out cgroup_kn_lock_live() and cgroup_kn_unlock()

cgroup_mkdir(), cgroup_rmdir() and cgroup_subtree_control_write()
share the logic to break active protection so that they can grab
cgroup_tree_mutex which nests above active protection and/or remove
self.  Factor out this logic into cgroup_kn_lock_live() and
cgroup_kn_unlock().

This patch doesn't introduce any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
parent cfc79d5b
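
For orientation, the sketch below (not part of the patch) shows the caller pattern the new helpers establish: a hypothetical cgroup kernfs method pairing cgroup_kn_lock_live() with cgroup_kn_unlock(). The method name and body are placeholders; only the two helpers come from the diff that follows.

/*
 * Illustrative sketch only -- not from this patch.  example_kn_method()
 * and its body are placeholders; cgroup_kn_lock_live() and
 * cgroup_kn_unlock() are the helpers introduced in the diff below.
 */
static int example_kn_method(struct kernfs_node *kn)
{
	struct cgroup *cgrp;
	int ret = 0;

	/* breaks kernfs active protection, takes cgroup_tree_mutex and
	 * cgroup_mutex, and returns the pinned live cgroup or NULL */
	cgrp = cgroup_kn_lock_live(kn);
	if (!cgrp)
		return -ENODEV;

	/* ... operate on @cgrp under cgroup_mutex ... */

	/* drops both mutexes, restores active protection and puts the
	 * reference; @cgrp may become inaccessible after this call */
	cgroup_kn_unlock(kn);
	return ret;
}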
@@ -1093,6 +1093,75 @@ static void cgroup_put(struct cgroup *cgrp)
 	call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
 }
 
+/**
+ * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
+ * @kn: the kernfs_node being serviced
+ *
+ * This helper undoes cgroup_kn_lock_live() and should be invoked before
+ * the method finishes if locking succeeded.  Note that once this function
+ * returns the cgroup returned by cgroup_kn_lock_live() may become
+ * inaccessible any time.  If the caller intends to continue to access the
+ * cgroup, it should pin it before invoking this function.
+ */
+static void cgroup_kn_unlock(struct kernfs_node *kn)
+{
+	struct cgroup *cgrp;
+
+	if (kernfs_type(kn) == KERNFS_DIR)
+		cgrp = kn->priv;
+	else
+		cgrp = kn->parent->priv;
+
+	mutex_unlock(&cgroup_mutex);
+	mutex_unlock(&cgroup_tree_mutex);
+
+	kernfs_unbreak_active_protection(kn);
+	cgroup_put(cgrp);
+}
+
+/**
+ * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
+ * @kn: the kernfs_node being serviced
+ *
+ * This helper is to be used by a cgroup kernfs method currently servicing
+ * @kn.  It breaks the active protection, performs cgroup locking and
+ * verifies that the associated cgroup is alive.  Returns the cgroup if
+ * alive; otherwise, %NULL.  A successful return should be undone by a
+ * matching cgroup_kn_unlock() invocation.
+ *
+ * Any cgroup kernfs method implementation which requires locking the
+ * associated cgroup should use this helper.  It avoids nesting cgroup
+ * locking under kernfs active protection and allows all kernfs operations
+ * including self-removal.
+ */
+static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
+{
+	struct cgroup *cgrp;
+
+	if (kernfs_type(kn) == KERNFS_DIR)
+		cgrp = kn->priv;
+	else
+		cgrp = kn->parent->priv;
+
+	/*
+	 * We're gonna grab cgroup_tree_mutex which nests outside kernfs
+	 * active_ref.  cgroup liveliness check alone provides enough
+	 * protection against removal.  Ensure @cgrp stays accessible and
+	 * break the active_ref protection.
+	 */
+	cgroup_get(cgrp);
+	kernfs_break_active_protection(kn);
+	mutex_lock(&cgroup_tree_mutex);
+	mutex_lock(&cgroup_mutex);
+
+	if (!cgroup_is_dead(cgrp))
+		return cgrp;
+
+	cgroup_kn_unlock(kn);
+	return NULL;
+}
+
 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
 {
 	char name[CGROUP_FILE_NAME_MAX];
@@ -2541,7 +2610,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 					    loff_t off)
 {
 	unsigned int enable = 0, disable = 0;
-	struct cgroup *cgrp = of_css(of)->cgroup, *child;
+	struct cgroup *cgrp, *child;
 	struct cgroup_subsys *ss;
 	char *tok;
 	int ssid, ret;
@@ -2573,20 +2642,9 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 			return -EINVAL;
 	}
 
-	/*
-	 * We're gonna grab cgroup_tree_mutex which nests outside kernfs
-	 * active_ref.  cgroup_lock_live_group() already provides enough
-	 * protection.  Ensure @cgrp stays accessible and break the
-	 * active_ref protection.
-	 */
-	cgroup_get(cgrp);
-	kernfs_break_active_protection(of->kn);
-	mutex_lock(&cgroup_tree_mutex);
-	if (!cgroup_lock_live_group(cgrp)) {
-		ret = -ENODEV;
-		goto out_unlock_tree;
-	}
+	cgrp = cgroup_kn_lock_live(of->kn);
+	if (!cgrp)
+		return -ENODEV;
 
 	for_each_subsys(ss, ssid) {
 		if (enable & (1 << ssid)) {
@@ -2610,14 +2668,12 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 			cgroup_get(child);
 			prepare_to_wait(&child->offline_waitq, &wait,
 					TASK_UNINTERRUPTIBLE);
-			mutex_unlock(&cgroup_mutex);
-			mutex_unlock(&cgroup_tree_mutex);
+			cgroup_kn_unlock(of->kn);
 			schedule();
 			finish_wait(&child->offline_waitq, &wait);
 			cgroup_put(child);
 
-			ret = restart_syscall();
-			goto out_unbreak;
+			return restart_syscall();
 		}
 
 		/* unavailable or not enabled on the parent? */
@@ -2693,12 +2749,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 	kernfs_activate(cgrp->kn);
 	ret = 0;
 out_unlock:
-	mutex_unlock(&cgroup_mutex);
-out_unlock_tree:
-	mutex_unlock(&cgroup_tree_mutex);
-out_unbreak:
-	kernfs_unbreak_active_protection(of->kn);
-	cgroup_put(cgrp);
+	cgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
 
 err_undo_css:
@@ -4238,25 +4289,16 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 			umode_t mode)
 {
-	struct cgroup *parent = parent_kn->priv, *cgrp;
-	struct cgroup_root *root = parent->root;
+	struct cgroup *parent, *cgrp;
+	struct cgroup_root *root;
 	struct cgroup_subsys *ss;
 	struct kernfs_node *kn;
 	int ssid, ret;
 
-	/*
-	 * cgroup_mkdir() grabs cgroup_tree_mutex which nests outside
-	 * kernfs active_ref and cgroup_create() already synchronizes
-	 * properly against removal through cgroup_lock_live_group().
-	 * Break it before calling cgroup_create().
-	 */
-	cgroup_get(parent);
-	kernfs_break_active_protection(parent_kn);
-	mutex_lock(&cgroup_tree_mutex);
-	if (!cgroup_lock_live_group(parent)) {
-		ret = -ENODEV;
-		goto out_unlock_tree;
-	}
+	parent = cgroup_kn_lock_live(parent_kn);
+	if (!parent)
+		return -ENODEV;
+	root = parent->root;
 
 	/* allocate the cgroup and its ID, 0 is reserved for the root */
 	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
@@ -4348,11 +4390,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 out_free_cgrp:
 	kfree(cgrp);
 out_unlock:
-	mutex_unlock(&cgroup_mutex);
-out_unlock_tree:
-	mutex_unlock(&cgroup_tree_mutex);
-	kernfs_unbreak_active_protection(parent_kn);
-	cgroup_put(parent);
+	cgroup_kn_unlock(parent_kn);
 	return ret;
 
 out_destroy:
@@ -4579,32 +4617,17 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
 static int cgroup_rmdir(struct kernfs_node *kn)
 {
-	struct cgroup *cgrp = kn->priv;
+	struct cgroup *cgrp;
 	int ret = 0;
 
-	/*
-	 * This is self-destruction but @kn can't be removed while this
-	 * callback is in progress.  Let's break active protection.  Once
-	 * the protection is broken, @cgrp can be destroyed at any point.
-	 * Pin it so that it stays accessible.
-	 */
-	cgroup_get(cgrp);
-	kernfs_break_active_protection(kn);
-
-	mutex_lock(&cgroup_tree_mutex);
-	mutex_lock(&cgroup_mutex);
-
-	/*
-	 * @cgrp might already have been destroyed while we're trying to
-	 * grab the mutexes.
-	 */
-	if (!cgroup_is_dead(cgrp))
-		ret = cgroup_destroy_locked(cgrp);
+	cgrp = cgroup_kn_lock_live(kn);
+	if (!cgrp)
+		return 0;
+	cgroup_get(cgrp);	/* for @kn->priv clearing */
 
-	mutex_unlock(&cgroup_mutex);
-	mutex_unlock(&cgroup_tree_mutex);
+	ret = cgroup_destroy_locked(cgrp);
 
-	kernfs_unbreak_active_protection(kn);
+	cgroup_kn_unlock(kn);
 
 	/*
 	 * There are two control paths which try to determine cgroup from
......