Commit c8d9c90c authored by Paul Jackson, committed by Linus Torvalds

hotplug cpu: move tasks in empty cpusets to parent various other fixes

Various minor formatting and comment tweaks to Cliff Wickman's
[PATCH_3_of_3]_cpusets__update_cpumask_revision.patch

I had had "iff", meaning "if and only if", in a comment.  However, except for
ancient mathematicians, the abbreviation "iff" was a tad too cryptic.  Cliff
changed it to "if", presumably figuring that the "iff" was a typo.  However,
it was the "only if" half of the conjunction that was most interesting.
Reword to emphasize the "only if" aspect.

The locking comment for remove_tasks_in_empty_cpuset() was wrong; it said
callback_mutex had to be held on entry.  The opposite is true.
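
For illustration, the caller in scan_for_empty_cpusets() (see the diff below)
follows roughly this pattern; a simplified sketch, not verbatim kernel code:

	/*
	 * cgroup_mutex is held by the caller.  callback_mutex must be
	 * dropped across the call, because moving the tasks ends up in
	 * cpuset_attach(), which takes callback_mutex itself.
	 */
	mutex_unlock(&callback_mutex);
	remove_tasks_in_empty_cpuset(cp);
	mutex_lock(&callback_mutex);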

Several mentions of attach_task() in comments needed to be
changed to cgroup_attach_task().

A comment about notify_on_release was no longer relevant,
as the line of code it had commented, namely:
	set_bit(CS_RELEASED_RESOURCE, &parent->flags);
is no longer present in that place in the cpuset.c code.

Similarly a comment about notify_on_release before the
scan_for_empty_cpusets() routine was no longer relevant.

Removed extra parentheses and unnecessary return statement.

Renamed attach_task() to cpuset_attach() in various comments.

Removed comment about not needing memory migration, as it seems the migration
is done anyway, via the cpuset_attach() callback from cgroup_attach_task().
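
Roughly, the call chain behind that observation is sketched below; the names up
to cpuset_attach() appear in this patch's context, while the final
cpuset_migrate_mm() step is an assumption based on the migration comment this
patch also touches:

	move_member_tasks_to_cpuset()
	  -> cgroup_scan_tasks()		/* scan tasks in the empty cpuset */
	    -> cpuset_do_move_task()
	      -> cgroup_attach_task()
	        -> cpuset_attach()		/* cpuset's cgroup attach callback */
	          -> cpuset_migrate_mm()	/* migrate pages to the new mems_allowed */
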
Signed-off-by: Paul Jackson <pj@sgi.com>
Acked-by: Cliff Wickman <cpw@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2df167a3
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -752,7 +752,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 	trialcs = *cs;
 
 	/*
-	 * An empty cpus_allowed is ok if there are no tasks in the cpuset.
+	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
 	 * that parsing. The validate_change() call ensures that cpusets
 	 * with tasks have cpus.
@@ -809,7 +809,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
  * so that the migration code can allocate pages on these nodes.
  *
  * Call holding cgroup_mutex, so current's cpuset won't change
- * during this call, as cgroup_mutex holds off any attach_task()
+ * during this call, as manage_mutex holds off any cpuset_attach()
  * calls. Therefore we don't need to take task_lock around the
  * call to guarantee_online_mems(), as we know no one is changing
  * our task's cpuset.
@@ -1661,8 +1661,8 @@ void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan)
  * @from: cpuset in which the tasks currently reside
  * @to: cpuset to which the tasks will be moved
  *
- * Called with manage_sem held
- * callback_mutex must not be held, as attach_task() will take it.
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
@@ -1689,18 +1689,18 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
  * last CPU or node from a cpuset, then move the tasks in the empty
  * cpuset to its next-highest non-empty parent.
  *
- * The parent cpuset has some superset of the 'mems' nodes that the
- * newly empty cpuset held, so no migration of memory is necessary.
- *
- * Called with both manage_sem and callback_sem held
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
  */
 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 {
 	struct cpuset *parent;
 
-	/* the cgroup's css_sets list is in use if there are tasks
-	   in the cpuset; the list is empty if there are none;
-	   the cs->css.refcnt seems always 0 */
+	/*
+	 * The cgroup's css_sets list is in use if there are tasks
+	 * in the cpuset; the list is empty if there are none;
+	 * the cs->css.refcnt seems always 0.
+	 */
 	if (list_empty(&cs->css.cgroup->css_sets))
 		return;
 
@@ -1709,14 +1709,8 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 	 * has online cpus, so can't be empty).
 	 */
 	parent = cs->parent;
-	while (cpus_empty(parent->cpus_allowed)) {
-		/*
-		 * this empty cpuset should now be considered to
-		 * have been used, and therefore eligible for
-		 * release when empty (if it is notify_on_release)
-		 */
+	while (cpus_empty(parent->cpus_allowed))
 		parent = parent->parent;
-	}
 
 	move_member_tasks_to_cpuset(cs, parent);
 }
@@ -1725,10 +1719,6 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
  * Walk the specified cpuset subtree and look for empty cpusets.
  * The tasks of such cpuset must be moved to a parent cpuset.
  *
- * Note that such a notify_on_release cpuset must have had, at some time,
- * member tasks or cpuset descendants and cpus and memory, before it can
- * be a candidate for release.
- *
  * Called with cgroup_mutex held. We take callback_mutex to modify
  * cpus_allowed and mems_allowed.
  *
@@ -1764,8 +1754,8 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
 		cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 						node_states[N_HIGH_MEMORY]);
-		if ((cpus_empty(cp->cpus_allowed) ||
-		     nodes_empty(cp->mems_allowed))) {
+		if (cpus_empty(cp->cpus_allowed) ||
+		    nodes_empty(cp->mems_allowed)) {
 			/* Move tasks from the empty cpuset to a parent */
 			mutex_unlock(&callback_mutex);
 			remove_tasks_in_empty_cpuset(cp);
@@ -1773,7 +1763,6 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
 		}
 	}
 	mutex_unlock(&callback_mutex);
-	return;
 }
 
 /*
@@ -2207,7 +2196,7 @@ void __cpuset_memory_pressure_bump(void)
  * - Used for /proc/<pid>/cpuset.
  * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
  *   doesn't really matter if tsk->cpuset changes after we read it,
- *   and we take cgroup_mutex, keeping attach_task() from changing it
+ *   and we take cgroup_mutex, keeping cpuset_attach() from changing it
  *   anyway.
  */
 static int proc_cpuset_show(struct seq_file *m, void *unused_v)