Commit 040b9d7c authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Three fixes:

   - fix a suspend/resume cpusets bug

   - fix a !CONFIG_NUMA_BALANCING bug

   - fix a kerneldoc warning"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix nuisance kernel-doc warning
  sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
  sched/fair: Fix wake_affine_llc() balancing rules
parents e6328a7a 46123355
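
The cpuset/PM fix in this pull pairs two primitives: cpuset_update_active_cpus() merely schedules the expensive sched-domain rebuild on a workqueue, and the new cpuset_wait_for_hotplug() lets thaw_processes() flush that pending work before user tasks are thawed, so no task resumes against half-updated cpuset state. A minimal userspace sketch of that schedule-then-flush ordering, using a POSIX thread as a stand-in for the kernel workqueue (all names below are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Shared state standing in for the cpuset/sched-domain configuration. */
static bool domains_rebuilt;
static pthread_t hotplug_worker;

/* Stand-in for cpuset_hotplug_workfn(): the deferred rebuild. */
static void *hotplug_workfn(void *arg)
{
	(void)arg;
	domains_rebuilt = true;
	return NULL;
}

/* Stand-in for cpuset_update_active_cpus(): queue the work, don't wait. */
static void update_active_cpus(void)
{
	pthread_create(&hotplug_worker, NULL, hotplug_workfn, NULL);
}

/* Stand-in for cpuset_wait_for_hotplug(): flush the pending work. */
static void wait_for_hotplug(void)
{
	pthread_join(hotplug_worker, NULL);
}

int main(void)	/* build with: cc sketch.c -lpthread */
{
	update_active_cpus();	/* the resume path queues the rebuild... */
	wait_for_hotplug();	/* ...and thaw_processes() then flushes it */
	printf("rebuilt before thaw: %s\n", domains_rebuilt ? "yes" : "no");
	return 0;
}

The join is what the added cpuset_wait_for_hotplug() call in thaw_processes() provides: only after it returns is it safe to let tasks run again.
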
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
 	partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2275,6 +2275,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2349,8 +2356,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated)
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
 		rebuild_sched_domains();
+	}
 }
 
 void cpuset_update_active_cpus(void)
@@ -2363,6 +2372,11 @@ void cpuset_update_active_cpus(void)
 	schedule_work(&cpuset_hotplug_work);
 }
 
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
+}
+
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
  * Call this routine anytime after node_states[N_MEMORY] changes.
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -20,8 +20,9 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/cpuset.h>
 
 /*
  * Timeout for stopping processes
 */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -202,6 +203,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
 		 * operation in the resume sequence, just build a single sched
 		 * domain, ignoring cpusets.
 		 */
-		num_cpus_frozen--;
-		if (likely(num_cpus_frozen)) {
-			partition_sched_domains(1, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
+		if (--num_cpus_frozen)
 			return;
-		}
 
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus();
 }
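
Read together, this hunk works as follows: each intermediate CPU-online during resume keeps the single degenerate domain and decrements num_cpus_frozen; only the last online sets force_rebuild, so cpuset_hotplug_workfn() rebuilds the domains even though the online mask, and hence cpus_updated, ends up identical to its pre-suspend value. A standalone model of that counter/flag interplay (a sketch only; the names mirror the kernel's but the bodies are simplified stand-ins):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int num_cpus_frozen;
static bool force_rebuild;
static int rebuilds;

/* Model of cpuset_hotplug_workfn(): rebuild only when the cpu mask
 * changed or a rebuild was explicitly forced. */
static void hotplug_workfn(bool cpus_updated)
{
	if (cpus_updated || force_rebuild) {
		force_rebuild = false;
		rebuilds++;
	}
}

/* Model of cpuset_cpu_active() during resume: the online mask returns
 * to its pre-suspend value, so cpus_updated stays false throughout. */
static void cpu_active(void)
{
	if (num_cpus_frozen) {
		if (--num_cpus_frozen)
			return;		/* mid-resume: keep the single domain */
		force_rebuild = true;	/* last CPU back: force one rebuild */
	}
	hotplug_workfn(false);
}

int main(void)
{
	num_cpus_frozen = 4;	/* suspend froze four CPUs */
	for (int i = 0; i < 4; i++)
		cpu_active();
	assert(rebuilds == 1);	/* exactly one rebuild, on the last online */
	printf("rebuilds: %d\n", rebuilds);
	return 0;
}

Without the flag, cpus_updated would be false on that last online and the full cpuset-aware domains would never be restored, which is the suspend/resume bug being fixed.
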
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5424,7 +5424,7 @@ wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
 		return false;
 
 	/* if this cache has capacity, come here */
-	if (this_stats.has_capacity && this_stats.nr_running < prev_stats.nr_running+1)
+	if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
 		return true;
 
 	/*
@@ -7708,7 +7708,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
  * number.
  *
  * Return: 1 when packing is required and a task should be moved to
- * this CPU. The amount of the imbalance is returned in *imbalance.
+ * this CPU. The amount of the imbalance is returned in env->imbalance.
  *
  * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain which is to be packed
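
The wake_affine_llc() fix flips the inequality so the waking task itself is charged to the destination cache: migrate only when, even counting that task, this LLC would still run fewer tasks than the previous one. A small harness contrasting the two predicates on sample loads (struct and field names follow the hunk, but this is a simplified stand-in for the kernel's llc_stats):

#include <stdbool.h>
#include <stdio.h>

struct llc_stats { int nr_running; bool has_capacity; };

/* Old rule: pull whenever this cache is no busier than prev. */
static bool old_rule(struct llc_stats this_stats, struct llc_stats prev_stats)
{
	return this_stats.has_capacity &&
	       this_stats.nr_running < prev_stats.nr_running + 1;
}

/* Fixed rule: pull only if this cache, counting the waking task,
 * would still run fewer tasks than prev. */
static bool new_rule(struct llc_stats this_stats, struct llc_stats prev_stats)
{
	return this_stats.has_capacity &&
	       this_stats.nr_running + 1 < prev_stats.nr_running;
}

int main(void)
{
	struct llc_stats this_stats = { .nr_running = 2, .has_capacity = true };
	struct llc_stats prev_stats = { .nr_running = 2 };

	/* Equal load: the old rule still migrates (ping-pong risk),
	 * the fixed rule leaves the task where it was. */
	printf("equal load  -> old: %d, new: %d\n",
	       old_rule(this_stats, prev_stats), new_rule(this_stats, prev_stats));

	/* prev must be at least two tasks ahead before migration pays off. */
	prev_stats.nr_running = 4;
	printf("prev busier -> old: %d, new: %d\n",
	       old_rule(this_stats, prev_stats), new_rule(this_stats, prev_stats));
	return 0;
}
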