Commit 7fda0412 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpusets: Remove an unused variable
  sched/rt: Improve pick_next_highest_task_rt()
  sched: Fix select_fallback_rq() vs cpu_active/cpu_online
  sched/x86/smp: Do not enable IRQs over calibrate_delay()
  sched: Fix compiler warning about declared inline after use
  MAINTAINERS: Update email address for SCHEDULER and PERF EVENTS
parents 6b8212a3 160594e9
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5185,7 +5185,7 @@ F: kernel/delayacct.c
 PERFORMANCE EVENTS SUBSYSTEM
 M: Peter Zijlstra <a.p.zijlstra@chello.nl>
 M: Paul Mackerras <paulus@samba.org>
-M: Ingo Molnar <mingo@elte.hu>
+M: Ingo Molnar <mingo@redhat.com>
 M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S: Supported
@@ -5833,7 +5833,7 @@ S: Maintained
 F: drivers/watchdog/sc1200wdt.c
 
 SCHEDULER
-M: Ingo Molnar <mingo@elte.hu>
+M: Ingo Molnar <mingo@redhat.com>
 M: Peter Zijlstra <peterz@infradead.org>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S: Maintained
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -219,14 +219,9 @@ static void __cpuinit smp_callin(void)
         * Update loops_per_jiffy in cpu_data. Previous call to
         * smp_store_cpu_info() stored a value that is close but not as
         * accurate as the value just calculated.
-        *
-        * Need to enable IRQs because it can take longer and then
-        * the NMI watchdog might kill us.
         */
-       local_irq_enable();
        calibrate_delay();
        cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
-       local_irq_disable();
        pr_debug("Stack at about %p\n", &cpuid);
 
        /*
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -22,7 +22,7 @@ extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_update_active_cpus(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -135,10 +135,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
        cpumask_copy(mask, cpu_possible_mask);
 }
 
-static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-       do_set_cpus_allowed(p, cpu_possible_mask);
-       return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2162,10 +2162,9 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
        mutex_unlock(&callback_mutex);
 }
 
-int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
        const struct cpuset *cs;
-       int cpu;
 
        rcu_read_lock();
        cs = task_cs(tsk);
@@ -2186,22 +2185,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
         * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
         * set any mask even if it is not right from task_cs() pov,
         * the pending set_cpus_allowed_ptr() will fix things.
+        *
+        * select_fallback_rq() will fix things ups and set cpu_possible_mask
+        * if required.
         */
-
-       cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
-       if (cpu >= nr_cpu_ids) {
-               /*
-                * Either tsk->cpus_allowed is wrong (see above) or it
-                * is actually empty. The latter case is only possible
-                * if we are racing with remove_tasks_in_empty_cpuset().
-                * Like above we can temporary set any mask and rely on
-                * set_cpus_allowed_ptr() as synchronization point.
-                */
-               do_set_cpus_allowed(tsk, cpu_possible_mask);
-               cpu = cpumask_any(cpu_active_mask);
-       }
-
-       return cpu;
 }
 
 void cpuset_init_current_mems_allowed(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1265,29 +1265,59 @@ EXPORT_SYMBOL_GPL(kick_process);
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
-       int dest_cpu;
        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+       enum { cpuset, possible, fail } state = cpuset;
+       int dest_cpu;
 
        /* Look for allowed, online CPU in same node. */
-       for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+       for_each_cpu_mask(dest_cpu, *nodemask) {
+               if (!cpu_online(dest_cpu))
+                       continue;
+               if (!cpu_active(dest_cpu))
+                       continue;
                if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                        return dest_cpu;
+       }
 
-       /* Any allowed, online CPU? */
-       dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
-       if (dest_cpu < nr_cpu_ids)
-               return dest_cpu;
+       for (;;) {
+               /* Any allowed, online CPU? */
+               for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+                       if (!cpu_online(dest_cpu))
+                               continue;
+                       if (!cpu_active(dest_cpu))
+                               continue;
+                       goto out;
+               }
 
-       /* No more Mr. Nice Guy. */
-       dest_cpu = cpuset_cpus_allowed_fallback(p);
-       /*
-        * Don't tell them about moving exiting tasks or
-        * kernel threads (both mm NULL), since they never
-        * leave kernel.
-        */
-       if (p->mm && printk_ratelimit()) {
-               printk_sched("process %d (%s) no longer affine to cpu%d\n",
-                               task_pid_nr(p), p->comm, cpu);
+               switch (state) {
+               case cpuset:
+                       /* No more Mr. Nice Guy. */
+                       cpuset_cpus_allowed_fallback(p);
+                       state = possible;
+                       break;
+
+               case possible:
+                       do_set_cpus_allowed(p, cpu_possible_mask);
+                       state = fail;
+                       break;
+
+               case fail:
+                       BUG();
+                       break;
+               }
+       }
+
+out:
+       if (state != cpuset) {
+               /*
+                * Don't tell them about moving exiting tasks or
+                * kernel threads (both mm NULL), since they never
+                * leave kernel.
+                */
+               if (p->mm && printk_ratelimit()) {
+                       printk_sched("process %d (%s) no longer affine to cpu%d\n",
+                                       task_pid_nr(p), p->comm, cpu);
+               }
        }
 
        return dest_cpu;
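For readers following the select_fallback_rq() rework above: the new code walks through three widening stages, first the mask the task's cpuset provides, then cpu_possible_mask, and only then gives up with BUG(). The short userspace sketch below models just that widening loop under toy assumptions; the names (pick_fallback_cpu, first_usable_cpu, the 8-bit masks) are illustrative inventions, not kernel API.

#include <stdio.h>

/* Toy model of the widening fallback: each stage broadens the candidate
 * mask, and running out of stages corresponds to the kernel's BUG(). */
enum fallback_state { FB_CPUSET, FB_POSSIBLE, FB_FAIL };

/* Pretend "online && active" test over an 8-bit CPU mask. */
static int first_usable_cpu(unsigned mask, unsigned usable)
{
        for (int cpu = 0; cpu < 8; cpu++)
                if ((mask & (1u << cpu)) && (usable & (1u << cpu)))
                        return cpu;
        return -1;
}

static int pick_fallback_cpu(unsigned cpuset_mask, unsigned possible_mask,
                             unsigned usable)
{
        enum fallback_state state = FB_CPUSET;
        unsigned mask = cpuset_mask;

        for (;;) {
                int cpu = first_usable_cpu(mask, usable);
                if (cpu >= 0)
                        return cpu;

                switch (state) {
                case FB_CPUSET:         /* "No more Mr. Nice Guy." */
                        mask = possible_mask;
                        state = FB_POSSIBLE;
                        break;
                case FB_POSSIBLE:       /* nothing left to widen to */
                        state = FB_FAIL;
                        break;
                case FB_FAIL:           /* kernel would BUG() here */
                        return -1;
                }
        }
}

int main(void)
{
        /* cpuset allows CPUs 0-1, but only CPU 3 is online and active. */
        printf("fallback cpu = %d\n", pick_fallback_cpu(0x03, 0xff, 0x08));
        return 0;
}

Running the sketch prints "fallback cpu = 3": the cpuset stage finds nothing usable, the widened possible stage does.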
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -416,8 +416,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                    unsigned long delta_exec);
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
  */
@@ -1162,7 +1162,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
                __clear_buddies_skip(se);
 }
 
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -1546,8 +1546,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
                resched_task(rq_of(cfs_rq)->curr);
 }
 
-static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                                    unsigned long delta_exec)
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
 {
        if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
                return;
@@ -2073,11 +2073,11 @@ void unthrottle_offline_cfs_rqs(struct rq *rq)
 }
 
 #else /* CONFIG_CFS_BANDWIDTH */
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
-                                    unsigned long delta_exec) {}
+static __always_inline
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
-static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
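The fair.c hunks above only make the forward declarations carry the same __always_inline as the later definitions, which is what the "Fix compiler warning about declared inline after use" subject refers to: GCC can warn when a function is declared inline only after a call site has already been seen. A minimal standalone reproduction of the pattern, using a hypothetical account() function and a local stand-in for the kernel's __always_inline macro:

#include <stdio.h>

#ifndef __always_inline
#define __always_inline inline __attribute__((__always_inline__))
#endif

/* If this forward declaration were a plain "static void account(...);"
 * while the definition below is inline, some GCC versions warn that
 * account() was declared inline after being called.  Keeping the
 * declaration and definition consistent avoids that, which is what the
 * hunks above do for account_cfs_rq_runtime() and return_cfs_rq_runtime(). */
static __always_inline void account(unsigned long delta);

static unsigned long total;

static void use_it(void)
{
        account(10);            /* call site appears before the inline body */
}

static __always_inline void account(unsigned long delta)
{
        total += delta;         /* body marked inline, matching the declaration */
}

int main(void)
{
        use_it();
        printf("total = %lu\n", total);
        return 0;
}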
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1428,7 +1428,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 next_idx:
        if (idx >= MAX_RT_PRIO)
                continue;
-       if (next && next->prio < idx)
+       if (next && next->prio <= idx)
                continue;
        list_for_each_entry(rt_se, array->queue + idx, run_list) {
                struct task_struct *p;
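The rt.c change above is a comparison fix in pick_next_highest_task_rt(): RT priority indices are ordered so that a lower number means higher priority, so once a candidate whose prio equals the current index has been found, rescanning that index cannot produce anything better. A tiny illustration with made-up values (not kernel code):

#include <stdio.h>

int main(void)
{
        int next_prio = 5;      /* prio of the candidate already found */
        int idx = 5;            /* priority index about to be scanned */

        /* Old test: only strictly-better candidates cause a skip, so an
         * equal-priority index is rescanned for no gain. */
        printf("old check (<) skips idx:  %s\n", next_prio < idx ? "yes" : "no");

        /* New test: an equal-priority index cannot yield a better task
         * either, so it is skipped as well. */
        printf("new check (<=) skips idx: %s\n", next_prio <= idx ? "yes" : "no");
        return 0;
}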