Commit f22e08a7 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix incorrect usage of for_each_cpu_mask() in select_fallback_rq()
  sched: Fix __schedule_bug() output when called from an interrupt
  sched/arch: Introduce the finish_arch_post_lock_switch() scheduler callback
parents f187e9fd e3831edd
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1270,7 +1270,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_mask(dest_cpu, *nodemask) {
+	for_each_cpu(dest_cpu, nodemask) {
 		if (!cpu_online(dest_cpu))
 			continue;
 		if (!cpu_active(dest_cpu))
@@ -1281,7 +1281,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
 			if (!cpu_online(dest_cpu))
 				continue;
 			if (!cpu_active(dest_cpu))
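The iterator change above is more than cosmetic: for_each_cpu() takes a pointer to the cpumask and tests each bit, while the obsolete for_each_cpu_mask() took the mask itself and, in its uniprocessor variant, never consulted it, so even an empty mask could still yield CPU 0. A minimal userspace sketch of that bug class (the fake_cpu macro names are invented stand-ins, not kernel API):

/*
 * Contrast a mask-checking iterator with a UP-style one that ignores
 * the mask entirely. Illustrative only; not the kernel's definitions.
 */
#include <stdio.h>

#define NR_FAKE_CPUS 4

/* Like for_each_cpu(): visit only CPUs whose bit is set. */
#define for_each_fake_cpu(cpu, maskp)                  \
	for ((cpu) = 0; (cpu) < NR_FAKE_CPUS; (cpu)++) \
		if (*(maskp) & (1u << (cpu)))

/* Like the old UP for_each_cpu_mask(): never looks at the mask. */
#define for_each_fake_cpu_up(cpu, mask) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))

int main(void)
{
	unsigned int empty = 0;
	int cpu;

	for_each_fake_cpu(cpu, &empty)
		printf("checked iterator picked cpu %d\n", cpu); /* never runs */

	for_each_fake_cpu_up(cpu, empty)
		printf("UP iterator picked cpu %d\n", cpu); /* prints cpu 0 */

	return 0;
}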
@@ -1964,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
@@ -3101,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	struct pt_regs *regs = get_irq_regs();
-
 	if (oops_in_progress)
 		return;
@@ -3113,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
 	print_modules();
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-
-	if (regs)
-		show_regs(regs);
-	else
-		dump_stack();
+	dump_stack();
 }
 
 /*
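With the register snapshot gone, __schedule_bug() now always calls dump_stack(), so the report shows the call chain that actually triggered the warning rather than registers saved at interrupt entry, which is the output fix named in the merge log. A rough userspace analogy, assuming glibc's <execinfo.h> (report_bug() and buggy_path() are invented names; build with -rdynamic for readable symbols):

/*
 * Always print the current call chain, the dump_stack() analogue,
 * instead of dumping a saved register snapshot.
 */
#include <execinfo.h>
#include <stdio.h>

static void report_bug(void)
{
	void *frames[16];
	int n = backtrace(frames, 16);

	fprintf(stderr, "BUG: scheduling while atomic (sketch)\n");
	/* Like dump_stack(): show how we got here, fd 2 = stderr. */
	backtrace_symbols_fd(frames, n, 2);
}

static void buggy_path(void)
{
	report_bug();
}

int main(void)
{
	buggy_path();
	return 0;
}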
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -681,6 +681,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifndef finish_arch_switch
 # define finish_arch_switch(prev)	do { } while (0)
 #endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
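The fallback above follows the same opt-in pattern as finish_arch_switch(): an architecture that needs work after the runqueue lock is released defines the macro in its own headers, and everyone else gets a no-op. A small userspace mock of the pattern (arch_hook() is a hypothetical name, not a kernel symbol):

/*
 * Mock of the #ifndef-override pattern used for
 * finish_arch_post_lock_switch(). An arch header defines the macro
 * before the generic fallback is seen; otherwise it compiles to nothing.
 */
#include <stdio.h>

static void arch_hook(void)
{
	printf("arch-specific post-lock-switch work\n");
}

/* "Arch header": opt in by defining the hook. */
#define finish_arch_post_lock_switch() arch_hook()

/* "Generic header": no-op fallback when the arch stays silent. */
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch() do { } while (0)
#endif

int main(void)
{
	/* Mirrors the call site added in finish_task_switch(). */
	finish_arch_post_lock_switch();
	return 0;
}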