Commit c5405a49 authored by Neil Zhang, committed by Ingo Molnar

sched: Remove redundant update_runtime notifier

migration_call() already does everything that update_runtime() does,
so let's remove it.
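For context, this is the path through migration_call() that already covers the notifier's work in kernels of this era (around v3.9). The excerpt below is abridged to the branches that matter here, so read it as a sketch of the call chain rather than the full functions:

/* kernel/sched/core.c (abridged): the hotplug notifier that remains */
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/* set_rq_online() invokes rq_online_rt(), which calls __enable_runtime() */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd)
			set_rq_online(rq);
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;
	case CPU_DYING:
		/* set_rq_offline() invokes rq_offline_rt(), which calls __disable_runtime() */
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (rq->rd)
			set_rq_offline(rq);
		migrate_tasks(cpu);
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		break;
	}
	return NOTIFY_OK;
}

The RT runtime enable/disable work thus rides on the rq online/offline transitions (rq_online_rt()/rq_offline_rt() in rt.c) that these branches drive, rather than on a second, independent notifier.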

Furthermore, there is a potential risk that the current code will trip the
BUG_ON at line 689 of rt.c when doing CPU hotplug while real-time threads
are running, because runtime gets enabled twice while rt_runtime may have
already changed.
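To make the failure mode concrete: with both notifiers registered, a CPU coming online can run __enable_runtime() twice, once via set_rq_online() above and once via update_runtime(). The reset it performs is unconditional, so a second call can discard rt_runtime that has since been borrowed from or lent to other CPUs; a later __disable_runtime() then cannot reclaim what the accounting says it is owed and hits its BUG_ON(want). Abridged from rt.c of this era, with the explanatory comment added here:

/* kernel/sched/rt.c (abridged): called via rq_online_rt() on each online event */
static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/* Reset each runqueue's bandwidth settings */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Unconditional reset: a redundant second call throws away
		 * any runtime redistributed since the first one, which is
		 * what lets __disable_runtime()'s BUG_ON(want) fire later.
		 */
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}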
Signed-off-by: Neil Zhang <zhangwm@marvell.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1365685499-26515-1-git-send-email-zhangwm@marvell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 41261b6a
@@ -6285,9 +6285,6 @@ void __init sched_init_smp(void)
 	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
 	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
-	/* RT runtime code needs to handle some hotplug events */
-	hotcpu_notifier(update_runtime, 0);
-
 	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
...
@@ -699,15 +699,6 @@ static void __disable_runtime(struct rq *rq)
 	}
 }
 
-static void disable_runtime(struct rq *rq)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__disable_runtime(rq);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static void __enable_runtime(struct rq *rq)
 {
 	rt_rq_iter_t iter;
@@ -732,37 +723,6 @@ static void __enable_runtime(struct rq *rq)
 	}
 }
 
-static void enable_runtime(struct rq *rq)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__enable_runtime(rq);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-	int cpu = (int)(long)hcpu;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		disable_runtime(cpu_rq(cpu));
-		return NOTIFY_OK;
-
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		enable_runtime(cpu_rq(cpu));
-		return NOTIFY_OK;
-
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
 static int balance_runtime(struct rt_rq *rt_rq)
 {
 	int more = 0;
...
@@ -1041,7 +1041,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
 extern void update_max_interval(void);
-extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
...