Commit 7a3e686e authored by Len Brown, committed by Thomas Gleixner

x86/idle: Remove enter_idle(), exit_idle()

Upon removal of the is_idle flag, these routines became NOPs.
Signed-off-by: Len Brown <len.brown@intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/822f2c22cc5890f7b8ea0eeec60277eb44505b4e.1479449716.git.len.brown@intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 9694be73
...@@ -639,7 +639,6 @@ extern void irq_exit(void); ...@@ -639,7 +639,6 @@ extern void irq_exit(void);
static inline void entering_irq(void) static inline void entering_irq(void)
{ {
irq_enter(); irq_enter();
exit_idle();
} }
static inline void entering_ack_irq(void) static inline void entering_ack_irq(void)
......
#ifndef _ASM_X86_IDLE_H #ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H #define _ASM_X86_IDLE_H
#ifdef CONFIG_X86_64
void enter_idle(void);
void exit_idle(void);
#else /* !CONFIG_X86_64 */
static inline void enter_idle(void) { }
static inline void exit_idle(void) { }
static inline void __exit_idle(void) { }
#endif /* CONFIG_X86_64 */
void amd_e400_remove_cpu(int cpu); void amd_e400_remove_cpu(int cpu);
#endif /* _ASM_X86_IDLE_H */ #endif /* _ASM_X86_IDLE_H */
...@@ -267,13 +267,11 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) ...@@ -267,13 +267,11 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
case KVM_PV_REASON_PAGE_NOT_PRESENT: case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */ /* page is swapped out by the host. */
prev_state = exception_enter(); prev_state = exception_enter();
exit_idle();
kvm_async_pf_task_wait((u32)read_cr2()); kvm_async_pf_task_wait((u32)read_cr2());
exception_exit(prev_state); exception_exit(prev_state);
break; break;
case KVM_PV_REASON_PAGE_READY: case KVM_PV_REASON_PAGE_READY:
rcu_irq_enter(); rcu_irq_enter();
exit_idle();
kvm_async_pf_task_wake((u32)read_cr2()); kvm_async_pf_task_wake((u32)read_cr2());
rcu_irq_exit(); rcu_irq_exit();
break; break;
......
...@@ -234,34 +234,9 @@ static inline void play_dead(void) ...@@ -234,34 +234,9 @@ static inline void play_dead(void)
} }
#endif #endif
#ifdef CONFIG_X86_64
void enter_idle(void)
{
}
static void __exit_idle(void)
{
}
/* Called from interrupts to signify idle end */
void exit_idle(void)
{
/* idle loop has pid 0 */
if (current->pid)
return;
__exit_idle();
}
#endif
void arch_cpu_idle_enter(void) void arch_cpu_idle_enter(void)
{ {
local_touch_nmi(); local_touch_nmi();
enter_idle();
}
void arch_cpu_idle_exit(void)
{
__exit_idle();
} }
void arch_cpu_idle_dead(void) void arch_cpu_idle_dead(void)
......
...@@ -1256,7 +1256,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) ...@@ -1256,7 +1256,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
irq_enter(); irq_enter();
#ifdef CONFIG_X86 #ifdef CONFIG_X86
exit_idle();
inc_irq_stat(irq_hv_callback_count); inc_irq_stat(irq_hv_callback_count);
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment