Commit c9c89868 authored by Ingo Molnar

Merge branch 'x86/idle' into sched/core

Merge these x86 specific bits - we are going to add generic bits as well.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 0bd3a173 7d590cca
 #ifndef _ASM_X86_MWAIT_H
 #define _ASM_X86_MWAIT_H
 
+#include <linux/sched.h>
+
 #define MWAIT_SUBSTATE_MASK	0xf
 #define MWAIT_CSTATE_MASK	0xf
 #define MWAIT_SUBSTATE_SIZE	4
@@ -13,4 +15,45 @@
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc8;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xc9;"
+		     :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+	if (!current_set_polling_and_test()) {
+		if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
+			mb();
+			clflush((void *)&current_thread_info()->flags);
+			mb();
+		}
+
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
+	}
+	__current_clr_polling();
+}
+
 #endif /* _ASM_X86_MWAIT_H */
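For context, the driver call sites converted later in this diff (acpi_pad, intel_powerclamp, intel_idle) all collapse to the same pattern around the new helper. A minimal sketch of such a caller follows; the function name example_mwait_idle is hypothetical and only illustrates the shape of the converted call sites:

	/*
	 * Hypothetical caller, modeled on the conversions below: the old
	 * open-coded __monitor()/smp_mb()/need_resched()/__mwait() sequence
	 * becomes a single call. mwait_idle_with_hints() sets the polling
	 * flag, applies the CLFLUSH monitor-erratum workaround where needed,
	 * re-checks need_resched() and only then executes MWAIT.
	 */
	static void example_mwait_idle(unsigned long eax, unsigned long ecx)
	{
		stop_critical_timings();
		mwait_idle_with_hints(eax, ecx);	/* returns on wakeup or pending resched */
		start_critical_timings();
	}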
@@ -700,29 +700,6 @@ static inline void sync_core(void)
 #endif
 }
 
-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
-{
-	/* "monitor %eax, %ecx, %edx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc8;"
-		     :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax, %ecx;" */
-	asm volatile(".byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-	trace_hardirqs_on();
-	/* "mwait %eax, %ecx;" */
-	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
-		     :: "a" (eax), "c" (ecx));
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void init_amd_e400_c1e_mask(void);
...
@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
 
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(ax, cx);
-	}
-}
-
 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 {
 	unsigned int cpu = smp_processor_id();
...
@@ -1417,7 +1417,9 @@ static inline void mwait_play_dead(void)
 		 * The WBINVD is insufficient due to the spurious-wakeup
 		 * case where we return around the loop.
 		 */
+		mb();
 		clflush(mwait_ptr);
+		mb();
 		__monitor(mwait_ptr, 0, 0);
 		mb();
 		__mwait(eax, 0);
...
@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
 					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 			stop_critical_timings();
 
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			smp_mb();
-			if (!need_resched())
-				__mwait(power_saving_mwait_eax, 1);
+			mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
 			start_critical_timings();
 			if (lapic_marked_unstable)
...
@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		}
 	}
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
...
@@ -377,16 +377,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-	if (!current_set_polling_and_test()) {
-		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
-			clflush((void *)&current_thread_info()->flags);
-
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(eax, ecx);
-	}
+	mwait_idle_with_hints(eax, ecx);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
...
@@ -438,9 +438,7 @@ static int clamp_thread(void *arg)
 			 */
 			local_touch_nmi();
 			stop_critical_timings();
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			cpu_relax(); /* allow HT sibling to run */
-			__mwait(eax, ecx);
+			mwait_idle_with_hints(eax, ecx);
 			start_critical_timings();
 			atomic_inc(&idle_wakeup_counter);
 		}
...