Commit 2ddb9f17 authored by Len Brown

Merge branch 'pmtimer-overflow' into release

parents a3b2c5e4 ff69f2bb
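
Background for the branch name: the ACPI PM timer ticks at 3.579545 MHz and is only 24 bits wide unless the FADT sets ACPI_FADT_32BIT_TIMER, so the difference between two raw timer reads wraps after a few seconds, and the old ticks_elapsed()/ticks_elapsed_in_us() helpers removed below can correct at most one wrap. Below is a minimal stand-alone sketch of that limit; the 3579545 Hz rate is the standard PM timer frequency, assumed here for illustration, and the snippet is not code from the patch.

/* Stand-alone sketch: how quickly a 24-bit or 32-bit PM timer delta wraps. */
#include <stdio.h>
#include <stdint.h>

#define PM_TIMER_FREQUENCY 3579545ULL   /* Hz, assumed ACPI PM timer rate */

int main(void)
{
        /* 2^24 ticks at 3.579545 MHz: about 4.69 seconds per wrap. */
        double wrap24 = (double)(1ULL << 24) / PM_TIMER_FREQUENCY;
        /* 2^32 ticks: about 1200 seconds per wrap. */
        double wrap32 = (double)(1ULL << 32) / PM_TIMER_FREQUENCY;

        printf("24-bit PM timer wraps every %.2f s\n", wrap24);
        printf("32-bit PM timer wraps every %.2f s\n", wrap32);
        return 0;
}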
@@ -64,7 +64,6 @@
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER       "power"
-#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
 #define PM_TIMER_TICK_NS                (1000000000ULL/PM_TIMER_FREQUENCY)
 #define C2_OVERHEAD                     1       /* 1us */
 #define C3_OVERHEAD                     1       /* 1us */
@@ -78,6 +77,10 @@ module_param(nocst, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static s64 us_to_pm_timer_ticks(s64 t)
+{
+        return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
+}
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -108,25 +111,6 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
         {},
 };
 
-static inline u32 ticks_elapsed(u32 t1, u32 t2)
-{
-        if (t2 >= t1)
-                return (t2 - t1);
-        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
-                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
-        else
-                return ((0xFFFFFFFF - t1) + t2);
-}
-
-static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
-{
-        if (t2 >= t1)
-                return PM_TIMER_TICKS_TO_US(t2 - t1);
-        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
-                return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
-        else
-                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
-}
 
 /*
  * Callers should disable interrupts before the call and enable
@@ -802,7 +786,8 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
                               struct cpuidle_state *state)
 {
-        u32 t1, t2;
+        ktime_t kt1, kt2;
+        s64 idle_time;
         struct acpi_processor *pr;
         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
@@ -820,14 +805,15 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
                 return 0;
         }
 
-        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        kt1 = ktime_get_real();
         acpi_idle_do_entry(cx);
-        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        kt2 = ktime_get_real();
+        idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
         local_irq_enable();
         cx->usage++;
 
-        return ticks_elapsed_in_us(t1, t2);
+        return idle_time;
 }
 
 /**
@@ -840,8 +826,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 {
         struct acpi_processor *pr;
         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
-        u32 t1, t2;
-        int sleep_ticks = 0;
+        ktime_t kt1, kt2;
+        s64 idle_time;
+        s64 sleep_ticks = 0;
 
         pr = __get_cpu_var(processors);
@@ -874,18 +861,19 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
         if (cx->type == ACPI_STATE_C3)
                 ACPI_FLUSH_CPU_CACHE();
 
-        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        kt1 = ktime_get_real();
         /* Tell the scheduler that we are going deep-idle: */
         sched_clock_idle_sleep_event();
         acpi_idle_do_entry(cx);
-        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        kt2 = ktime_get_real();
+        idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
         /* TSC could halt in idle, so notify users */
         if (tsc_halts_in_c(cx->type))
                 mark_tsc_unstable("TSC halts in idle");;
 #endif
-        sleep_ticks = ticks_elapsed(t1, t2);
+        sleep_ticks = us_to_pm_timer_ticks(idle_time);
 
         /* Tell the scheduler how much we idled: */
         sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
@@ -897,7 +885,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
         acpi_state_timer_broadcast(pr, cx, 0);
         cx->time += sleep_ticks;
-        return ticks_elapsed_in_us(t1, t2);
+        return idle_time;
 }
 
 static int c3_cpu_count;
@@ -915,8 +903,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 {
         struct acpi_processor *pr;
         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
-        u32 t1, t2;
-        int sleep_ticks = 0;
+        ktime_t kt1, kt2;
+        s64 idle_time;
+        s64 sleep_ticks = 0;
 
         pr = __get_cpu_var(processors);
@@ -983,9 +973,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
                 ACPI_FLUSH_CPU_CACHE();
         }
 
-        t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        kt1 = ktime_get_real();
         acpi_idle_do_entry(cx);
-        t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+        kt2 = ktime_get_real();
+        idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
         /* Re-enable bus master arbitration */
         if (pr->flags.bm_check && pr->flags.bm_control) {
@@ -1000,7 +991,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         if (tsc_halts_in_c(ACPI_STATE_C3))
                 mark_tsc_unstable("TSC halts in idle");
 #endif
-        sleep_ticks = ticks_elapsed(t1, t2);
+        sleep_ticks = us_to_pm_timer_ticks(idle_time);
 
         /* Tell the scheduler how much we idled: */
         sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
@@ -1011,7 +1002,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         acpi_state_timer_broadcast(pr, cx, 0);
         cx->time += sleep_ticks;
-        return ticks_elapsed_in_us(t1, t2);
+        return idle_time;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
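
To summarize the pattern the patch installs in all three idle-entry paths (acpi_idle_enter_c1/_simple/_bm): timestamp before and after the idle instruction, derive the idle time in microseconds, and convert back to PM-timer ticks only where a tick count is still needed. The sketch below is a user-space approximation of that flow; clock_gettime() stands in for ktime_get_real()/ktime_sub()/ktime_to_us() and usleep() stands in for acpi_idle_do_entry(), so treat it as an illustration of the arithmetic, not kernel code. The 3579545 Hz rate is the standard PM timer frequency, assumed here.

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

#define PM_TIMER_FREQUENCY 3579545LL    /* Hz, assumed ACPI PM timer rate */

/* Mirrors the helper the patch adds: microseconds -> PM-timer ticks. */
static int64_t us_to_pm_timer_ticks(int64_t us)
{
        return (us * PM_TIMER_FREQUENCY) / 1000000;
}

/* Monotonic timestamp in microseconds (user-space stand-in for ktime). */
static int64_t now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
        int64_t t1, t2, idle_time, sleep_ticks;

        t1 = now_us();          /* kt1 = ktime_get_real();          */
        usleep(2500);           /* acpi_idle_do_entry(cx);          */
        t2 = now_us();          /* kt2 = ktime_get_real();          */

        idle_time = t2 - t1;    /* ktime_to_us(ktime_sub(kt2, kt1)) */
        sleep_ticks = us_to_pm_timer_ticks(idle_time);

        /* 2500 us at 3.579545 MHz is roughly 8948 ticks. */
        printf("idle_time = %lld us, sleep_ticks = %lld\n",
               (long long)idle_time, (long long)sleep_ticks);
        return 0;
}

In the kernel paths above, the tick count still feeds sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS) and cx->time, while the functions now return the microsecond idle_time directly.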