Commit 76695841 authored by Russell King

Some of the existing ARM CPU "wait for interrupt" implementations were
buggy: they tested hlt_counter/need_resched with interrupts enabled and
then went to sleep.  An interrupt could be received between the test and
sleeping, and we could see 10ms of latency before we ran a task.  This
changeset fixes this by providing a generic "cpu idle" wrapper that
handles these issues.
parent 94394e5b
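The race is easiest to see by comparing the two orderings. If the idle loop tests need_resched()/hlt_counter while interrupts are still enabled, a wakeup interrupt can land after the test but before the wait-for-interrupt instruction, and the CPU then sleeps until the next timer tick. The sketch below is illustrative only: the helpers are declared as stubs standing in for the real kernel primitives, and the "fixed" version simply mirrors the default_idle() added by this changeset (on ARM, the wait-for-interrupt wakes the core even while IRQs are masked, and the pending interrupt is taken once __sti() re-enables them).

/* Illustrative stubs only -- these stand in for the real kernel primitives. */
extern int hlt_counter;                 /* non-zero means "don't halt" */
extern int need_resched(void);          /* is a runnable task waiting? */
extern void arch_idle(void);            /* architecture "wait for interrupt" */
extern void __cli(void);                /* disable interrupts */
extern void __sti(void);                /* enable interrupts */

/* Buggy ordering: a wakeup interrupt that lands between the test and
 * arch_idle() is serviced, but we still fall asleep afterwards and only
 * notice the woken task at the next timer tick. */
static void racy_idle(void)
{
        if (!need_resched() && !hlt_counter)
                arch_idle();
}

/* Fixed ordering (mirrors default_idle() below): the test runs with
 * interrupts disabled, so a wakeup either arrives before __cli() -- making
 * the test fail and skipping the sleep -- or stays pending and is taken
 * as soon as we wake up and __sti() re-enables interrupts. */
static void safe_idle(void)
{
        __cli();
        if (!need_resched() && !hlt_counter)
                arch_idle();
        __sti();
}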
@@ -31,14 +31,6 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 
-/*
- * Values for cpu_do_idle()
- */
-#define IDLE_WAIT_SLOW	0
-#define IDLE_WAIT_FAST	1
-#define IDLE_CLOCK_SLOW	2
-#define IDLE_CLOCK_FAST	3
 
 extern const char *processor_modes[];
 extern void setup_mm_for_reboot(char mode);
@@ -77,6 +69,18 @@ __setup("hlt", hlt_setup);
 void (*pm_idle)(void);
 void (*pm_power_off)(void);
 
+/*
+ * This is our default idle handler.  We need to disable
+ * interrupts here to ensure we don't miss a wakeup call.
+ */
+static void default_idle(void)
+{
+        __cli();
+        if (!need_resched() && !hlt_counter)
+                arch_idle();
+        __sti();
+}
+
 /*
  * The idle thread.  We try to conserve power, while trying to keep
  * overall latency low.  The architecture specific idle is passed
@@ -89,7 +93,7 @@ void cpu_idle(void)
         while (1) {
                 void (*idle)(void) = pm_idle;
                 if (!idle)
-                        idle = arch_idle;
+                        idle = default_idle;
                 leds_event(led_idle_start);
                 while (!need_resched())
                         idle();
......
@@ -11,8 +11,7 @@
 static inline void arch_idle(void)
 {
 #if 0
-        if (!hlt_counter)
-                cpu_do_idle(0);
+        cpu_do_idle();
 #endif
 }
......
@@ -15,8 +15,7 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-static inline void
-arch_idle(void)
+static inline void arch_idle(void)
 {
 }
......
@@ -8,9 +8,8 @@
  * published by the Free Software Foundation.
  */
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
-        while (!need_resched() && !hlt_counter);
 }
 
 static inline void arch_reset(char mode)
......
@@ -7,11 +7,11 @@
 #define __ASM_ARCH_SYSTEM_H
 
 #include <asm/hardware/iomd.h>
 #include <asm/io.h>
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
-        while (!need_resched() && !hlt_counter)
-                iomd_writeb(0, IOMD_SUSMODE);
+        iomd_writeb(0, IOMD_SUSMODE);
 }
 
 #define arch_reset(mode) \
......
@@ -22,7 +22,7 @@
 #include <asm/hardware/clps7111.h>
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
         clps_writel(1, HALT);
         __asm__ __volatile__(
......
@@ -20,31 +20,18 @@
  * Instead, we spin, polling the IRQ_STAT register for the occurrence
  * of any interrupt with core clock down to the memory clock.
  */
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
         const char *irq_stat = (char *)0xff000000;
-        long flags;
-
-        if (!hlt_counter)
-                return;
+
+        /* disable clock switching */
+        asm volatile ("mcr%? p15, 0, ip, c15, c2, 2");
 
-        do {
-                /* disable interrupts */
-                cli();
-                /* check need_resched here to avoid races */
-                if (need_resched()) {
-                        sti();
-                        return;
-                }
-                /* disable clock switching */
-                asm volatile ("mcr%? p15, 0, ip, c15, c2, 2");
-                /* wait for an interrupt to occur */
-                while (!*irq_stat);
-                /* enable clock switching */
-                asm volatile ("mcr%? p15, 0, ip, c15, c1, 2");
-                /* allow the interrupt to happen */
-                sti();
-        } while (!need_resched());
+        /* wait for an interrupt to occur */
+        while (!*irq_stat);
+
+        /* enable clock switching */
+        asm volatile ("mcr%? p15, 0, ip, c15, c1, 2");
 }
 
 #define arch_reset(mode) cpu_reset(0x80000000)
......
@@ -13,26 +13,9 @@
 #include <asm/leds.h>
 #include <asm/mach-types.h>
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
-        unsigned long start_idle;
-
-        start_idle = jiffies;
-
-        do {
-                if (need_resched() || hlt_counter)
-                        goto slow_out;
-                cpu_do_idle(IDLE_WAIT_FAST);
-        } while (time_before(jiffies, start_idle + HZ/50));
-
-        cpu_do_idle(IDLE_CLOCK_SLOW);
-
-        while (!need_resched() && !hlt_counter) {
-                cpu_do_idle(IDLE_WAIT_SLOW);
-        }
-
-        cpu_do_idle(IDLE_CLOCK_FAST);
-
-slow_out:
+        cpu_do_idle();
 }
 
 static inline void arch_reset(char mode)
......
@@ -24,13 +24,13 @@
 #include <asm/arch/platform.h>
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
         /*
          * This should do all the clock switching
          * and wait for interrupt tricks
         */
-        cpu_do_idle(0);
+        cpu_do_idle();
 }
 
 extern __inline__ void arch_reset(char mode)
......
@@ -23,13 +23,13 @@
 #include <asm/arch/platform.h>
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
         /*
          * This should do all the clock switching
          * and wait for interrupt tricks
         */
-        cpu_do_idle(0);
+        cpu_do_idle();
 }
 
 static inline void arch_reset(char mode)
......
@@ -10,10 +10,7 @@
 static inline void arch_idle(void)
 {
-        if (!hlt_counter)
-        {
-                cpu_do_idle(0);
-        }
+        cpu_do_idle();
 }
......
@@ -12,9 +12,8 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
         /* fixme: this needs to be cleaned up (converted from ASM code) --rmk */
         *(unsigned long *)(IO_BASE + 0x50004) = 1;	/* idle mode */
 }
......
@@ -14,10 +14,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
-        while (!need_resched() && !hlt_counter)
-                cpu_do_idle(IDLE_WAIT_SLOW);
+        cpu_do_idle();
 }
 
 #define arch_reset(mode)	do { } while (0)
......
@@ -14,13 +14,7 @@
 static inline void arch_idle(void)
 {
-        if (!hlt_counter) {
-                int flags;
-
-                local_irq_save(flags);
-                if(!need_resched())
-                        cpu_do_idle(0);
-                local_irq_restore(flags);
-        }
+        cpu_do_idle();
 }
......
@@ -11,26 +11,9 @@
 #include <asm/hardware/iomd.h>
 #include <asm/io.h>
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
-        unsigned long start_idle;
-
-        start_idle = jiffies;
-
-        do {
-                if (need_resched() || hlt_counter)
-                        goto slow_out;
-                cpu_do_idle(IDLE_WAIT_FAST);
-        } while (time_before(jiffies, start_idle + HZ/50));
-
-        cpu_do_idle(IDLE_CLOCK_SLOW);
-
-        while (!need_resched() && !hlt_counter) {
-                cpu_do_idle(IDLE_WAIT_SLOW);
-        }
-
-        cpu_do_idle(IDLE_CLOCK_FAST);
-
-slow_out:
+        cpu_do_idle();
 }
 
 static inline void arch_reset(char mode)
......
@@ -7,13 +7,7 @@
 static inline void arch_idle(void)
 {
-        if (!hlt_counter) {
-                int flags;
-
-                local_irq_save(flags);
-                if (!need_resched())
-                        cpu_do_idle(0);
-                local_irq_restore(flags);
-        }
+        cpu_do_idle();
 }
 
 #ifdef CONFIG_SA1100_VICTOR
......
@@ -21,7 +21,7 @@ static void arch_reset(char mode)
 }
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
 }
......
@@ -6,26 +6,9 @@
 #ifndef __ASM_ARCH_SYSTEM_H
 #define __ASM_ARCH_SYSTEM_H
 
-static void arch_idle(void)
+static inline void arch_idle(void)
 {
-        unsigned long start_idle;
-
-        start_idle = jiffies;
-
-        do {
-                if (need_resched() || hlt_counter)
-                        goto slow_out;
-                cpu_do_idle(IDLE_WAIT_FAST);
-        } while (time_before(jiffies, start_idle + HZ/50));
-
-        cpu_do_idle(IDLE_CLOCK_SLOW);
-
-        while (!need_resched() && !hlt_counter) {
-                cpu_do_idle(IDLE_WAIT_SLOW);
-        }
-
-        cpu_do_idle(IDLE_CLOCK_FAST);
-
-slow_out:
+        cpu_do_idle();
 }
 
 #define arch_reset(mode)	do { } while (0)
......
@@ -43,7 +43,7 @@ extern struct processor {
         /*
          * Idle the processor
          */
-        int (*_do_idle)(int mode);
+        int (*_do_idle)(void);
         /*
          * Processor architecture specific
          */
@@ -113,7 +113,7 @@ extern const struct processor sa110_processor_functions;
 #define cpu_proc_init()			processor._proc_init()
 #define cpu_proc_fin()			processor._proc_fin()
 #define cpu_reset(addr)			processor.reset(addr)
-#define cpu_do_idle(mode)		processor._do_idle(mode)
+#define cpu_do_idle()			processor._do_idle()
 
 #define cpu_cache_clean_invalidate_all()	processor.cache.clean_invalidate_all()
 #define cpu_cache_clean_invalidate_range(s,e,f)	processor.cache.clean_invalidate_range(s,e,f)
......
@@ -52,7 +52,7 @@ extern void cpu_data_abort(unsigned long pc);
 extern void cpu_check_bugs(void);
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
-extern int cpu_do_idle(int mode);
+extern int cpu_do_idle(void);
 extern void cpu_cache_clean_invalidate_all(void);
 extern void cpu_cache_clean_invalidate_range(unsigned long address, unsigned long end, int flags);
......