Commit 87fa05ae authored by Sam Ravnborg, committed by Thomas Gleixner

sparc: Use generic idle loop

Add generic cpu_idle support

sparc32:
- replace call to cpu_idle() with cpu_startup_entry()
- add arch_cpu_idle()

sparc64:
- smp_callin() now includes the cpu_startup_entry() call, so we can
  skip calling cpu_idle() from assembler
- add arch_cpu_idle() and arch_cpu_idle_dead()
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Reviewed-by: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Cc: torvalds@linux-foundation.org
Cc: rusty@rustcorp.com.au
Cc: paulmck@linux.vnet.ibm.com
Cc: peterz@infradead.org
Cc: magnus.damm@gmail.com
Acked-by: David Miller <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20130411193850.GA2330@merkur.ravnborg.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 781b0e87
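For readers new to the generic idle loop: with GENERIC_IDLE_LOOP selected, the arch only supplies the arch_cpu_idle*() hooks and the loop itself lives in common code. Below is a simplified sketch of that common loop (kernel/cpu/idle.c in this era), not part of this commit; polling-flag management, RCU and tracing hooks are elided. It shows the contract the new sparc hooks are written against: arch_cpu_idle() is entered with interrupts disabled and must re-enable them before returning, and arch_cpu_idle_dead() is invoked by the core once the CPU is marked offline.

/*
 * Simplified sketch of the generic idle loop, for orientation only.
 * The real kernel/cpu/idle.c additionally handles TIF_POLLING_NRFLAG,
 * forced polling, RCU idle entry/exit and critical-timing accounting.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			/* Offline CPUs leave through the arch hook. */
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* does not return */

			local_irq_disable();
			arch_cpu_idle();	/* must re-enable interrupts */
		}

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpu_idle_loop();	/* never returns */
}
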
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
@@ -37,6 +37,7 @@ config SPARC
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_IDLE_LOOP
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select MODULES_USE_ELF_RELA

diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
@@ -128,8 +128,7 @@ hv_cpu_startup:
 	call		smp_callin
 	 nop
-	call		cpu_idle
-	 mov		0, %o0
+
 	call		cpu_panic
 	 nop

diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
 struct task_struct *last_task_used_math = NULL;
 struct thread_info *current_set[NR_CPUS];
 
-/*
- * the idle loop on a Sparc... ;)
- */
-void cpu_idle(void)
+/* Idle loop support. */
+void arch_cpu_idle(void)
 {
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	/* endless idle loop with no priority at all */
-	for (;;) {
-		while (!need_resched()) {
-			if (sparc_idle)
-				(*sparc_idle)();
-			else
-				cpu_relax();
-		}
-		schedule_preempt_disabled();
-	}
+	if (sparc_idle)
+		(*sparc_idle)();
+	local_irq_enable();
 }
 
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */

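Note on the sparc32 hunk above: the generic loop has already checked need_resched() and disabled interrupts before calling arch_cpu_idle(), which is why the body reduces to the optional sparc_idle() callout plus the mandatory local_irq_enable(). The old TIF_POLLING_NRFLAG and schedule_preempt_disabled() bookkeeping now lives in the core loop (see the sketch above).
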
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
@@ -52,20 +52,17 @@
 
 #include "kstack.h"
 
-static void sparc64_yield(int cpu)
+/* Idle loop support on sparc64. */
+void arch_cpu_idle(void)
 {
 	if (tlb_type != hypervisor) {
 		touch_nmi_watchdog();
-		return;
-	}
-
-	clear_thread_flag(TIF_POLLING_NRFLAG);
-	smp_mb__after_clear_bit();
-
-	while (!need_resched() && !cpu_is_offline(cpu)) {
+	} else {
 		unsigned long pstate;
 
-		/* Disable interrupts. */
+		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
+		 * the cpu sleep hypervisor call.
+		 */
 		__asm__ __volatile__(
 			"rdpr %%pstate, %0\n\t"
 			"andn %0, %1, %0\n\t"
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
 
-		if (!need_resched() && !cpu_is_offline(cpu))
+		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
 			sun4v_cpu_yield();
 
 		/* Re-enable interrupts. */
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
 	}
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-/* The idle loop on sparc64. */
-void cpu_idle(void)
-{
-	int cpu = smp_processor_id();
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	while(1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-		while (!need_resched() && !cpu_is_offline(cpu))
-			sparc64_yield(cpu);
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-
-#ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(cpu)) {
-			sched_preempt_enable_no_resched();
-			cpu_play_dead();
-		}
-#endif
-		schedule_preempt_disabled();
-	}
+	local_irq_enable();
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead()
+{
+	sched_preempt_enable_no_resched();
+	cpu_play_dead();
+}
+#endif
+
 #ifdef CONFIG_COMPAT
 static void show_regwindow32(struct pt_regs *regs)

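Note on the sparc64 hunks: the cpu_is_offline() polling and the open-coded CONFIG_HOTPLUG_CPU block from the old cpu_idle() disappear because the generic loop performs the offline check itself and calls arch_cpu_idle_dead(), which here just re-enables preemption accounting and enters cpu_play_dead(); neither function returns.
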
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg)
 	local_irq_enable();
 
 	wmb();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 
 	/* We should never reach here! */
 	BUG();

diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
@@ -127,6 +127,8 @@ void __cpuinit smp_callin(void)
 
 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();
+
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void cpu_panic(void)

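Because cpu_startup_entry() never returns, smp_callin() now never returns to its assembler caller. That is what lets the trampoline code in the final hunk below, like hvtramp.S earlier, drop the fallthrough call to cpu_idle, leaving cpu_panic as an unreachable safety net.
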
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
@@ -407,8 +407,7 @@ after_lock_tlb:
 	call		smp_callin
 	 nop
-	call		cpu_idle
-	 mov		0, %o0
+
 	call		cpu_panic
 	 nop
 1:	b,a,pt		%xcc, 1b