Commit 2201f994 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s/idle: Move soft interrupt mask logic into C code

This simplifies the asm and fixes irq-off tracing over sleep
instructions.

Also move the powersave_nap check for POWER8 into C code, and move
the PSSCR register value calculation for POWER9 into C.
Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 42bed042
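In outline, the soft interrupt mask handling that used to live in pnv_powersave_common is now done in C around a much smaller asm stub. The sketch below condenses the __power7_idle_type() helper added by this patch; the POWER9 stop path is analogous, except that the PSSCR value is composed in C first. The function name idle_type_sketch and the include list are illustrative only, not part of the patch.

#include <asm/hw_irq.h>		/* prep_irq_for_idle_irqsoff(), fini_irq_for_idle_irqsoff() */
#include <asm/processor.h>	/* power7_idle_insn() */
#include <asm/runlatch.h>	/* ppc64_runlatch_off()/ppc64_runlatch_on() */

/* Hypothetical condensation of __power7_idle_type() from this patch. */
static unsigned long idle_type_sketch(unsigned long type)
{
	unsigned long srr1;

	/*
	 * Hard-disable MSR[EE], record PACA_IRQ_HARD_DIS, bail out if a
	 * masked interrupt is already pending, then tell the irq tracer
	 * that interrupts are "on" for the duration of the idle state.
	 */
	if (!prep_irq_for_idle_irqsoff())
		return 0;

	ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);	/* asm stub: nap/sleep/winkle */
	ppc64_runlatch_on();

	/* Resumes irq-off tracing after wakeup (trace_hardirqs_off()). */
	fini_irq_for_idle_irqsoff();

	return srr1;
}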
@@ -129,6 +129,9 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 }
 
 extern bool prep_irq_for_idle(void);
+extern bool prep_irq_for_idle_irqsoff(void);
+#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
+
 extern void force_external_irq_replay(void);
...
@@ -226,6 +226,7 @@ struct machdep_calls {
 extern void e500_idle(void);
 extern void power4_idle(void);
 extern void power7_idle(void);
+extern void power9_idle(void);
 extern void ppc6xx_idle(void);
 extern void book3e_idle(void);
...
@@ -481,10 +481,10 @@ extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;	/* set if nap mode can be used in idle loop */
-extern unsigned long power7_nap(int check_irq);
-extern unsigned long power7_sleep(void);
-extern unsigned long power7_winkle(void);
-extern unsigned long power9_idle_stop(unsigned long stop_psscr_val,
-				      unsigned long stop_psscr_mask);
+extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
+extern void power7_idle_type(unsigned long type);
+extern unsigned long power9_idle_stop(unsigned long psscr_val);
+extern void power9_idle_type(unsigned long stop_psscr_val,
+			     unsigned long stop_psscr_mask);
 
 extern void flush_instruction_cache(void);
...
@@ -109,13 +109,9 @@ core_idle_lock_held:
 /*
  * Pass requested state in r3:
  *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
- *	   - Requested STOP state in POWER9
+ *	   - Requested PSSCR value in POWER9
  *
- * To check IRQ_HAPPENED in r4
- *	0 - don't check
- *	1 - check
- *
- * Address to 'rfid' to in r5
+ * Address of idle handler to 'rfid' to in r4
  */
pnv_powersave_common:
	/* Use r3 to pass state nap/sleep/winkle */
@@ -131,30 +127,7 @@ pnv_powersave_common:
 	std	r0,_LINK(r1)
 	std	r0,_NIP(r1)
-	/* Hard disable interrupts */
 	mfmsr	r9
-	rldicl	r9,r9,48,1
-	rotldi	r9,r9,16
-	mtmsrd	r9,1			/* hard-disable interrupts */
-
-	/* Check if something happened while soft-disabled */
-	lbz	r0,PACAIRQHAPPENED(r13)
-	andi.	r0,r0,~PACA_IRQ_HARD_DIS@l
-	beq	1f
-	cmpwi	cr0,r4,0
-	beq	1f
-	addi	r1,r1,INT_FRAME_SIZE
-	ld	r0,16(r1)
-	li	r3,0			/* Return 0 (no nap) */
-	mtlr	r0
-	blr
-
-1:	/* We mark irqs hard disabled as this is the state we'll
-	 * be in when returning and we need to tell arch_local_irq_restore()
-	 * about it
-	 */
-	li	r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-
 	/* We haven't lost state ... yet */
 	li	r0,0
@@ -163,8 +136,8 @@ pnv_powersave_common:
 	/* Continue saving state */
 	SAVE_GPR(2, r1)
 	SAVE_NVGPRS(r1)
-	mfcr	r4
-	std	r4,_CCR(r1)
+	mfcr	r5
+	std	r5,_CCR(r1)
 	std	r9,_MSR(r1)
 	std	r1,PACAR1(r13)
@@ -178,7 +151,7 @@ pnv_powersave_common:
 	li	r6, MSR_RI
 	andc	r6, r9, r6
 	mtmsrd	r6, 1		/* clear RI before setting SRR0/1 */
-	mtspr	SPRN_SRR0, r5
+	mtspr	SPRN_SRR0, r4
 	mtspr	SPRN_SRR1, r7
 	rfid
@@ -322,35 +295,14 @@ lwarx_loop_stop:
 	IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
 
-_GLOBAL(power7_idle)
+/*
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
+ */
+_GLOBAL(power7_idle_insn)
 	/* Now check if user or arch enabled NAP mode */
-	LOAD_REG_ADDRBASE(r3,powersave_nap)
-	lwz	r4,ADDROFF(powersave_nap)(r3)
-	cmpwi	0,r4,0
-	beqlr
-	li	r3, 1
-	/* fall through */
-
-_GLOBAL(power7_nap)
-	mr	r4,r3
-	li	r3,PNV_THREAD_NAP
-	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
-	b	pnv_powersave_common
-	/* No return */
-
-_GLOBAL(power7_sleep)
-	li	r3,PNV_THREAD_SLEEP
-	li	r4,1
-	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+	LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
 	b	pnv_powersave_common
 	/* No return */
 
-_GLOBAL(power7_winkle)
-	li	r3,PNV_THREAD_WINKLE
-	li	r4,1
-	LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
-	b	pnv_powersave_common
-	/* No return */
-
 #define CHECK_HMI_INTERRUPT			\
 	mfspr	r0,SPRN_SRR1;			\
@@ -372,17 +324,13 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
20:	nop;
 
 /*
- * r3 - The PSSCR value corresponding to the stop state.
- * r4 - The PSSCR mask corrresonding to the stop state.
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired PSSCR register value.
  */
_GLOBAL(power9_idle_stop)
-	mfspr	r5,SPRN_PSSCR
-	andc	r5,r5,r4
-	or	r3,r3,r5
	std	r3, PACA_REQ_PSSCR(r13)
	mtspr	SPRN_PSSCR,r3
-	LOAD_REG_ADDR(r5,power_enter_stop)
-	li	r4,1
+	LOAD_REG_ADDR(r4,power_enter_stop)
	b	pnv_powersave_common
	/* No return */
...
@@ -322,7 +322,8 @@ bool prep_irq_for_idle(void)
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
-	hard_irq_disable();
+	__hard_irq_disable();
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
	/*
	 * If anything happened while we were soft-disabled,
@@ -347,6 +348,36 @@ bool prep_irq_for_idle(void)
 	return true;
 }
 
+/*
+ * This is for idle sequences that return with IRQs off, but the
+ * idle state itself wakes on interrupt. Tell the irq tracer that
+ * IRQs are enabled for the duration of idle so it does not get long
+ * off times. Must be paired with fini_irq_for_idle_irqsoff.
+ */
+bool prep_irq_for_idle_irqsoff(void)
+{
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * First we need to hard disable to ensure no interrupt
+	 * occurs before we effectively enter the low power state
+	 */
+	__hard_irq_disable();
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+	/*
+	 * If anything happened while we were soft-disabled,
+	 * we return now and do not enter the low power state.
+	 */
+	if (lazy_irq_pending())
+		return false;
+
+	/* Tell lockdep we are about to re-enable */
+	trace_hardirqs_on();
+
+	return true;
+}
+
 /*
  * Force a replay of the external interrupt handler on this CPU.
  */
...
@@ -23,6 +23,7 @@
 #include <asm/cpuidle.h>
 #include <asm/code-patching.h>
 #include <asm/smp.h>
+#include <asm/runlatch.h>
 
 #include "powernv.h"
 #include "subcore.h"
@@ -283,12 +284,68 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);
 
+static unsigned long __power7_idle_type(unsigned long type)
+{
+	unsigned long srr1;
+
+	if (!prep_irq_for_idle_irqsoff())
+		return 0;
+
+	ppc64_runlatch_off();
+	srr1 = power7_idle_insn(type);
+	ppc64_runlatch_on();
+
+	fini_irq_for_idle_irqsoff();
+
+	return srr1;
+}
+
+void power7_idle_type(unsigned long type)
+{
+	__power7_idle_type(type);
+}
+
+void power7_idle(void)
+{
+	if (!powersave_nap)
+		return;
+
+	power7_idle_type(PNV_THREAD_NAP);
+}
+
+static unsigned long __power9_idle_type(unsigned long stop_psscr_val,
+					unsigned long stop_psscr_mask)
+{
+	unsigned long psscr;
+	unsigned long srr1;
+
+	if (!prep_irq_for_idle_irqsoff())
+		return 0;
+
+	psscr = mfspr(SPRN_PSSCR);
+	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
+
+	ppc64_runlatch_off();
+	srr1 = power9_idle_stop(psscr);
+	ppc64_runlatch_on();
+
+	fini_irq_for_idle_irqsoff();
+
+	return srr1;
+}
+
+void power9_idle_type(unsigned long stop_psscr_val,
+		      unsigned long stop_psscr_mask)
+{
+	__power9_idle_type(stop_psscr_val, stop_psscr_mask);
+}
+
 /*
  * Used for ppc_md.power_save which needs a function with no parameters
  */
-static void power9_idle(void)
+void power9_idle(void)
 {
-	power9_idle_stop(pnv_default_stop_val, pnv_default_stop_mask);
+	power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -303,16 +360,17 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
 	u32 idle_states = pnv_get_supported_cpuidle_states();
 
 	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
-		srr1 = power9_idle_stop(pnv_deepest_stop_psscr_val,
+		srr1 = __power9_idle_type(pnv_deepest_stop_psscr_val,
					pnv_deepest_stop_psscr_mask);
 	} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
-		srr1 = power7_winkle();
+		srr1 = __power7_idle_type(PNV_THREAD_WINKLE);
 	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
-		srr1 = power7_sleep();
+		srr1 = __power7_idle_type(PNV_THREAD_SLEEP);
 	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
-		srr1 = power7_nap(1);
+		srr1 = __power7_idle_type(PNV_THREAD_NAP);
 	} else {
+		ppc64_runlatch_off();
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
@@ -320,6 +378,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
		}
		srr1 = 0;
		HMT_medium();
+		ppc64_runlatch_on();
	}
 
	return srr1;
...
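For POWER9 the same structure applies, with the PSSCR register value now composed in C before the stripped-down asm stub is called. A minimal sketch of that composition follows; the function name stop_sketch and the mask/value numbers in the comment are made up purely for illustration, and the irq/runlatch bookkeeping shown in the earlier sketch is omitted here.

#include <asm/processor.h>	/* power9_idle_stop() */
#include <asm/reg.h>		/* mfspr(), SPRN_PSSCR */

/* Hypothetical wrapper mirroring the PSSCR handling in __power9_idle_type(). */
static unsigned long stop_sketch(unsigned long stop_psscr_val,
				 unsigned long stop_psscr_mask)
{
	unsigned long psscr = mfspr(SPRN_PSSCR);

	/*
	 * Keep the PSSCR bits outside the mask and substitute the requested
	 * stop-state bits.  E.g. with a (made-up) mask of 0xf and val of
	 * 0x3, only the low nibble of the current PSSCR is replaced.
	 */
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	/* The asm stub now just writes PSSCR and executes the stop. */
	return power9_idle_stop(psscr);
}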
@@ -182,9 +182,7 @@ static void pnv_smp_cpu_kill_self(void)
	 */
	kvmppc_set_host_ipi(cpu, 0);
 
-	ppc64_runlatch_off();
	srr1 = pnv_cpu_offline(cpu);
-	ppc64_runlatch_on();
 
	/*
	 * If the SRR1 value indicates that we woke up due to
...
@@ -18,6 +18,7 @@
 #include <linux/stop_machine.h>
 
 #include <asm/cputhreads.h>
+#include <asm/cpuidle.h>
 #include <asm/kvm_ppc.h>
 #include <asm/machdep.h>
 #include <asm/opal.h>
@@ -182,7 +183,7 @@ static void unsplit_core(void)
	cpu = smp_processor_id();
	if (cpu_thread_in_core(cpu) != 0) {
		while (mfspr(SPRN_HID0) & mask)
-			power7_nap(0);
+			power7_idle_insn(PNV_THREAD_NAP);
 
		per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
		return;
...
@@ -73,9 +73,8 @@ static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
 {
-	ppc64_runlatch_off();
-	power7_idle();
-	ppc64_runlatch_on();
+	power7_idle_type(PNV_THREAD_NAP);
+
	return index;
 }
@@ -98,7 +97,8 @@ static int fastsleep_loop(struct cpuidle_device *dev,
	new_lpcr &= ~LPCR_PECE1;
	mtspr(SPRN_LPCR, new_lpcr);
 
-	power7_sleep();
+	power7_idle_type(PNV_THREAD_SLEEP);
+
	mtspr(SPRN_LPCR, old_lpcr);
@@ -110,10 +110,8 @@ static int stop_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
 {
-	ppc64_runlatch_off();
-	power9_idle_stop(stop_psscr_table[index].val,
+	power9_idle_type(stop_psscr_table[index].val,
			 stop_psscr_table[index].mask);
-	ppc64_runlatch_on();
	return index;
 }
...