Commit 310959e8 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Benjamin Herrenschmidt:
 "It looks like my rewrite of our lazy irq scheme is still exposing
  "interesting" issues left and right.  The previous fixes are now
  causing an occasional BUG_ON to trigger (which this patch turns into a
  WARN_ON while at it), due to another issue of disconnect of the lazy
  irq state vs the processor state in the idle loop on pseries and
  cell.

  This should fix it properly once and for all, moving the nasty code to a
  common helper function.

  There's also a couple more fixes for some debug stuff that didn't build
  (and helped resolving those problems so it's worth having), along with
  a compile fix for newer gcc's."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  tty/hvc_opal: Fix debug function name
  powerpc/numa: Avoid stupid uninitialized warning from gcc
  powerpc: Fix build of some debug irq code
  powerpc: More fixes for lazy IRQ vs. idle
parents bc51b0c2 50fb31cf
...@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void) ...@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
} }
#ifdef CONFIG_PPC_BOOK3E #ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory"); #define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory"); #define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else #else
#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1) #define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1) #define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
...@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) ...@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !regs->softe; return !regs->softe;
} }
extern bool prep_irq_for_idle(void);
#else /* CONFIG_PPC64 */ #else /* CONFIG_PPC64 */
#define SET_MSR_EE(x) mtmsr(x) #define SET_MSR_EE(x) mtmsr(x)
......
...@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en) ...@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
*/ */
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable(); __hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAG #ifdef CONFIG_TRACE_IRQFLAGS
else { else {
/* /*
* We should already be hard disabled here. We had bugs * We should already be hard disabled here. We had bugs
...@@ -286,6 +286,52 @@ void notrace restore_interrupts(void) ...@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
__hard_irq_enable(); __hard_irq_enable();
} }
/*
 * prep_irq_for_idle - sanitize the lazy irq state before entering a
 * low-power state whose entry has the side effect of re-enabling
 * interrupts (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * Returns true: caller should enter the low-power state; lockdep has
 * been notified of interrupts being re-enabled and the lazy irq state
 * (irq_happened/soft_enabled) has been made consistent with that.
 *
 * Returns false: an interrupt is already pending; the caller must NOT
 * enter the low-power state. We leave with interrupts hard disabled
 * and marked as such, so the local_irq_enable() call in cpu_idle()
 * will properly replay the pending interrupt and re-enable everything.
 */
bool prep_irq_for_idle(void)
{
/*
 * First we need to hard disable to ensure no interrupt
 * occurs before we effectively enter the low power state.
 * Without this, an edge/decrementer event arriving between the
 * pending-check below and the actual sleep could be lost.
 */
hard_irq_disable();
/*
 * If anything happened while we were soft-disabled,
 * we return now and do not enter the low power state.
 * (lazy_irq_pending() checks the PACA for events latched
 * while soft-disabled.)
 */
if (lazy_irq_pending())
return false;
/* Tell lockdep we are about to re-enable */
trace_hardirqs_on();
/*
 * Mark interrupts as soft-enabled and clear the
 * PACA_IRQ_HARD_DIS from the pending mask since we
 * are about to hard enable as well as a side effect
 * of entering the low power state. This keeps the lazy
 * irq bookkeeping in sync with what the hardware will
 * actually be doing once the low-power entry re-enables EE.
 */
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
local_paca->soft_enabled = 1;
/* Tell the caller to enter the low power state */
return true;
}
#endif /* CONFIG_PPC64 */ #endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec) int arch_show_interrupts(struct seq_file *p, int prec)
......
...@@ -639,7 +639,7 @@ static void __init parse_drconf_memory(struct device_node *memory) ...@@ -639,7 +639,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
unsigned int n, rc, ranges, is_kexec_kdump = 0; unsigned int n, rc, ranges, is_kexec_kdump = 0;
unsigned long lmb_size, base, size, sz; unsigned long lmb_size, base, size, sz;
int nid; int nid;
struct assoc_arrays aa; struct assoc_arrays aa = { .arrays = NULL };
n = of_get_drconf_memory(memory, &dm); n = of_get_drconf_memory(memory, &dm);
if (!n) if (!n)
......
...@@ -42,11 +42,9 @@ static void cbe_power_save(void) ...@@ -42,11 +42,9 @@ static void cbe_power_save(void)
{ {
unsigned long ctrl, thread_switch_control; unsigned long ctrl, thread_switch_control;
/* /* Ensure our interrupt state is properly tracked */
* We need to hard disable interrupts, the local_irq_enable() done by if (!prep_irq_for_idle())
* our caller upon return will hard re-enable. return;
*/
hard_irq_disable();
ctrl = mfspr(SPRN_CTRLF); ctrl = mfspr(SPRN_CTRLF);
...@@ -81,6 +79,9 @@ static void cbe_power_save(void) ...@@ -81,6 +79,9 @@ static void cbe_power_save(void)
*/ */
ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
mtspr(SPRN_CTRLT, ctrl); mtspr(SPRN_CTRLT, ctrl);
/* Re-enable interrupts in MSR */
__hard_irq_enable();
} }
static int cbe_system_reset_exception(struct pt_regs *regs) static int cbe_system_reset_exception(struct pt_regs *regs)
......
...@@ -99,15 +99,18 @@ static int snooze_loop(struct cpuidle_device *dev, ...@@ -99,15 +99,18 @@ static int snooze_loop(struct cpuidle_device *dev,
static void check_and_cede_processor(void) static void check_and_cede_processor(void)
{ {
/* /*
* Interrupts are soft-disabled at this point, * Ensure our interrupt state is properly tracked,
* but not hard disabled. So an interrupt might have * also checks if no interrupt has occurred while we
* occurred before entering NAP, and would be potentially * were soft-disabled
* lost (edge events, decrementer events, etc...) unless
* we first hard disable then check.
*/ */
hard_irq_disable(); if (prep_irq_for_idle()) {
if (!lazy_irq_pending())
cede_processor(); cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
/* Ensure that H_CEDE returns with IRQs on */
if (WARN_ON(!(mfmsr() & MSR_EE)))
__hard_irq_enable();
#endif
}
} }
static int dedicated_cede_loop(struct cpuidle_device *dev, static int dedicated_cede_loop(struct cpuidle_device *dev,
......
...@@ -401,7 +401,7 @@ void __init hvc_opal_init_early(void) ...@@ -401,7 +401,7 @@ void __init hvc_opal_init_early(void)
} }
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
void __init udbg_init_debug_opal(void) void __init udbg_init_debug_opal_raw(void)
{ {
u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO; u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
hvc_opal_privs[index] = &hvc_opal_boot_priv; hvc_opal_privs[index] = &hvc_opal_boot_priv;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment