Commit 3ebc7033 authored by Linus Torvalds

Merge tag 'powerpc-4.10-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Apologies for the late pull request, but Ben has been busy finding bugs.

   - Userspace was semi-randomly segfaulting on radix due to us
     incorrectly handling a fault triggered by autonuma, caused by a
     patch we merged earlier in v4.10 to prevent the kernel executing
     userspace.

   - We weren't marking host IPIs properly for KVM in the OPAL ICP
     backend.

   - The ERAT flushing on radix was missing an isync and was incorrectly
     marked as DD1 only.

   - The powernv CPU hotplug code was missing a wakeup type and failing
     to flush the interrupt correctly when using OPAL ICP.

  Thanks to Benjamin Herrenschmidt"

* tag 'powerpc-4.10-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/powernv: Properly set "host-ipi" on IPIs
  powerpc/powernv: Fix CPU hotplug to handle waking on HVI
  powerpc/mm/radix: Update ERAT flushes when invalidating TLB
  powerpc/mm: Fix spurrious segfaults on radix with autonuma
parents 3d88460d f83e6862
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
@@ -649,9 +649,10 @@
 #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
 #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
 #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
-#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
+#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define SRR1_WAKESYSERR 0x00300000 /* System error */
 #define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define SRR1_WAKEMT 0x00280000 /* mtctrl */
 #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
 #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
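For context, SRR1 encodes the wakeup reason in the bits covered by these masks. Note that SRR1_WAKEHVI (0x00240000) masked with the generic SRR1_WAKEMASK (0x00380000) aliases to SRR1_WAKEEE, so P9 code has to use the wider SRR1_WAKEMASK_P8 to tell a hypervisor virtualization interrupt apart from an ordinary external interrupt. A minimal decode sketch using only the constants above; the helper name is ours, not a kernel function:

/* Illustrative sketch only: classify a wakeup with the constants above.
 * srr1_wake_reason() is a made-up name. SRR1_WAKEMT and SRR1_WAKEHMI
 * share an encoding (0x00280000), so they fall to the default arm.
 */
static const char *srr1_wake_reason(unsigned long srr1)
{
        switch (srr1 & SRR1_WAKEMASK_P8) {
        case SRR1_WAKESYSERR:   return "system error";
        case SRR1_WAKEHVI:      return "hypervisor virtualization interrupt (P9)";
        case SRR1_WAKEEE:       return "external interrupt";
        case SRR1_WAKEDEC:      return "decrementer";
        default:                return "mtctrl/HMI/other";
        }
}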
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
         if (unlikely(debugger_fault_handler(regs)))
                 goto bail;
 
-        /* On a kernel SLB miss we can only check for a valid exception entry */
-        if (!user_mode(regs) && (address >= TASK_SIZE)) {
+        /*
+         * The kernel should never take an execute fault nor should it
+         * take a page fault to a kernel address.
+         */
+        if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
                 rc = SIGSEGV;
                 goto bail;
         }
@@ -390,20 +393,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif /* CONFIG_8xx */
 
         if (is_exec) {
-                /*
-                 * An execution fault + no execute ?
-                 *
-                 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
-                 * deliberately create NX mappings, and use the fault to do the
-                 * cache flush. This is usually handled in hash_page_do_lazy_icache()
-                 * but we could end up here if that races with a concurrent PTE
-                 * update. In that case we need to fall through here to the VMA
-                 * check below.
-                 */
-                if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-                        (regs->msr & SRR1_ISI_N_OR_G))
-                        goto bad_area;
-
                 /*
                  * Allow execution from readable areas if the MMU does not
                  * provide separate controls over reading and executing.
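Taken together, these two hunks move the policy up front: a kernel-mode execute fault can never be legitimate (the kernel no longer executes from userspace addresses after the earlier v4.10 hardening), so it is rejected before the VMA walk instead of racing through it and, with autonuma clearing PTEs concurrently, segfaulting an unrelated process. Distilled into a standalone predicate; the function and parameter names here are hypothetical, for illustration only:

#include <stdbool.h>

/* Hypothetical distillation of the bail-out test added above: in
 * kernel mode, an execute fault or a data fault on a kernel address
 * cannot be resolved by the VMA search, so fail it immediately.
 */
static bool kernel_fault_is_fatal(bool is_user, bool is_exec,
                                  unsigned long address,
                                  unsigned long task_size)
{
        return !is_user && (is_exec || address >= task_size);
}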
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
         for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
                 __tlbiel_pid(pid, set, ric);
         }
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-                asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-        return;
+        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
         asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                      : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
         asm volatile("ptesync": : :"memory");
-        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-                asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
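The ERAT is a small cache of effective-to-real translations that sits in front of the TLB, so a local TLB invalidation has to discard it as well. These hunks make the flush in _tlbiel_pid() unconditional rather than POWER9-DD1-only and chain an isync after it, so the flush is context-synchronized before any later fetch; the per-address path simply drops its DD1-only flush. Wrapped as a sketch (the wrapper name is ours, assuming the PPC_INVALIDATE_ERAT macro used above):

/* Sketch of the pattern the hunk converges on; radix_erat_flush_sync()
 * is a made-up name. PPC_INVALIDATE_ERAT expands to the ERAT-invalidate
 * instruction, and the trailing isync makes the flush take effect
 * before any subsequent instruction executes.
 */
static inline void radix_erat_flush_sync(void)
{
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : : "memory");
}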
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
                 wmask = SRR1_WAKEMASK_P8;
 
         idle_states = pnv_get_supported_cpuidle_states();
+
         /* We don't want to take decrementer interrupts while we are offline,
-         * so clear LPCR:PECE1. We keep PECE2 enabled.
+         * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+         * enabled as to let IPIs in.
          */
         mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
                  * contains 0.
                  */
                 if (((srr1 & wmask) == SRR1_WAKEEE) ||
+                    ((srr1 & wmask) == SRR1_WAKEHVI) ||
                     (local_paca->irq_happened & PACA_IRQ_EE)) {
-                        icp_native_flush_interrupt();
+                        if (cpu_has_feature(CPU_FTR_ARCH_300))
+                                icp_opal_flush_interrupt();
+                        else
+                                icp_native_flush_interrupt();
                 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                         unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                         asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
                 if (srr1 && !generic_check_cpu_restart(cpu))
                         DBG("CPU%d Unexpected exit while offline !\n", cpu);
         }
+
+        /* Re-enable decrementer interrupts */
         mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
 
         DBG("CPU%d coming online...\n", cpu);
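On POWER9 (CPU_FTR_ARCH_300) the IPI that wakes an offline CPU shows up as a Hypervisor Virtualization Interrupt, hence the new SRR1_WAKEHVI arm and the dispatch to the OPAL ICP backend rather than the native one. A condensed sketch of that dispatch, using only symbols from the hunks above (the wrapper itself is hypothetical):

/* Hypothetical condensation of the wake-source handling above. */
static void flush_offline_wakeup(unsigned long srr1, unsigned long wmask)
{
        unsigned long reason = srr1 & wmask;

        if (reason == SRR1_WAKEEE || reason == SRR1_WAKEHVI) {
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        icp_opal_flush_interrupt();   /* P9: OPAL ICP */
                else
                        icp_native_flush_interrupt(); /* P8: native ICP */
        }
}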
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
         int hw_cpu = get_hard_smp_processor_id(cpu);
 
+        kvmppc_set_host_ipi(cpu, 1);
         opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-        int hw_cpu = hard_smp_processor_id();
+        int cpu = smp_processor_id();
 
-        opal_int_set_mfrr(hw_cpu, 0xff);
+        kvmppc_set_host_ipi(cpu, 0);
+        opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
         return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+        unsigned int xirr;
+        unsigned int vec;
+
+        do {
+                xirr = icp_opal_get_xirr();
+                vec = xirr & 0x00ffffff;
+                if (vec == XICS_IRQ_SPURIOUS)
+                        break;
+                if (vec == XICS_IPI) {
+                        /* Clear pending IPI */
+                        int cpu = smp_processor_id();
+                        kvmppc_set_host_ipi(cpu, 0);
+                        opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+                } else {
+                        pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+                               "disabling\n", vec);
+                        xics_mask_unknown_vec(vec);
+                }
+
+                /* EOI the interrupt */
+        } while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {
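For reference, the masking in icp_opal_flush_interrupt() follows the XICS register layout: XIRR carries the CPPR (current processor priority) in its top byte and the 24-bit interrupt source (XISR) in the low bits, which is why the loop extracts vec with xirr & 0x00ffffff before comparing against XICS_IPI and XICS_IRQ_SPURIOUS. A small self-contained sketch of that decode; the helper names are ours:

#include <stdint.h>

#define XIRR_XISR_MASK 0x00ffffffu      /* low 24 bits: source vector */

/* Hypothetical helpers mirroring the masking used above. */
static inline uint32_t xirr_vector(uint32_t xirr)
{
        return xirr & XIRR_XISR_MASK;   /* compare against XICS_IPI etc. */
}

static inline uint8_t xirr_cppr(uint32_t xirr)
{
        return (uint8_t)(xirr >> 24);   /* priority at acceptance time */
}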