Commit a2c01ed5 authored by Linus Torvalds

Merge tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on
   POWER8" from Paul
 - Handle irq_happened flag correctly in off-line loop from Paul
 - Validate rtas.entry before calling enter_rtas() from Vasant

* tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/rtas: Validate rtas.entry before calling enter_rtas()
  powerpc/powernv: Handle irq_happened flag correctly in off-line loop
  powerpc: Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8"
parents d0ddf980 8832317f
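
Background for the second fix: 64-bit powerpc uses lazy interrupt disabling, where "hard-disabling" masks interrupts in hardware and any sources that fire are recorded per-CPU in paca->irq_happened for later replay. A minimal sketch of that bookkeeping (paraphrasing arch/powerpc/include/asm/hw_irq.h; the flag values and helper name here are illustrative, not the kernel's exact definitions):

        /* Illustrative only -- see asm/hw_irq.h for the real flags. */
        #define PACA_IRQ_HARD_DIS       0x01    /* MSR[EE] actually cleared */
        #define PACA_IRQ_DBELL          0x02    /* doorbell seen while disabled */
        #define PACA_IRQ_EE             0x04    /* external interrupt seen */
        #define PACA_IRQ_DEC            0x08    /* decrementer seen */
        #define PACA_IRQ_HMI            0x10    /* hypervisor maintenance seen */

        static inline void hard_irq_disable_sketch(void)
        {
                __hard_irq_disable();                   /* clear MSR[EE] */
                local_paca->soft_enabled = 0;           /* soft-disable too */
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
        }

The off-line loop fix below relies on this: once interrupts are hard-disabled, every wakeup source is visible in irq_happened and can be acknowledged and cleared by hand.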
@@ -3,7 +3,6 @@
 #ifdef __KERNEL__
 
-#include <asm/reg.h>
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
 };
 
 extern struct ppc64_caches ppc64_caches;
 
-static inline void logmpp(u64 x)
-{
-        asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
-
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if defined(__ASSEMBLY__)
...
@@ -297,8 +297,6 @@ struct kvmppc_vcore {
        u32 arch_compat;
        ulong pcr;
        ulong dpdes;            /* doorbell state (POWER8) */
-       void *mpp_buffer;       /* Micro Partition Prefetch buffer */
-       bool mpp_buffer_is_valid;
        ulong conferring_threads;
 };
...
@@ -141,7 +141,6 @@
 #define PPC_INST_ISEL                  0x7c00001e
 #define PPC_INST_ISEL_MASK             0xfc00003e
 #define PPC_INST_LDARX                 0x7c0000a8
-#define PPC_INST_LOGMPP                        0x7c0007e4
 #define PPC_INST_LSWI                  0x7c0004aa
 #define PPC_INST_LSWX                  0x7c00042a
 #define PPC_INST_LWARX                 0x7c000028
@@ -285,20 +284,6 @@
 #define __PPC_EH(eh)   0
 #endif
 
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK          0xffffffffc000ULL
-
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT           (0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE     (0x2ULL << 60)
-
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2              (0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3            (0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT           (0x03ULL << 54)
-
 /* Deal with instructions that older assemblers aren't aware of */
 #define PPC_DCBAL(a, b)        stringify_in_c(.long PPC_INST_DCBAL | \
                                        __PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
 #define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
                                        ___PPC_RT(t) | ___PPC_RA(a) | \
                                        ___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b)          stringify_in_c(.long PPC_INST_LOGMPP | \
-                                       __PPC_RB(b))
 #define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
                                        ___PPC_RT(t) | ___PPC_RA(a) | \
                                        ___PPC_RB(b) | __PPC_EH(eh))
...
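
For readers of the hunk above: the removed constants compose a single 64-bit operand shared by the MPPR SPR and the logmpp instruction. A hedged sketch of how the pieces combine (buf_phys is a hypothetical physical buffer address; nothing below is from the patch itself):

        /* The address mask keeps bits 14-47, so the buffer must be
         * 16KB-aligned and sit below 2^48. */
        u64 mpp_addr = buf_phys & PPC_MPPE_ADDRESS_MASK;

        /* Operands for mtspr(SPRN_MPPR, ...): table-size bits 60-61. */
        u64 start_fetch = mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE;
        u64 abort_fetch = mpp_addr | PPC_MPPR_FETCH_ABORT;      /* size 00 */

        /* Operands for logmpp(): log-control bits 54-55. */
        u64 log_l2    = mpp_addr | PPC_LOGMPP_LOG_L2;   /* log L2 only */
        u64 log_abort = mpp_addr | PPC_LOGMPP_LOG_ABORT; /* stop logging */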
@@ -226,7 +226,6 @@
 #define   CTRL_TE      0x00c00000      /* thread enable */
 #define   CTRL_RUNLATCH        0x1
 #define SPRN_DAWR      0xB4
-#define SPRN_MPPR      0xB8    /* Micro Partition Prefetch Register */
 #define SPRN_RPR       0xBA    /* Relative Priority Register */
 #define SPRN_CIABR     0xBB
 #define   CIABR_PRIV           0x3
...
@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       if (!rtas.entry)
+               return -EINVAL;
+
        if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
                return -EFAULT;
...
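
The effect of the check above, seen from user space: a hypothetical caller of the rtas syscall (a sketch, not a tested program; my_rtas_args merely mirrors the kernel's struct rtas_args, and the process must have CAP_SYS_ADMIN to get past the EPERM check). On a machine where RTAS was never instantiated, rtas.entry stays 0, and this call now fails cleanly with EINVAL instead of letting enter_rtas() branch to address zero:

        #include <errno.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        struct my_rtas_args {                   /* mirrors struct rtas_args */
                unsigned int token, nargs, nret;
                unsigned int args[16];
                unsigned int *rets;
        };

        int main(void)
        {
                struct my_rtas_args a;

                memset(&a, 0, sizeof(a));
                if (syscall(__NR_rtas, &a) < 0)
                        perror("rtas");         /* EINVAL when rtas.entry == 0 */
                return 0;
        }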
@@ -36,7 +36,6 @@
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -75,12 +74,6 @@
 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER       0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER       3
-#endif
-
 static int dynamic_mt_modes = 6;
 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
        vcore->kvm = kvm;
        INIT_LIST_HEAD(&vcore->preempt_list);
 
-       vcore->mpp_buffer_is_valid = false;
-
-       if (cpu_has_feature(CPU_FTR_ARCH_207S))
-               vcore->mpp_buffer = (void *)__get_free_pages(
-                                               GFP_KERNEL|__GFP_ZERO,
-                                               MPP_BUFFER_ORDER);
-
        return vcore;
 }
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
        return 1;
 }
 
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
-       phys_addr_t phy_addr, mpp_addr;
-
-       phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
-       mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-       mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
-       logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
-       vc->mpp_buffer_is_valid = true;
-}
-
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
-       phys_addr_t phy_addr, mpp_addr;
-
-       phy_addr = virt_to_phys(vc->mpp_buffer);
-       mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-       /* We must abort any in-progress save operations to ensure
-        * the table is valid so that prefetch engine knows when to
-        * stop prefetching. */
-       logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
-       mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
-
 /*
  * A list of virtual cores for each physical CPU.
  * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-       if (vc->mpp_buffer_is_valid)
-               kvmppc_start_restoring_l2_cache(vc);
-
        __kvmppc_vcore_entry();
 
-       if (vc->mpp_buffer)
-               kvmppc_start_saving_l2_cache(vc);
-
        srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
        spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
        long int i;
 
-       for (i = 0; i < KVM_MAX_VCORES; ++i) {
-               if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
-                       struct kvmppc_vcore *vc = kvm->arch.vcores[i];
-                       free_pages((unsigned long)vc->mpp_buffer,
-                                  MPP_BUFFER_ORDER);
-               }
+       for (i = 0; i < KVM_MAX_VCORES; ++i)
                kfree(kvm->arch.vcores[i]);
-       }
        kvm->arch.online_vcores = 0;
 }
...
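
To summarize the KVM side of the revert: before this change, every guest entry was bracketed by an L2-footprint restore and a fresh logging pass, using the two functions deleted above. A condensed sketch of that removed flow in kvmppc_run_core() (paraphrased from the deleted lines, not new behavior):

        if (vc->mpp_buffer_is_valid)                    /* a prior log exists */
                kvmppc_start_restoring_l2_cache(vc);    /* prefetch it back */

        __kvmppc_vcore_entry();                         /* run the guest */

        if (vc->mpp_buffer)                             /* buffer allocated */
                kvmppc_start_saving_l2_cache(vc);       /* log L2 for next entry */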
@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
         * so clear LPCR:PECE1. We keep PECE2 enabled.
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+
+       /*
+        * Hard-disable interrupts, and then clear irq_happened flags
+        * that we can safely ignore while off-line, since they
+        * are for things for which we do no processing when off-line
+        * (or in the case of HMI, all the processing we need to do
+        * is done in lower-level real-mode code).
+        */
+       hard_irq_disable();
+       local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
+
        while (!generic_check_cpu_restart(cpu)) {
+               /*
+                * Clear IPI flag, since we don't handle IPIs while
+                * offline, except for those when changing micro-threading
+                * mode, which are handled explicitly below, and those
+                * for coming online, which are handled via
+                * generic_check_cpu_restart() calls.
+                */
+               kvmppc_set_host_ipi(cpu, 0);
+
                ppc64_runlatch_off();
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
                 * having finished executing in a KVM guest, then srr1
                 * contains 0.
                 */
-               if ((srr1 & wmask) == SRR1_WAKEEE) {
+               if (((srr1 & wmask) == SRR1_WAKEEE) ||
+                   (local_paca->irq_happened & PACA_IRQ_EE)) {
                        icp_native_flush_interrupt();
-                       local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
-                       smp_mb();
                } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                        asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
-                       kvmppc_set_host_ipi(cpu, 0);
                }
+               local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+               smp_mb();
+
                if (cpu_core_split_required())
                        continue;
-               if (!generic_check_cpu_restart(cpu))
+               if (srr1 && !generic_check_cpu_restart(cpu))
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
        }
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
...
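
Putting the two smp.c hunks together, the resulting off-line loop has roughly this shape (a simplified sketch; the nap entry is outside the excerpt and is assumed here to be power7_nap(), as in kernels of this vintage):

        hard_irq_disable();
        local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);

        while (!generic_check_cpu_restart(cpu)) {
                kvmppc_set_host_ipi(cpu, 0);    /* stale IPIs ignored here */
                ppc64_runlatch_off();
                srr1 = power7_nap(1);           /* sleep until a wake event */

                if (((srr1 & wmask) == SRR1_WAKEEE) ||
                    (local_paca->irq_happened & PACA_IRQ_EE)) {
                        icp_native_flush_interrupt();   /* ack external irq */
                } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                        asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
                }
                /* Forget the sources just handled before napping again. */
                local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
                smp_mb();

                if (cpu_core_split_required())
                        continue;
                if (srr1 && !generic_check_cpu_restart(cpu))
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
        }

The key change is that pending external interrupts and doorbells are now detected via irq_happened as well as SRR1, and both flags are cleared once per iteration, so a flag left set by an earlier wakeup can no longer cause the loop to exit spuriously or spin.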