Commit e18e8844 authored by Linus Torvalds

Merge tag 'powerpc-4.14-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "A fix for a bad bug (written by me) in our livepatch handler. Removal
  of an over-zealous lockdep_assert_cpus_held() in our topology code. A
  fix to the recently added emulation of cntlz[wd]. And three small
  fixes to the recently added IMC PMU driver.

  Thanks to: Anju T Sudhakar, Balbir Singh, Kamalesh Babulal, Naveen N.
  Rao, Sandipan Das, Santosh Sivaraj, Thiago Jung Bauermann"

* tag 'powerpc-4.14-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/perf: Fix IMC initialization crash
  powerpc/perf: Add __GFP_NOWARN flag to alloc_pages_node()
  powerpc/perf: Fix for core/nest imc call trace on cpuhotplug
  powerpc: Don't call lockdep_assert_cpus_held() from arch_update_cpu_topology()
  powerpc/lib/sstep: Fix count leading zeros instructions
  powerpc/livepatch: Fix livepatch stack access
parents 3be5f884 0d8ba162
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
  * - we have no stack frame and can not allocate one
  * - LR points back to the original caller (in A)
  * - CTR holds the new NIP in C
- * - r0 & r12 are free
- *
- * r0 can't be used as the base register for a DS-form load or store, so
- * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ * - r0, r11 & r12 are free
  */
 livepatch_handler:
 	CURRENT_THREAD_INFO(r12, r1)

-	/* Save stack pointer into r0 */
-	mr	r0, r1
-
 	/* Allocate 3 x 8 bytes */
-	ld	r1, TI_livepatch_sp(r12)
-	addi	r1, r1, 24
-	std	r1, TI_livepatch_sp(r12)
+	ld	r11, TI_livepatch_sp(r12)
+	addi	r11, r11, 24
+	std	r11, TI_livepatch_sp(r12)

 	/* Save toc & real LR on livepatch stack */
-	std	r2,  -24(r1)
+	std	r2,  -24(r11)
 	mflr	r12
-	std	r12, -16(r1)
+	std	r12, -16(r11)

 	/* Store stack end marker */
 	lis     r12, STACK_END_MAGIC@h
 	ori     r12, r12, STACK_END_MAGIC@l
-	std	r12, -8(r1)
-
-	/* Restore real stack pointer */
-	mr	r1, r0
+	std	r12, -8(r11)

 	/* Put ctr in r12 for global entry and branch there */
 	mfctr	r12
@@ -216,36 +207,30 @@ livepatch_handler:
 	/*
 	 * Now we are returning from the patched function to the original
-	 * caller A. We are free to use r0 and r12, and we can use r2 until we
+	 * caller A. We are free to use r11, r12 and we can use r2 until we
 	 * restore it.
 	 */

 	CURRENT_THREAD_INFO(r12, r1)

-	/* Save stack pointer into r0 */
-	mr	r0, r1
-
-	ld	r1, TI_livepatch_sp(r12)
+	ld	r11, TI_livepatch_sp(r12)

 	/* Check stack marker hasn't been trashed */
 	lis     r2,  STACK_END_MAGIC@h
 	ori     r2,  r2, STACK_END_MAGIC@l
-	ld	r12, -8(r1)
+	ld	r12, -8(r11)
 1:	tdne	r12, r2
 	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

 	/* Restore LR & toc from livepatch stack */
-	ld	r12, -16(r1)
+	ld	r12, -16(r11)
 	mtlr	r12
-	ld	r2,  -24(r1)
+	ld	r2,  -24(r11)

 	/* Pop livepatch stack frame */
-	CURRENT_THREAD_INFO(r12, r0)
-	subi	r1, r1, 24
-	std	r1, TI_livepatch_sp(r12)
-
-	/* Restore real stack pointer */
-	mr	r1, r0
+	CURRENT_THREAD_INFO(r12, r1)
+	subi	r11, r11, 24
+	std	r11, TI_livepatch_sp(r12)

 	/* Return to original caller of live patched function */
 	blr
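For readers unfamiliar with the handler above: it keeps a small per-thread "livepatch stack" on which it saves the TOC pointer and the real LR around the call into the patched function, with STACK_END_MAGIC as an overflow canary. The bug was that r1 temporarily pointed into that area; an interrupt arriving at the wrong moment would treat r1 as the real stack pointer and corrupt it. The fix uses r11 as the scratch pointer so r1 always points at the real stack. A minimal user-space C sketch of the same bookkeeping (all names here are hypothetical, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_END_MAGIC 0x57AC6E9DUL	/* same canary value the kernel uses */

/* Hypothetical stand-in for the per-thread TI_livepatch_sp slot. */
static uint64_t livepatch_area[64];
static uint64_t *livepatch_sp = livepatch_area;

/* Push a 3 x 8 byte frame: saved toc, saved lr, end marker. */
static void lp_push(uint64_t toc, uint64_t lr)
{
	livepatch_sp += 3;			/* addi r11, r11, 24    */
	livepatch_sp[-3] = toc;			/* std  r2,  -24(r11)   */
	livepatch_sp[-2] = lr;			/* std  r12, -16(r11)   */
	livepatch_sp[-1] = STACK_END_MAGIC;	/* std  r12, -8(r11)    */
}

/* Pop the frame, checking the end marker wasn't trashed. */
static void lp_pop(uint64_t *toc, uint64_t *lr)
{
	assert(livepatch_sp[-1] == STACK_END_MAGIC);	/* tdne r12, r2 */
	*lr  = livepatch_sp[-2];
	*toc = livepatch_sp[-3];
	livepatch_sp -= 3;			/* subi r11, r11, 24    */
}

int main(void)
{
	uint64_t toc, lr;

	lp_push(0x1000, 0x2000);
	lp_pop(&toc, &lr);
	printf("toc=%#llx lr=%#llx\n",
	       (unsigned long long)toc, (unsigned long long)lr);
	return 0;
}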
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 		 * Logical instructions
 		 */
 		case 26:	/* cntlzw */
-			op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+			val = (unsigned int) regs->gpr[rd];
+			op->val = (val ? __builtin_clz(val) : 32);
 			goto logical_done;
 #ifdef __powerpc64__
 		case 58:	/* cntlzd */
-			op->val = __builtin_clzl(regs->gpr[rd]);
+			val = regs->gpr[rd];
+			op->val = (val ? __builtin_clzl(val) : 64);
 			goto logical_done;
 #endif
 		case 28:	/* and */
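The sstep change matters because GCC documents __builtin_clz() and __builtin_clzl() as undefined for a zero argument, while the PowerPC cntlzw and cntlzd instructions are architecturally defined to return 32 and 64 respectively for zero input. A minimal sketch of the corrected emulation, runnable in user space (assumes LP64, i.e. 64-bit unsigned long, as on ppc64):

#include <assert.h>
#include <stdint.h>

/* cntlzw: count leading zeros of the low 32 bits; 0 input yields 32. */
static unsigned int emulate_cntlzw(uint64_t gpr)
{
	unsigned int val = (unsigned int)gpr;

	return val ? (unsigned int)__builtin_clz(val) : 32;
}

/* cntlzd: count leading zeros of all 64 bits; 0 input yields 64. */
static unsigned int emulate_cntlzd(uint64_t gpr)
{
	return gpr ? (unsigned int)__builtin_clzl(gpr) : 64;
}

int main(void)
{
	assert(emulate_cntlzw(0) == 32);	/* previously undefined behaviour */
	assert(emulate_cntlzw(1) == 31);
	assert(emulate_cntlzd(0) == 64);
	assert(emulate_cntlzd(1ULL << 63) == 0);
	return 0;
}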
@@ -1438,7 +1438,6 @@ int numa_update_cpu_topology(bool cpus_locked)
 int arch_update_cpu_topology(void)
 {
-	lockdep_assert_cpus_held();
 	return numa_update_cpu_topology(true);
 }
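lockdep_assert_cpus_held() warns unless the caller holds the CPU-hotplug read lock, and arch_update_cpu_topology() is also reachable from paths that do not hold it, which is why the assertion was over-zealous here. A kernel-style sketch of the pattern the assertion expects (the caller names are illustrative, not from this patch):

#include <linux/cpu.h>

/* Fires a lockdep warning unless cpus_read_lock() is held by the caller. */
static void walk_cpu_topology(void)
{
	lockdep_assert_cpus_held();
	/* ... safe to inspect topology: hotplug cannot run concurrently ... */
}

static void good_caller(void)
{
	cpus_read_lock();
	walk_cpu_topology();	/* fine: hotplug read lock is held */
	cpus_read_unlock();
}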
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
 	/* Take the mutex lock for this node and then decrement the reference count */
 	mutex_lock(&ref->lock);
+	if (ref->refc == 0) {
+		/*
+		 * This is the case when a perf session is started and
+		 * all the cpus in a given node are then offlined.
+		 *
+		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+		 * sets ref->refc to zero and makes an OPAL call to disable
+		 * the engine in that node if the cpu going offline is the
+		 * last cpu in the node.
+		 */
+		mutex_unlock(&ref->lock);
+		return;
+	}
 	ref->refc--;
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
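The early-return guard above keeps ref->refc from being decremented below zero when the hotplug path has already zeroed it and shut the engine down; the identical guard is added to the core-IMC release path further down. The same pattern in a self-contained user-space C sketch (hypothetical names, pthread mutex standing in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

struct imc_ref {
	pthread_mutex_t lock;
	int refc;
};

/* Drop a reference; a refcount already at zero means the hotplug
 * path released the last reference for us, so just bail out. */
static void ref_put(struct imc_ref *ref)
{
	pthread_mutex_lock(&ref->lock);
	if (ref->refc == 0) {		/* already torn down elsewhere */
		pthread_mutex_unlock(&ref->lock);
		return;
	}
	if (--ref->refc == 0)
		printf("last user gone: stop the counters here\n");
	pthread_mutex_unlock(&ref->lock);
}

int main(void)
{
	struct imc_ref ref = { PTHREAD_MUTEX_INITIALIZER, 1 };

	ref_put(&ref);	/* drops to zero, stops counters */
	ref_put(&ref);	/* no-op instead of underflowing to -1 */
	return 0;
}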
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
 	/* We need only vbase for core counters */
 	mem_info->vbase = page_address(alloc_pages_node(phys_id,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				get_order(size)));
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				__GFP_NOWARN, get_order(size)));
 	if (!mem_info->vbase)
 		return -ENOMEM;
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
 		return;

 	mutex_lock(&ref->lock);
+	if (ref->refc == 0) {
+		/*
+		 * This is the case when a perf session is started and
+		 * all the cpus in a given core are then offlined.
+		 *
+		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+		 * sets ref->refc to zero and makes an OPAL call to disable
+		 * the engine in that core if the cpu going offline is the
+		 * last cpu in the core.
+		 */
+		mutex_unlock(&ref->lock);
+		return;
+	}
 	ref->refc--;
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
 	 * free the memory in cpu offline path.
 	 */
 	local_mem = page_address(alloc_pages_node(phys_id,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				get_order(size)));
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				__GFP_NOWARN, get_order(size)));
 	if (!local_mem)
 		return -ENOMEM;
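Both alloc_pages_node() hunks add __GFP_NOWARN because these are high-order, node-constrained allocations that can legitimately fail; the callers already check for NULL and return -ENOMEM, so the default page-allocation-failure splat is just noise. A hedged kernel-style sketch of the pattern (the helper name is hypothetical):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: silent high-order allocation on a specific node; the NULL
 * check is the error report, so __GFP_NOWARN suppresses the warning. */
static void *alloc_counter_buffer(int nid, int size)
{
	struct page *page = alloc_pages_node(nid,
			GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
			__GFP_NOWARN, get_order(size));

	return page ? page_address(page) : NULL;
}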
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 	}

 	/* Only free the attr_groups which are dynamically allocated */
-	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
 	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
 	kfree(pmu_ptr);
 	return;
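This final hunk fixes the IMC initialization crash in the cleanup path: kfree(NULL) is safe, but dereferencing ->attrs through a NULL attr_groups pointer is not, so only the dereference is guarded while the plain kfree() calls stay bare. The same idiom in a runnable user-space sketch (hypothetical structures):

#include <stdlib.h>

struct attr_group {
	void *attrs;
};

struct pmu {
	struct attr_group *event_group;
};

/* Cleanup must tolerate partially-initialised objects: free(NULL) is
 * a no-op, but NULL->attrs would crash, hence the guard. */
static void pmu_free(struct pmu *p)
{
	if (!p)
		return;
	if (p->event_group)		/* guard the dereference only */
		free(p->event_group->attrs);
	free(p->event_group);		/* free(NULL) is always safe */
	free(p);
}

int main(void)
{
	struct pmu *p = calloc(1, sizeof(*p));	/* event_group == NULL */

	pmu_free(p);	/* previously this pattern would crash */
	return 0;
}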