Commit 1365016f authored by Paul Burton, committed by Greg Kroah-Hartman

MIPS: pm-cps: Drop manual cache-line alignment of ready_count

commit 161c51cc upstream.

We allocate memory for a ready_count variable per-CPU, which is accessed
via a cached non-coherent TLB mapping to perform synchronisation between
threads within the core using LL/SC instructions. In order to ensure
that the variable is contained within its own data cache line we
allocate 2 lines' worth of memory & align the resulting pointer to a
line boundary. This is, however, unnecessary, since kmalloc is
guaranteed to return memory which is at least cache-line aligned (see
ARCH_DMA_MINALIGN). Stop the redundant manual alignment.
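[Editor's note: a minimal kernel-style sketch, not part of the patch, of the
guarantee relied on here; the WARN_ON self-check is hypothetical.]

	/*
	 * Illustrative sketch, assuming kernel context: kmalloc() returns
	 * memory aligned to at least ARCH_DMA_MINALIGN, which on MIPS is
	 * derived from the L1 data cache line size, so a single small
	 * allocation already occupies its own cache line.
	 */
	u32 *rc = kmalloc(sizeof(*rc), GFP_KERNEL);

	/* Hypothetical self-check; should never fire given the guarantee. */
	if (rc)
		WARN_ON((unsigned long)rc & (ARCH_DMA_MINALIGN - 1));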

Besides cleaning up the code & avoiding needless work, this has the side
effect of avoiding an arithmetic error found by Bryan on 64-bit systems,
caused by the 32-bit size of the former dlinesz. That error erroneously
cleared the upper 32 bits of the ready_count pointer on MIPS64 kernels,
causing problems when ready_count was later used via cpuidle.
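[Editor's note: to make the arithmetic error concrete, here is a small
standalone C program written for this note (not kernel code; the pointer
value is made up). Masking a 64-bit value with the complement of a 32-bit
quantity zero-extends the mask, clearing the upper 32 bits.]

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Made-up 64-bit kernel-style pointer with bits set above bit 31 */
		uint64_t ptr = 0xffffffff8012346cULL;
		unsigned int dlinesz = 32;	/* 32-bit, like the removed variable */

		/*
		 * ~(dlinesz - 1) is computed in 32 bits (0xffffffe0) and then
		 * zero-extended to 64 bits for the AND, so the result loses the
		 * upper 32 bits of ptr; this is the bug the commit removes.
		 */
		uint64_t buggy = ptr & ~(dlinesz - 1);

		/* Widening the mask to 64 bits would preserve the upper bits. */
		uint64_t fixed = ptr & ~((uint64_t)dlinesz - 1);

		printf("buggy: %#" PRIx64 "\n", buggy);	/* 0x80123460 */
		printf("fixed: %#" PRIx64 "\n", fixed);	/* 0xffffffff80123460 */
		return 0;
	}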
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Fixes: 3179d37e ("MIPS: pm-cps: add PM state entry code for CPS systems")
Reported-by: Bryan O'Donoghue <bryan.odonoghue@imgtec.com>
Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@imgtec.com>
Tested-by: Bryan O'Donoghue <bryan.odonoghue@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/15383/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c9c5c35b
arch/mips/kernel/pm-cps.c

@@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -624,7 +623,6 @@ static int __init cps_gen_core_entries(unsigned cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -644,16 +642,11 @@ static int __init cps_gen_core_entries(unsigned cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}
 