Commit a026bb12 authored by Sudeep Holla, committed by Will Deacon

drivers/perf: arm-pmu: convert arm_pmu_mutex to spinlock

arm_pmu_mutex is never held for long, and we must not sleep while holding
it because it is taken in the context of hotplug notifiers. It can
therefore be converted to a simple spinlock.

Without this patch we get the following warning:

BUG: sleeping function called from invalid context at kernel/locking/mutex.c:620
in_atomic(): 1, irqs_disabled(): 128, pid: 0, name: swapper/2
no locks held by swapper/2/0.
irq event stamp: 381314
hardirqs last  enabled at (381313): _raw_spin_unlock_irqrestore+0x7c/0x88
hardirqs last disabled at (381314): cpu_die+0x28/0x48
softirqs last  enabled at (381294): _local_bh_enable+0x28/0x50
softirqs last disabled at (381293): irq_enter+0x58/0x78
CPU: 2 PID: 0 Comm: swapper/2 Not tainted 4.7.0 #12
Call trace:
 dump_backtrace+0x0/0x220
 show_stack+0x24/0x30
 dump_stack+0xb4/0xf0
 ___might_sleep+0x1d8/0x1f0
 __might_sleep+0x5c/0x98
 mutex_lock_nested+0x54/0x400
 arm_perf_starting_cpu+0x34/0xb0
 cpuhp_invoke_callback+0x88/0x3d8
 notify_cpu_starting+0x78/0x98
 secondary_start_kernel+0x108/0x1a8

This patch converts the mutex to a spinlock to eliminate the above
warning. This constrains pmu->reset to be a non-blocking call, which is
the case with all the ARM PMU backends.
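
As a rough sketch of the resulting pattern (illustrative only; the
example_* names are hypothetical, not the driver's real symbols), the PMU
list is guarded by a plain spinlock so the CPU-starting callback never
sleeps, and anything called under the lock must be non-blocking:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Sketch only: a non-sleeping critical section that is safe in
	 * CPU_STARTING context (atomic, IRQs off). */
	struct example_pmu {
		struct list_head entry;
		void (*reset)(struct example_pmu *pmu);
	};

	static DEFINE_SPINLOCK(example_lock);
	static LIST_HEAD(example_pmu_list);

	static int example_starting_cpu(unsigned int cpu)
	{
		struct example_pmu *pmu;

		spin_lock(&example_lock);	/* never sleeps, unlike mutex_lock() */
		list_for_each_entry(pmu, &example_pmu_list, entry) {
			if (pmu->reset)
				pmu->reset(pmu);	/* must therefore be non-blocking */
		}
		spin_unlock(&example_lock);
		return 0;
	}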

Cc: Stephen Boyd <sboyd@codeaurora.org>
Fixes: 37b502f1 ("arm/perf: Fix hotplug state machine conversion")
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 50ee91bd
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
 static LIST_HEAD(arm_pmu_list);
 
 /*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 {
 	struct arm_pmu *pmu;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_for_each_entry(pmu, &arm_pmu_list, entry) {
 		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 		if (pmu->reset)
 			pmu->reset(pmu);
 	}
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	return 0;
 }
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;
 
 out_unregister:
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -869,9 +869,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_pmu->hw_events);
 }