[PATCH] drivers/perf: arm-pmu: convert arm_pmu_mutex to spinlock

Sudeep Holla sudeep.holla at arm.com
Wed Aug 3 10:08:55 PDT 2016


arm_pmu_mutex is never held for long, and we must not sleep while
holding it because it is taken in the context of hotplug notifiers,
which run in atomic context. It can therefore be converted to a simple
spinlock.
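
As a rough illustration (hypothetical names, not code from this
driver), a callback invoked from the CPU_STARTING hotplug path runs on
the incoming CPU with interrupts disabled, so any lock taken there must
not sleep; a spinlock satisfies that, a mutex does not:

	/* Hypothetical sketch of the constraint, not the driver code. */
	static DEFINE_SPINLOCK(example_lock);

	static int example_starting_cpu(unsigned int cpu)
	{
		spin_lock(&example_lock);	/* safe: never sleeps */
		/* per-CPU setup that must not block */
		spin_unlock(&example_lock);
		return 0;			/* 0 == success for hotplug callbacks */
	}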

Without this patch we get the following warning:

BUG: sleeping function called from invalid context at kernel/locking/mutex.c:620
in_atomic(): 1, irqs_disabled(): 128, pid: 0, name: swapper/2
no locks held by swapper/2/0.
irq event stamp: 381314
hardirqs last  enabled at (381313): _raw_spin_unlock_irqrestore+0x7c/0x88
hardirqs last disabled at (381314): cpu_die+0x28/0x48
softirqs last  enabled at (381294): _local_bh_enable+0x28/0x50
softirqs last disabled at (381293): irq_enter+0x58/0x78
CPU: 2 PID: 0 Comm: swapper/2 Not tainted 4.7.0 #12
Call trace:
 dump_backtrace+0x0/0x220
 show_stack+0x24/0x30
 dump_stack+0xb4/0xf0
 ___might_sleep+0x1d8/0x1f0
 __might_sleep+0x5c/0x98
 mutex_lock_nested+0x54/0x400
 arm_perf_starting_cpu+0x34/0xb0
 cpuhp_invoke_callback+0x88/0x3d8
 notify_cpu_starting+0x78/0x98
 secondary_start_kernel+0x108/0x1a8

This patch converts the mutex to a spinlock to eliminate the above
warning. This constrains pmu->reset to be a non-blocking call, which is
already the case for all the ARM PMU backends.
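
As a sketch of that constraint (hypothetical backend, not an actual
implementation), a ->reset callback now has to limit itself to
non-sleeping work such as plain register writes:

	/* Hypothetical backend reset; must not sleep under the spinlock. */
	static void example_pmu_reset(void *info)
	{
		/*
		 * 'info' is the struct arm_pmu.  Only non-blocking work is
		 * allowed here: stop and clear the counters with register
		 * writes; no mutexes, no allocations that can sleep.
		 */
	}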

Fixes: 37b502f121ad ("arm/perf: Fix hotplug state machine conversion")
Cc: Stephen Boyd <sboyd at codeaurora.org>
Cc: Will Deacon <will.deacon at arm.com>
Cc: Mark Rutland <mark.rutland at arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla at arm.com>
---
 drivers/perf/arm_pmu.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 6ccb994bdfcb..4c9a537a1265 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }

-static DEFINE_MUTEX(arm_pmu_mutex);
+static DEFINE_SPINLOCK(arm_pmu_lock);
 static LIST_HEAD(arm_pmu_list);

 /*
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 {
 	struct arm_pmu *pmu;

-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_for_each_entry(pmu, &arm_pmu_list, entry) {

 		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu)
 		if (pmu->reset)
 			pmu->reset(pmu);
 	}
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	return 0;
 }

@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;

-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);

 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	return 0;

 out_unregister:
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -869,9 +869,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	mutex_lock(&arm_pmu_mutex);
+	spin_lock(&arm_pmu_lock);
 	list_del(&cpu_pmu->entry);
-	mutex_unlock(&arm_pmu_mutex);
+	spin_unlock(&arm_pmu_lock);
 	free_percpu(cpu_pmu->hw_events);
 }

--
2.7.4



