[PATCH 3/3] perf/arm_pmu_platform: Clean up with dev_printk

Robin Murphy <robin.murphy@arm.com>
Fri Mar 26 16:02:42 GMT 2021


Nearly all of the messages we can log from the platform device code
relate to the specific PMU device and the properties we're parsing from
its DT node. In some cases we use %pOF to point at where something was
wrong, but even that is inconsistent. Let's convert these logs to the
appropriate dev_printk variants, so that every issue specific to the
device and/or its DT description is clearly and instantly attributable,
particularly if there is more than one PMU node present in the DT.
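
For example, the shape of the conversion at a typical call site
(condensed from the diff below) is:

    /* before: at best the DT node is named, and not consistently */
    pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
            i, node);

    /* after: dev_warn() prefixes the message with the device name,
     * so the offending PMU instance is always identifiable */
    dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i);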

The local refactoring in a couple of functions invites some extra
cleanup in the process: the init_fn matching can be streamlined, and
the PMU registration failure message can be moved to the appropriate
place and log level.
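
Concretely, the init_fn streamlining replaces the open-coded
of_match_node() lookup with of_device_get_match_data(), which returns
the matching entry's data from the driver's own OF match table (or
NULL if nothing matches); excerpted from the hunk below:

    /* before: explicit node check plus match against of_table */
    if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
        init_fn = of_id->data;

    /* after: one call, with the NULL return covering the no-match case */
    init_fn = of_device_get_match_data(dev);
    if (init_fn) {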

CC: Tian Tao <tiantao6@hisilicon.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
 drivers/perf/arm_pmu_platform.c | 47 +++++++++++++++------------------
 1 file changed, 22 insertions(+), 25 deletions(-)

diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index ef9676418c9f..513de1f54e2d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -63,7 +63,7 @@ static bool pmu_has_irq_affinity(struct device_node *node)
 	return !!of_find_property(node, "interrupt-affinity", NULL);
 }
 
-static int pmu_parse_irq_affinity(struct device_node *node, int i)
+static int pmu_parse_irq_affinity(struct device *dev, int i)
 {
 	struct device_node *dn;
 	int cpu;
@@ -73,19 +73,18 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
 	 * affinity matches our logical CPU order, as we used to assume.
 	 * This is fragile, so we'll warn in pmu_parse_irqs().
 	 */
-	if (!pmu_has_irq_affinity(node))
+	if (!pmu_has_irq_affinity(dev->of_node))
 		return i;
 
-	dn = of_parse_phandle(node, "interrupt-affinity", i);
+	dn = of_parse_phandle(dev->of_node, "interrupt-affinity", i);
 	if (!dn) {
-		pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
-			i, node);
+		dev_warn(dev, "failed to parse interrupt-affinity[%d]\n", i);
 		return -EINVAL;
 	}
 
 	cpu = of_cpu_node_to_id(dn);
 	if (cpu < 0) {
-		pr_warn("failed to find logical CPU for %pOFn\n", dn);
+		dev_warn(dev, "failed to find logical CPU for %pOFn\n", dn);
 		cpu = nr_cpu_ids;
 	}
 
@@ -99,17 +98,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 	int i = 0, num_irqs;
 	struct platform_device *pdev = pmu->plat_device;
 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+	struct device *dev = &pdev->dev;
 
 	num_irqs = platform_irq_count(pdev);
 	if (num_irqs < 0)
-		return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
+		return dev_err_probe(dev, num_irqs, "unable to count PMU IRQs\n");
 
 	/*
 	 * In this case we have no idea which CPUs are covered by the PMU.
 	 * To match our prior behaviour, we assume all CPUs in this case.
 	 */
 	if (num_irqs == 0) {
-		pr_warn("no irqs for PMU, sampling events not supported\n");
+		dev_warn(dev, "no irqs for PMU, sampling events not supported\n");
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 		cpumask_setall(&pmu->supported_cpus);
 		return 0;
@@ -121,10 +121,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 			return pmu_parse_percpu_irq(pmu, irq);
 	}
 
-	if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
-		pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
-			pdev->dev.of_node);
-	}
+	if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
+		dev_warn(dev, "no interrupt-affinity property, guessing.\n");
 
 	for (i = 0; i < num_irqs; i++) {
 		int cpu, irq;
@@ -134,18 +132,18 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 			continue;
 
 		if (irq_is_percpu_devid(irq)) {
-			pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
+			dev_warn(dev, "multiple PPIs or mismatched SPI/PPI detected\n");
 			return -EINVAL;
 		}
 
-		cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
+		cpu = pmu_parse_irq_affinity(dev, i);
 		if (cpu < 0)
 			return cpu;
 		if (cpu >= nr_cpu_ids)
 			continue;
 
 		if (per_cpu(hw_events->irq, cpu)) {
-			pr_warn("multiple PMU IRQs for the same CPU detected\n");
+			dev_warn(dev, "multiple PMU IRQs for the same CPU detected\n");
 			return -EINVAL;
 		}
 
@@ -190,9 +188,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 			 const struct of_device_id *of_table,
 			 const struct pmu_probe_info *probe_table)
 {
-	const struct of_device_id *of_id;
 	armpmu_init_fn init_fn;
-	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
 	struct arm_pmu *pmu;
 	int ret = -ENODEV;
 
@@ -206,15 +203,14 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 	if (ret)
 		goto out_free;
 
-	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
-		init_fn = of_id->data;
-
-		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+	init_fn = of_device_get_match_data(dev);
+	if (init_fn) {
+		pmu->secure_access = of_property_read_bool(dev->of_node,
 							   "secure-reg-access");
 
 		/* arm64 systems boot only as non-secure */
 		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
-			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+			dev_warn(dev, "ignoring \"secure-reg-access\" property for arm64\n");
 			pmu->secure_access = false;
 		}
 
@@ -225,7 +221,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 	}
 
 	if (ret) {
-		pr_info("%pOF: failed to probe PMU!\n", node);
+		dev_err(dev, "failed to probe PMU!\n");
 		goto out_free;
 	}
 
@@ -234,15 +230,16 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 		goto out_free_irqs;
 
 	ret = armpmu_register(pmu);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "failed to register PMU devices!\n");
 		goto out_free_irqs;
+	}
 
 	return 0;
 
 out_free_irqs:
 	armpmu_free_irqs(pmu);
 out_free:
-	pr_info("%pOF: failed to register PMU devices!\n", node);
 	armpmu_free(pmu);
 	return ret;
 }
-- 
2.21.0.dirty



