[PATCH 3/3] perf: RISC-V: support noncontiguous pmu counter IDs

Sergey Matyukevich geomatsi at gmail.com
Thu Jun 23 04:27:35 PDT 2022


From: Sergey Matyukevich <sergey.matyukevich at syntacore.com>

OpenSBI and the Linux driver both assume that PMU counter IDs need not
be contiguous. However, current support for noncontiguous IDs is limited
to the special treatment of index 1, which hardware uses for TM control.
Replace the counter array with an IDR to support arbitrary gaps in
hardware counter IDs.
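
The core of the change is to pin each counter's info at its exact
(possibly noncontiguous) SBI counter ID and to treat an empty IDR slot
as "no such counter". A minimal sketch of that pattern, using
hypothetical names rather than the actual driver structures:

  #include <linux/idr.h>
  #include <linux/slab.h>

  /* Hypothetical stand-in for union sbi_pmu_ctr_info. */
  struct ctr_info {
  	unsigned long value;
  };

  static DEFINE_IDR(ctr_idr);

  /* Register info at exactly the (possibly sparse) counter ID 'id'. */
  static int ctr_add(int id, unsigned long value)
  {
  	struct ctr_info *info;
  	int err;

  	info = kzalloc(sizeof(*info), GFP_KERNEL);
  	if (!info)
  		return -ENOMEM;

  	/* The [id, id + 1) range forces idr_alloc() to use this ID. */
  	err = idr_alloc(&ctr_idr, info, id, id + 1, GFP_KERNEL);
  	if (err < 0) {
  		kfree(info);
  		return err;
  	}

  	info->value = value;
  	return 0;
  }

  /* Gaps in the ID space simply show up as NULL lookups. */
  static unsigned long ctr_value(int id)
  {
  	struct ctr_info *info = idr_find(&ctr_idr, id);

  	return info ? info->value : 0;
  }

In the patch itself the same lookup-and-check shows up in
pmu_sbi_ctr_get_width(), pmu_sbi_ctr_is_fw(), pmu_sbi_ctr_read() and
the overflow handler.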

Signed-off-by: Sergey Matyukevich <sergey.matyukevich at syntacore.com>
---
 drivers/perf/riscv_pmu_legacy.c |  4 +-
 drivers/perf/riscv_pmu_sbi.c    | 88 +++++++++++++++++++++++----------
 include/linux/perf/riscv_pmu.h  |  2 +-
 3 files changed, 65 insertions(+), 29 deletions(-)

diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 342778782359..7d7131c47bc0 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -14,7 +14,6 @@
 
 #define RISCV_PMU_LEGACY_CYCLE		0
 #define RISCV_PMU_LEGACY_INSTRET	1
-#define RISCV_PMU_LEGACY_NUM_CTR	2
 
 static bool pmu_init_done;
 
@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
 {
 	pr_info("Legacy PMU implementation is available\n");
 
-	pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
+	pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
+		BIT(RISCV_PMU_LEGACY_INSTRET);
 	pmu->ctr_start = pmu_legacy_ctr_start;
 	pmu->ctr_stop = NULL;
 	pmu->event_map = pmu_legacy_event_map;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 294d4bded59e..57bea421f014 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -39,7 +39,7 @@ union sbi_pmu_ctr_info {
  * RISC-V doesn't have hetergenous harts yet. This need to be part of
  * per_cpu in case of harts with different pmu counters
  */
-static union sbi_pmu_ctr_info *pmu_ctr_list;
+static DEFINE_IDR(pmu_ctr_list);
 static unsigned int riscv_pmu_irq;
 
 struct sbi_pmu_event_data {
@@ -243,14 +243,20 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M
 
 static int pmu_sbi_ctr_get_width(int idx)
 {
-	return pmu_ctr_list[idx].width;
+	union sbi_pmu_ctr_info *info;
+
+	info = idr_find(&pmu_ctr_list, idx);
+	if (!info)
+		return 0;
+
+	return info->width;
 }
 
 static bool pmu_sbi_ctr_is_fw(int cidx)
 {
 	union sbi_pmu_ctr_info *info;
 
-	info = &pmu_ctr_list[cidx];
+	info = idr_find(&pmu_ctr_list, cidx);
 	if (!info)
 		return false;
 
@@ -264,8 +270,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
 	struct sbiret ret;
 	int idx;
-	uint64_t cbase = 0;
-	uint64_t cmask = GENMASK_ULL(rvpmu->num_counters, 0);
+	u64 cbase = 0;
 	unsigned long cflags = 0;
 
 	if (event->attr.exclude_kernel)
@@ -274,8 +279,8 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 		cflags |= SBI_PMU_CFG_FLAG_SET_UINH;
 
 	/* retrieve the available counter index */
-	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
-			cflags, hwc->event_base, hwc->config, 0);
+	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+			rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
 	if (ret.error) {
 		pr_debug("Not able to find a counter for event %lx config %llx\n",
 			hwc->event_base, hwc->config);
@@ -283,7 +288,8 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 	}
 
 	idx = ret.value;
-	if (idx > rvpmu->num_counters || !pmu_ctr_list[idx].value)
+
+	if (!idr_find(&pmu_ctr_list, idx))
 		return -ENOENT;
 
 	/* Additional sanity check for the counter id */
@@ -393,7 +399,7 @@ static u64 pmu_sbi_ctr_read(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 	struct sbiret ret;
-	union sbi_pmu_ctr_info info;
+	union sbi_pmu_ctr_info *info;
 	u64 val = 0;
 
 	if (pmu_sbi_is_fw_event(event)) {
@@ -402,10 +408,12 @@ static u64 pmu_sbi_ctr_read(struct perf_event *event)
 		if (!ret.error)
 			val = ret.value;
 	} else {
-		info = pmu_ctr_list[idx];
-		val = riscv_pmu_ctr_read_csr(info.csr);
+		info = idr_find(&pmu_ctr_list, idx);
+		if (!info)
+			return 0;
+		val = riscv_pmu_ctr_read_csr(info->csr);
 		if (IS_ENABLED(CONFIG_32BIT))
-			val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 31 | val;
+			val = ((u64)riscv_pmu_ctr_read_csr(info->csr + 0x80)) << 31 | val;
 	}
 
 	return val;
@@ -447,27 +455,46 @@ static int pmu_sbi_find_num_ctrs(void)
 		return sbi_err_map_linux_errno(ret.error);
 }
 
-static int pmu_sbi_get_ctrinfo(int nctr)
+static int pmu_sbi_get_ctrinfo(int nctr, u64 *mask)
 {
 	struct sbiret ret;
 	int i, num_hw_ctr = 0, num_fw_ctr = 0;
-	union sbi_pmu_ctr_info cinfo;
-
-	pmu_ctr_list = kcalloc(nctr + 1, sizeof(*pmu_ctr_list), GFP_KERNEL);
-	if (!pmu_ctr_list)
-		return -ENOMEM;
+	union sbi_pmu_ctr_info *cinfo;
+	int err;
 
-	for (i = 0; i <= nctr; i++) {
+	for (i = 0; i < 8 * sizeof(*mask); i++) {
 		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
 		if (ret.error)
 			/* The logical counter ids are not expected to be contiguous */
 			continue;
-		cinfo.value = ret.value;
-		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
+
+		*mask |= BIT_ULL(i);
+
+		cinfo = kzalloc(sizeof(*cinfo), GFP_KERNEL);
+		if (!cinfo)
+			return -ENOMEM;
+
+		err = idr_alloc(&pmu_ctr_list, cinfo, i, i + 1, GFP_KERNEL);
+		if (err < 0) {
+			pr_err("Failed to allocate PMU counter index %d\n", i);
+			kfree(cinfo);
+			return err;
+		}
+
+		cinfo->value = ret.value;
+		if (cinfo->type == SBI_PMU_CTR_TYPE_FW)
 			num_fw_ctr++;
 		else
 			num_hw_ctr++;
-		pmu_ctr_list[i].value = cinfo.value;
+
+		if (nctr == (num_fw_ctr + num_hw_ctr))
+			break;
+	}
+
+	if (nctr != (num_fw_ctr + num_hw_ctr)) {
+		pr_err("Invalid PMU counters: fw(%d) + hw(%d) != total(%d)\n",
+		       num_fw_ctr, num_hw_ctr, nctr);
+		return -EINVAL;
 	}
 
 	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);
@@ -482,7 +509,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
 	 * which may include counters that are not enabled yet.
 	 */
 	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
-		  0, GENMASK_ULL(pmu->num_counters, 0), 0, 0, 0, 0);
+		  0, pmu->cmask, 0, 0, 0, 0);
 }
 
 static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -582,7 +609,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 		if (!event || !is_sampling_event(event))
 			continue;
 
-		info = &pmu_ctr_list[lidx];
+		info = idr_find(&pmu_ctr_list, lidx);
 		/* Do a sanity check */
 		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
 			continue;
@@ -698,6 +725,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 	struct riscv_pmu *pmu = NULL;
 	int num_counters;
 	int ret = -ENODEV;
+	u64 cmask = 0;
+	void *entry;
+	int idx;
 
 	pr_info("SBI PMU extension is available\n");
 	pmu = riscv_pmu_alloc();
@@ -711,7 +741,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 	}
 
 	/* cache all the information about counters now */
-	if (pmu_sbi_get_ctrinfo(num_counters))
+	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
 		goto out_free;
 
 	ret = pmu_sbi_setup_irqs(pmu, pdev);
@@ -720,7 +750,8 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
 	}
-	pmu->num_counters = num_counters;
+
+	pmu->cmask = cmask;
 	pmu->ctr_start = pmu_sbi_ctr_start;
 	pmu->ctr_stop = pmu_sbi_ctr_stop;
 	pmu->event_map = pmu_sbi_event_map;
@@ -742,6 +773,11 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
 	return 0;
 
 out_free:
+	idr_for_each_entry(&pmu_ctr_list, entry, idx) {
+		idr_remove(&pmu_ctr_list, idx);
+		kfree(entry);
+	}
+
 	kfree(pmu);
 	return ret;
 }
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index 46f9b6fe306e..b46e7e6d3209 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -45,7 +45,7 @@ struct riscv_pmu {
 
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 
-	int		num_counters;
+	u64		cmask;
 	u64		(*ctr_read)(struct perf_event *event);
 	int		(*ctr_get_idx)(struct perf_event *event);
 	int		(*ctr_get_width)(int idx);
-- 
2.36.1



