[RFC 11/12] riscv: Request pmu overflow interrupt as NMI

Xu Lu luxu.kernel at bytedance.com
Mon Oct 23 01:29:10 PDT 2023


This commit registers the PMU overflow interrupt as an NMI so that counter
overflows can be taken even in regions where normal interrupts are masked,
which improves the accuracy of perf sampling.
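
For reference, the per-CPU NMI lifecycle used here follows the generic IRQ
framework API: request_percpu_nmi() once at setup time,
prepare_percpu_nmi()/enable_percpu_nmi() on each CPU coming online, and the
reverse order on teardown. The snippet below is only a minimal sketch of that
flow; the handler and wrapper names are placeholders, and the real handler is
pmu_sbi_ovf_handler() as in the diff below:

  #include <linux/interrupt.h>
  #include <linux/irq.h>

  /* Placeholder handler; the real one is pmu_sbi_ovf_handler(). */
  static irqreturn_t pmu_nmi_handler(int irq, void *dev)
  {
          /* Read overflowed counters and feed samples to perf. */
          return IRQ_HANDLED;
  }

  static int pmu_setup_nmi(unsigned int irq, void __percpu *hw_events)
  {
          /* Done once, before any CPU enables the line. */
          return request_percpu_nmi(irq, pmu_nmi_handler, "riscv-pmu",
                                    hw_events);
  }

  static int pmu_cpu_online(unsigned int irq)
  {
          int ret;

          /* Must run on the CPU being brought online. */
          ret = prepare_percpu_nmi(irq);
          if (ret)
                  return ret;
          enable_percpu_nmi(irq, IRQ_TYPE_NONE);
          return 0;
  }

  static void pmu_cpu_offline(unsigned int irq)
  {
          /* Reverse order on the CPU going offline. */
          disable_percpu_nmi(irq);
          teardown_percpu_nmi(irq);
  }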

Signed-off-by: Xu Lu <luxu.kernel at bytedance.com>
---
 arch/riscv/include/asm/irqflags.h |  2 +-
 drivers/perf/riscv_pmu_sbi.c      | 23 +++++++++++++++++++----
 2 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 6a709e9c69ca..be840e297559 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -12,7 +12,7 @@
 
 #ifdef CONFIG_RISCV_PSEUDO_NMI
 
-#define __ALLOWED_NMI_MASK			0
+#define __ALLOWED_NMI_MASK			BIT(IRQ_PMU_OVF)
 #define ALLOWED_NMI_MASK			(__ALLOWED_NMI_MASK & irqs_enabled_ie)
 
 static inline bool nmi_allowed(int irq)
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 995b501ec721..85abb7dd43b9 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -760,6 +760,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 
 static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 {
+	int ret = 0;
 	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
 	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
 
@@ -778,20 +779,30 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	if (riscv_pmu_use_irq) {
 		cpu_hw_evt->irq = riscv_pmu_irq;
 		csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
-#ifndef CONFIG_RISCV_PSEUDO_NMI
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+		ret = prepare_percpu_nmi(riscv_pmu_irq);
+		if (ret != 0) {
+			pr_err("Failed to prepare percpu nmi:%d\n", ret);
+			return ret;
+		}
+		enable_percpu_nmi(riscv_pmu_irq, IRQ_TYPE_NONE);
+#else
 		csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
-#endif
 		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
+#endif
 	}
 
-	return 0;
+	return ret;
 }
 
 static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
 {
 	if (riscv_pmu_use_irq) {
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+		disable_percpu_nmi(riscv_pmu_irq);
+		teardown_percpu_nmi(riscv_pmu_irq);
+#else
 		disable_percpu_irq(riscv_pmu_irq);
-#ifndef CONFIG_RISCV_PSEUDO_NMI
 		csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
 #endif
 	}
@@ -835,7 +846,11 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 		return -ENODEV;
 	}
 
+#ifdef CONFIG_RISCV_PSEUDO_NMI
+	ret = request_percpu_nmi(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
+#else
 	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
+#endif
 	if (ret) {
 		pr_err("registering percpu irq failed [%d]\n", ret);
 		return ret;
-- 
2.20.1
