[PATCH v10 15/20] coresight: Control path during CPU idle

Leo Yan leo.yan at arm.com
Sun Apr 5 08:02:51 PDT 2026


Extend the CPU PM flow to control the path: on suspend, disable the path
from the source up to the node before the sink, then re-enable the same
range on restore.  Stopping short of the sink avoids the latency of
tearing down and re-programming it on every idle transition.

Track per-CPU PM restore failures with the percpu_pm_failed flag.  Once a
CPU hits a restore failure, set percpu_pm_failed and return NOTIFY_BAD on
subsequent notifications, to avoid repeating half-completed transitions.

Setting percpu_pm_failed permanently blocks CPU PM on that CPU.  Such
failures are typically seen only during development; disabling further PM
operations simplifies the implementation, and an error message is printed
to highlight the issue.

Signed-off-by: Leo Yan <leo.yan at arm.com>
---
 drivers/hwtracing/coresight/coresight-core.c | 47 +++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 4 deletions(-)

diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 02d26dd0171ebf4e884bb3e0028b9a21588f061a..c1e8debc76aba7eb5ecf7efe2a3b9b8b3e11b10c 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -37,6 +37,7 @@ DEFINE_MUTEX(coresight_mutex);
 static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
 
 static DEFINE_PER_CPU(struct coresight_path *, percpu_path);
+static DEFINE_PER_CPU(bool, percpu_pm_failed);
 
 /**
  * struct coresight_node - elements of a path, from source to sink
@@ -1732,7 +1733,7 @@ static void coresight_release_device_list(void)
 	}
 }
 
-/* Return: 1 if PM is required, 0 if skip */
+/* Return: 1 if PM is required, 0 if skip, <0 on error */
 static int coresight_pm_check(struct coresight_path *path)
 {
 	struct coresight_device *source;
@@ -1749,6 +1750,9 @@ static int coresight_pm_check(struct coresight_path *path)
 	if (coresight_get_mode(source) == CS_MODE_DISABLED)
 		return 0;
 
+	if (this_cpu_read(percpu_pm_failed))
+		return -EIO;
+
 	/* pm_save_disable() and pm_restore_enable() must be paired */
 	source_has_cb = coresight_ops(source)->pm_save_disable &&
 			coresight_ops(source)->pm_restore_enable;
@@ -1771,24 +1775,59 @@ static void coresight_pm_device_restore(struct coresight_device *csdev)
 static int coresight_pm_save(struct coresight_path *path)
 {
 	struct coresight_device *source = coresight_get_source(path);
+	struct coresight_node *from, *to;
+	int ret;
+
+	ret = coresight_pm_device_save(source);
+	if (ret)
+		return ret;
+
+	from = coresight_path_first_node(path);
+	/* Up to the node before sink to avoid latency */
+	to = list_prev_entry(coresight_path_last_node(path), link);
+	coresight_disable_path_from_to(path, from, to);
 
-	return coresight_pm_device_save(source);
+	return 0;
 }
 
 static void coresight_pm_restore(struct coresight_path *path)
 {
 	struct coresight_device *source = coresight_get_source(path);
+	struct coresight_node *from, *to;
+	int ret;
+
+	from = coresight_path_first_node(path);
+	/* Up to the node before sink to avoid latency */
+	to = list_prev_entry(coresight_path_last_node(path), link);
+	ret = coresight_enable_path_from_to(path, coresight_get_mode(source),
+					    from, to);
+	if (ret)
+		goto path_failed;
 
 	coresight_pm_device_restore(source);
+	return;
+
+path_failed:
+	pr_err("Failed in coresight PM restore on CPU%d: %d\n",
+	       smp_processor_id(), ret);
+
+	/*
+	 * Once PM fails on a CPU, set percpu_pm_failed and leave it set until
+	 * reboot. This prevents repeated partial transitions during idle
+	 * entry and exit.
+	 */
+	this_cpu_write(percpu_pm_failed, true);
 }
 
 static int coresight_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
 				   void *v)
 {
 	struct coresight_path *path = coresight_get_percpu_local_path();
+	int ret;
 
-	if (!coresight_pm_check(path))
-		return NOTIFY_DONE;
+	ret = coresight_pm_check(path);
+	if (ret <= 0)
+		return ret ? NOTIFY_BAD : NOTIFY_DONE;
 
 	switch (cmd) {
 	case CPU_PM_ENTER:

-- 
2.34.1




More information about the linux-arm-kernel mailing list