[RFC PATCH 3/3] arm64: stacktrace: Implement reliable stacktrace

Mark Brown broonie at kernel.org
Mon Oct 12 13:26:05 EDT 2020


Live patching has a consistency model which requires that the
architecture provide a reliable stack trace interface, one which
explicitly indicates that the stack has been fully walked and that it
is reliable and consistent. This is done by implementing
arch_stack_walk_reliable(), a variant of arch_stack_walk() which
verifies these properties as it walks the stack and returns an error
if they cannot be guaranteed.
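
For context, the generic stacktrace code exposes this interface to
consumers such as the live patch core via the existing
stack_trace_save_tsk_reliable() wrapper. A caller might use it along
these lines (a minimal sketch assuming <linux/stacktrace.h>; the
helper name, buffer depth and error handling here are illustrative
and not part of this patch):

/* Hypothetical helper, for illustration only. */
static int check_task_stack(struct task_struct *task)
{
	unsigned long entries[64];	/* illustrative depth */
	int nr_entries;

	/*
	 * Returns the number of entries saved, or a negative error
	 * code if the trace could not be shown to be reliable.
	 */
	nr_entries = stack_trace_save_tsk_reliable(task, entries,
						   ARRAY_SIZE(entries));
	if (nr_entries < 0)
		return nr_entries;

	/* entries[0..nr_entries) now holds a trustworthy trace. */
	return 0;
}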

The arm64 unwinder is already reasonably thorough in validating the
stack as it walks it, reporting an error when it detects a problem. In
addition we check that no kretprobe trampoline appears on the stack,
since the trampoline replaces the real return address and so makes the
trace unreliable. The unwinder is able to resolve function graph
tracer return addresses transparently, so we do not need to reject
those.
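
For reference, that transparent resolution works roughly as below.
This is a paraphrase of the function graph handling already present
in unwind_frame() around this kernel version, shown here only to
explain why such traces are not rejected; details may differ:

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
	    (ptrauth_strip_insn_pac(frame->pc) ==
	     (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;

		/*
		 * The function graph tracer hooked this return by
		 * replacing the saved LR with return_to_handler;
		 * substitute back the original return address.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif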

Signed-off-by: Mark Brown <broonie at kernel.org>
---
 arch/arm64/Kconfig             |  1 +
 arch/arm64/kernel/stacktrace.c | 42 ++++++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d1ba52e4b976..026f69515a86 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -174,6 +174,7 @@ config ARM64
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select MMU_GATHER_RCU_TABLE_FREE
+	select HAVE_RELIABLE_STACKTRACE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ad20981dfda4..795b2c14481d 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -14,6 +14,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
+#include <asm/kprobes.h>
 #include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -212,4 +213,45 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 	walk_stackframe(task, &frame, consume_entry, cookie);
 }
 
+/*
+ * This function returns an error if it detects any unreliable features of the
+ * stack.  Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+			     void *cookie, struct task_struct *task)
+{
+	struct stackframe frame;
+
+	if (task == current)
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(0),
+				(unsigned long)arch_stack_walk_reliable);
+	else
+		start_backtrace(&frame, thread_saved_fp(task),
+				thread_saved_pc(task));
+
+	while (1) {
+		int ret;
+
+#ifdef CONFIG_KPROBES
+		/*
+		 * Mark stacktraces with kretprobed functions on them
+		 * as unreliable.
+		 */
+		if (frame.pc == (unsigned long)kretprobe_trampoline)
+			return -EINVAL;
+#endif
+
+		if (!consume_entry(cookie, frame.pc))
+			return -EINVAL;
+		ret = unwind_frame(task, &frame);
+		if (ret == -ENOENT)
+			return 0;
+		if (ret < 0)
+			return ret;
+	}
+}
+
 #endif
-- 
2.20.1