[PATCH 4/4] arm64: ftrace: Add return address protection

Ard Biesheuvel ardb at kernel.org
Tue Nov 29 06:18:03 PST 2022


Use the newly added asm macros to protect and restore the return address
in the ftrace call wrappers, based on whichever method is active (PAC
and/or shadow call stack).

If the graph tracer is in use, this covers both the return address *to*
the ftrace call site and the return address *at* the call site, and the
latter will be restored either in return_to_handler() or before
returning to the call site.

Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
---
 arch/arm64/kernel/entry-ftrace.S | 28 +++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 795344ab4ec45889..c744e4dd8c90a352 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -35,6 +35,11 @@
  * is missing from the LR and existing chain of frame records.
  */
 	.macro  ftrace_regs_entry, allregs=0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	protect_return_address x9
+#endif
+	protect_return_address x30
+
 	/* Make room for pt_regs, plus a callee frame */
 	sub	sp, sp, #(PT_REGS_SIZE + 16)
 
@@ -89,7 +94,9 @@ SYM_CODE_START(ftrace_caller)
 	b	ftrace_common
 SYM_CODE_END(ftrace_caller)
 
-SYM_CODE_START(ftrace_common)
+SYM_CODE_START_LOCAL(ftrace_common)
+	alternative_insn  nop, "xpaci x30", ARM64_HAS_ADDRESS_AUTH, IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)
+
 	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
 	mov	x1, x9				// parent_ip (callsite's LR)
 	ldr_l	x2, function_trace_op		// op
@@ -115,9 +122,27 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	ldr	x30, [sp, #S_LR]
 	ldr	x9, [sp, #S_PC]
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* grab the original return address from the stack */
+	ldr	x10, [sp, #PT_REGS_SIZE + 8]
+#endif
+
 	/* Restore the callsite's SP */
 	add	sp, sp, #PT_REGS_SIZE + 16
 
+	restore_return_address x9
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* compare the original return address with the actual one */
+	cmp	x10, x30
+	b.ne	0f
+
+	/*
+	 * If they are the same, unprotect it now. If it was modified, it will
+	 * be dealt with in return_to_handler() below.
+	 */
+	restore_return_address x30
+0:
+#endif
 	ret	x9
 SYM_CODE_END(ftrace_common)
 
@@ -329,6 +354,7 @@ SYM_CODE_START(return_to_handler)
 	ldp x6, x7, [sp, #48]
 	add sp, sp, #64
 
+	restore_return_address x30
 	ret
 SYM_CODE_END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-- 
2.35.1




More information about the linux-arm-kernel mailing list