[RFC PATCH v2] arm64: ftrace: Add shadow call stack protection
Ard Biesheuvel
ardb at kernel.org
Fri Dec 9 07:12:29 PST 2022
On Thu, 1 Dec 2022 at 19:17, Ard Biesheuvel <ardb at kernel.org> wrote:
>
> The low-level ftrace code performs some manipulations of the return
> address that might be vulnerable to abuse as a gadget. We'd prefer to
> protect this code using PAC where we can, but because signing the
> return address breaks pointer equality in ways that may interfere with
> ftrace in particular and the backtrace code in general, this needs
> some more careful thought.
>
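(Background for reviewers: PAC signing stores a pointer authentication
code in the otherwise-unused upper bits of the return address, so a
signed LR no longer compares equal to the raw text address. A minimal
sketch of the usual pattern, assuming in-kernel ptrauth:

	paciasp			// sign x30, using SP as the modifier
	...			// function body
	autiasp			// authenticate x30 and strip the PAC
	ret

Anything that compares a live x30 against an unsigned address, as the
function graph return patching and the unwinder do, would first have
to strip the PAC, e.g. using xpaci.)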
> In the meantime, let's make the ftrace_caller() and return_to_handler()
> routines a bit more robust when shadow call stacks are enabled, by
> shadowing the return addresses that are captured and potentially
> manipulated by ftrace.
>
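(For comparison, when CONFIG_SHADOW_CALL_STACK is enabled the compiler
already emits an equivalent idiom around every instrumented function,
with x18 reserved as the shadow call stack pointer (scs_sp), roughly:

	str	x30, [x18], #8		// prologue: push the return address
	...				// function body
	ldr	x30, [x18, #-8]!	// epilogue: reload it before returning
	ret

The trampolines touched below have to do this by hand, and they juggle
two return addresses (x9 and x30) rather than one.)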
> Cc: Mark Rutland <mark.rutland at arm.com>
> Cc: Kees Cook <keescook at chromium.org>
> Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
> ---
> This supersedes "[PATCH 4/4] arm64: ftrace: Add return address
> protection" sent out on the 29th of November.
>
Please disregard - it is better to fix this comprehensively, and I'm
about to send out a v3 of the original series.
> arch/arm64/kernel/entry-ftrace.S | 27 ++++++++++++++++++++
> 1 file changed, 27 insertions(+)
>
> diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
> index 30cc2a9d1757a6a7..4ce262e7d9456761 100644
> --- a/arch/arm64/kernel/entry-ftrace.S
> +++ b/arch/arm64/kernel/entry-ftrace.S
> @@ -12,6 +12,7 @@
>  #include <asm/assembler.h>
>  #include <asm/ftrace.h>
>  #include <asm/insn.h>
> +#include <asm/scs.h>
> 
>  #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
>  /*
> @@ -36,6 +37,11 @@
>  SYM_CODE_START(ftrace_caller)
>  	bti	c
> 
> +#if defined(CONFIG_SHADOW_CALL_STACK) && !defined(CONFIG_DYNAMIC_SCS)
> +	/* Push the callsite's LR and the current LR to the shadow stack */
> +	stp	x9, x30, [scs_sp], #16
> +#endif
> +
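(To spell out the resulting layout: the stp post-indexes scs_sp, so
after this push we have

	[scs_sp - 16] = x9	// the callsite's LR
	[scs_sp -  8] = x30	// our return address into the traced function

and the pops at the end of ftrace_caller below consume these slots in
the matching order.)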
>  	/* Save original SP */
>  	mov	x10, sp
>
> @@ -93,6 +99,24 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
>  	/* Restore the callsite's SP */
>  	add	sp, sp, #FREGS_SIZE + 32
>
> +#if defined(CONFIG_SHADOW_CALL_STACK) && !defined(CONFIG_DYNAMIC_SCS)
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +	/*
> +	 * The callsite's LR will be popped from the shadow call stack in
> +	 * return_to_handler() if a return via that function was inserted into
> +	 * the call stack by ftrace. That means we should leave the callsite's
> +	 * LR on the shadow call stack, and only pop the return address that
> +	 * takes us back to the callsite.
> +	 */
> +	adr	x10, return_to_handler
> +	cmp	x10, x30
> +	b.ne	0f
> +	ldr	x9, [scs_sp, #-8]!
> +	ret	x9
> +#endif
> +	/* Pop our return address and the callsite's LR from the shadow stack */
> +0:	ldp	x30, x9, [scs_sp, #-16]!
> +#endif
>  	ret	x9
>  SYM_CODE_END(ftrace_caller)
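(Working through the two exit paths: on the common path, the ldp
reloads both slots, i.e.

	ldp	x30, x9, [scs_sp, #-16]!
	// x30 <- callsite's LR, taken from the shadow copy rather than
	//        the value restored from the stack frame
	// x9  <- return address into the traced function
	ret	x9

On the graph tracer path, x30 was redirected to return_to_handler(),
so only the top slot is popped into x9, and the callsite's LR
deliberately stays on the shadow stack until return_to_handler()
consumes it.)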
>
> @@ -265,6 +289,9 @@ SYM_CODE_START(return_to_handler)
>  	ldp	x6, x7, [sp, #48]
>  	add	sp, sp, #64
> 
> +#if defined(CONFIG_SHADOW_CALL_STACK) && !defined(CONFIG_DYNAMIC_SCS)
> +	ldr	x30, [scs_sp, #-8]!
> +#endif
>  	ret
>  SYM_CODE_END(return_to_handler)
>  #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
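(And the matching pop: return_to_handler() normally receives the
original return address from ftrace_return_to_handler() via x0. With
SCS enabled we override that with the shadow copy that ftrace_caller
left behind:

	ldr	x30, [scs_sp, #-8]!	// shadow copy of the callsite's LR
	ret

so a corrupted return address recorded by the graph tracer can no
longer redirect control flow.)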
> --
> 2.35.1
>