[PATCH 3/3] arm64: stacktrace: always inline core stacktrace functions
Kalesh Singh
kaleshsingh at google.com
Wed Mar 29 01:59:02 PDT 2023
On Fri, Mar 24, 2023 at 6:50 AM Mark Rutland <mark.rutland at arm.com> wrote:
>
> The arm64 stacktrace code can be used in kprobe context, and so cannot
> be safely probed. Some (but not all) of the unwind functions are
> annotated with `NOKPROBE_SYMBOL()` to ensure this, with others marked as
> `__always_inline`, relying on the top-level unwind function being marked
> as `noinstr`.
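The two patterns being unified look roughly like this, a minimal sketch
of the pre-patch shape with the bodies elided, not a verbatim excerpt
from stacktrace.c:

    /* Pattern 1: out of line, explicitly excluded from kprobes. */
    static int notrace unwind_next(struct unwind_state *state)
    {
            /* ... step to the next frame record (elided) ... */
            return unwind_recover_return_address(state);
    }
    NOKPROBE_SYMBOL(unwind_next);           /* from <linux/kprobes.h> */

    /* Pattern 2: always inlined, inheriting the caller's protection. */
    static __always_inline void unwind_init_from_caller(struct unwind_state *state)
    {
            /* ... set up unwind state for the current task (elided) ... */
    }

Pattern 2 only works if every call chain ends in a function that is
itself unprobeable, which is what the `noinstr` on arch_stack_walk()
provides.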
>
> This patch consistently marks the internal functions in stacktrace.c
> as `__always_inline`, removing the need for NOKPROBE_SYMBOL(), since
> the top-level unwind function (arch_stack_walk()) is marked as
> `noinstr`. This makes the code more consistent, and it is a simpler
> pattern to follow for future additions to stacktrace.c.
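After the change there is a single shape: every internal function is
`__always_inline`, and only the out-of-line entry point carries an
annotation. Again a sketch rather than verbatim code; the
arch_stack_walk() prototype is the generic one from
<linux/stacktrace.h>:

    static __always_inline int
    unwind_next(struct unwind_state *state)
    {
            /* ... elided ... */
            return unwind_recover_return_address(state);
    }

    /*
     * The only out-of-line function: noinstr keeps it safe from
     * instrumentation, and all of the helpers above are inlined into it.
     */
    noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                                 void *cookie, struct task_struct *task,
                                 struct pt_regs *regs)
    {
            /* unwind_init_*() and unwind() are fully inlined here. */
    }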
>
> There should be no functional change as a result of this patch.
>
> Signed-off-by: Mark Rutland <mark.rutland at arm.com>
> Cc: Catalin Marinas <catalin.marinas at arm.com>
> Cc: Kalesh Singh <kaleshsingh at google.com>
> Cc: Madhavan T. Venkataraman <madvenka at linux.microsoft.com>
> Cc: Mark Brown <broonie at kernel.org>
> Cc: Will Deacon <will at kernel.org>
Reviewed-by: Kalesh Singh <kaleshsingh at google.com>
Thanks,
Kalesh
> ---
> arch/arm64/kernel/stacktrace.c | 23 +++++++++++++----------
> 1 file changed, 13 insertions(+), 10 deletions(-)
>
> diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
> index 5857e2de147a7..ffe7f6c93fea8 100644
> --- a/arch/arm64/kernel/stacktrace.c
> +++ b/arch/arm64/kernel/stacktrace.c
> @@ -25,8 +25,9 @@
>   *
>   * The regs must be on a stack currently owned by the calling task.
>   */
> -static __always_inline void unwind_init_from_regs(struct unwind_state *state,
> -                                                  struct pt_regs *regs)
> +static __always_inline void
> +unwind_init_from_regs(struct unwind_state *state,
> +                      struct pt_regs *regs)
>  {
>          unwind_init_common(state, current);
>
> @@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
>   *
>   * The function which invokes this must be noinline.
>   */
> -static __always_inline void unwind_init_from_caller(struct unwind_state *state)
> +static __always_inline void
> +unwind_init_from_caller(struct unwind_state *state)
>  {
>          unwind_init_common(state, current);
>
> @@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
>   * duration of the unwind, or the unwind will be bogus. It is never valid to
>   * call this for the current task.
>   */
> -static __always_inline void unwind_init_from_task(struct unwind_state *state,
> -                                                  struct task_struct *task)
> +static __always_inline void
> +unwind_init_from_task(struct unwind_state *state,
> +                      struct task_struct *task)
>  {
>          unwind_init_common(state, task);
>
> @@ -101,7 +104,8 @@ unwind_recover_return_address(struct unwind_state *state)
>   * records (e.g. a cycle), determined based on the location and fp value of A
>   * and the location (but not the fp value) of B.
>   */
> -static int notrace unwind_next(struct unwind_state *state)
> +static __always_inline int
> +unwind_next(struct unwind_state *state)
>  {
>          struct task_struct *tsk = state->task;
>          unsigned long fp = state->fp;
> @@ -119,10 +123,10 @@ static int notrace unwind_next(struct unwind_state *state)
>  
>          return unwind_recover_return_address(state);
>  }
> -NOKPROBE_SYMBOL(unwind_next);
>
> -static void notrace unwind(struct unwind_state *state,
> -                           stack_trace_consume_fn consume_entry, void *cookie)
> +static __always_inline void
> +unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
> +       void *cookie)
>  {
>          if (unwind_recover_return_address(state))
>                  return;
> @@ -137,7 +141,6 @@ static void notrace unwind(struct unwind_state *state,
>                          break;
>          }
>  }
> -NOKPROBE_SYMBOL(unwind);
>
>  /*
>   * Per-cpu stacks are only accessible when unwinding the current task in a
> --
> 2.30.2
>