[PATCH v3 09/13] arm64: debug: split single stepping exception entry
Anshuman Khandual
anshuman.khandual at arm.com
Tue Jun 17 23:25:37 PDT 2025
On 09/06/25 11:04 PM, Ada Couprie Diaz wrote:
> Currently all debug exceptions share common entry code and are routed
> to `do_debug_exception()`, which calls dynamically-registered
> handlers for each specific debug exception. This is unfortunate as
> different debug exceptions have different entry handling requirements,
> and it would be better to handle these distinct requirements earlier.
I guess it makes sense to mention this in the commit message, as it is the
common motivation for these dynamic-to-static exception handler changes.
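To make that motivation concrete for other readers, here is a toy, userspace-only
sketch of the two dispatch styles - none of this is kernel code, and the names
(exc_handler_t, dispatch_dynamic(), dispatch_static()) are invented purely for
illustration. With a runtime-registered table, every debug exception funnels
through one generic entry, so per-exception constraints (BP hardening, erratum
workarounds, preemption rules) can only be applied after the lookup; a static
switch in the entry code lets each exception class get its own tailored path.

#include <stdio.h>

/* Toy stand-ins for the real debug exception classes. */
enum exc_class { EXC_SOFTSTEP, EXC_BREAKPOINT, EXC_NR };

/*
 * Dynamic style: handlers are registered into a table at init time,
 * so the common entry code cannot special-case any of them.
 */
typedef void (*exc_handler_t)(unsigned long esr);
static exc_handler_t handler_table[EXC_NR];

static void dispatch_dynamic(enum exc_class ec, unsigned long esr)
{
	/* One-size-fits-all entry: same treatment for every class. */
	if (handler_table[ec])
		handler_table[ec](esr);
}

/*
 * Static style: the entry code switches on the class itself, so each
 * case can apply its own early mitigation or preemption handling.
 */
static void handle_softstep(unsigned long esr)   { printf("softstep 0x%lx\n", esr); }
static void handle_breakpoint(unsigned long esr) { printf("breakpoint 0x%lx\n", esr); }

static void dispatch_static(enum exc_class ec, unsigned long esr)
{
	switch (ec) {
	case EXC_SOFTSTEP:
		/* e.g. apply mitigations / unmask interrupts early here */
		handle_softstep(esr);
		break;
	case EXC_BREAKPOINT:
		handle_breakpoint(esr);
		break;
	default:
		break;
	}
}

int main(void)
{
	handler_table[EXC_SOFTSTEP] = handle_softstep;
	dispatch_dynamic(EXC_SOFTSTEP, 0x32);
	dispatch_static(EXC_BREAKPOINT, 0x30);
	return 0;
}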
>
> The single stepping exception has the most constraints: it can be
> exploited to train branch predictors and it needs special handling at EL1
> for the Cortex-A76 erratum #1463225. We need to conserve all those
> mitigations.
> However, it does not write an address at FAR_EL1, as only hardware
> watchpoints do so.
>
> The single-step handler does its own signaling if it needs to and only
> returns 0, so we can call it directly from `entry-common.c`.
>
> Split the single stepping exception entry, adjust the function signature,
> keep the security mitigation and erratum handling.
>
> Move the call to `arm64_apply_bp_hardening()` to `entry-common.c` so that
> we can do it as early as possible, and only for the exceptions coming
> from EL0, where it is needed.
> This is safe to do as it is `noinstr`, as are all the functions it
> may call. `el0_ia()` and `el0_pc()` already call it this way.
>
> When taking a soft-step exception from EL0, most of the single stepping
> handling is safely preemptible: the only possible handler is
> `uprobe_singlestep_handler()`. It only operates on task-local data and
> properly checks its validity, then raises a Thread Information Flag,
> processed before returning to userspace in `do_notify_resume()`, which
> is already preemptible.
> However, the soft-step handler first calls `reinstall_suspended_bps()`
> to check if there is any hardware breakpoint or watchpoint pending
> or already stepped through.
> This cannot be preempted as it manipulates the hardware breakpoint and
> watchpoint registers.
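As a side note for anyone checking the preemption argument: the shape of the
deferral is "record pending work in the constrained context, do the real work on
the way back to userspace". A toy, userspace-only sketch of that shape follows -
pending_work, fake_step_handler() and fake_notify_resume() are invented names,
this is not the uprobes code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a thread information flag such as the one uprobes raises. */
static volatile bool pending_work;

/*
 * Runs in the constrained (exception-like) context: it only records that
 * something needs doing, touching task-local state.
 */
static void fake_step_handler(void)
{
	pending_work = true;
}

/*
 * Runs later, in a fully preemptible context, like do_notify_resume():
 * this is where the potentially sleeping work actually happens.
 */
static void fake_notify_resume(void)
{
	if (pending_work) {
		pending_work = false;
		printf("processing deferred single-step work\n");
	}
}

int main(void)
{
	fake_step_handler();   /* "exception" records pending work    */
	fake_notify_resume();  /* "return to userspace" processes it  */
	return 0;
}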
>
> Move the call to `try_step_suspended_breakpoints()` to `entry-common.c`
> and adjust the relevant comments.
> We can now safely unmask interrupts before handling the step itself,
> fixing a PREEMPT_RT issue where the handler could call a sleeping function
> with preemption disabled.
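For other reviewers, my reading of the resulting EL0 ordering - paraphrased from
the el0_softstp() hunk further down, with my own comments added, so not a
verbatim copy of the patch:

	/* Mitigation first, before anything instrumentable runs. */
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	/*
	 * Step any suspended hardware breakpoint while interrupts are still
	 * masked: this touches the debug hardware registers and must not be
	 * preempted.
	 */
	if (!try_step_suspended_breakpoints(regs)) {
		/*
		 * Only the preemptible part remains, so interrupts can be
		 * unmasked before calling the handler - which is what fixes
		 * the PREEMPT_RT sleeping-while-atomic report.
		 */
		local_daif_restore(DAIF_PROCCTX);
		do_softstep(esr, regs);
	}
	exit_to_user_mode(regs);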
>
> Signed-off-by: Ada Couprie Diaz <ada.coupriediaz at arm.com>
> Closes: https://lore.kernel.org/linux-arm-kernel/Z6YW_Kx4S2tmj2BP@uudg.org/
> ---
> arch/arm64/include/asm/exception.h | 1 +
> arch/arm64/kernel/debug-monitors.c | 19 +++----------
> arch/arm64/kernel/entry-common.c | 43 ++++++++++++++++++++++++++++++
> arch/arm64/kernel/hw_breakpoint.c | 2 +-
> 4 files changed, 49 insertions(+), 16 deletions(-)
>
> diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
> index 926bad7b6704..d6648b68a4c3 100644
> --- a/arch/arm64/include/asm/exception.h
> +++ b/arch/arm64/include/asm/exception.h
> @@ -62,6 +62,7 @@ void do_el1_gcs(struct pt_regs *regs, unsigned long esr);
> void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
> struct pt_regs *regs);
> void do_breakpoint(unsigned long esr, struct pt_regs *regs);
> +void do_softstep(unsigned long esr, struct pt_regs *regs);
> void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
> void do_sve_acc(unsigned long esr, struct pt_regs *regs);
> void do_sme_acc(unsigned long esr, struct pt_regs *regs);
> diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
> index 74ffdfeff76f..3f5503d61aee 100644
> --- a/arch/arm64/kernel/debug-monitors.c
> +++ b/arch/arm64/kernel/debug-monitors.c
> @@ -21,6 +21,7 @@
> #include <asm/cputype.h>
> #include <asm/daifflags.h>
> #include <asm/debug-monitors.h>
> +#include <asm/exception.h>
> #include <asm/kgdb.h>
> #include <asm/kprobes.h>
> #include <asm/system_misc.h>
> @@ -188,18 +189,10 @@ static void send_user_sigtrap(int si_code)
> "User debug trap");
> }
>
> -static int single_step_handler(unsigned long unused, unsigned long esr,
> - struct pt_regs *regs)
> +void do_softstep(unsigned long esr, struct pt_regs *regs)
do_softstep() has been chosen here just to match the corresponding ESR
macros ESR_ELx_EC_SOFTSTP_[LOW|CUR] - which makes sense and keeps the
naming consistent.
> {
> - /*
> - * If we are stepping a pending breakpoint, call the hw_breakpoint
> - * handler first.
> - */
> - if (try_step_suspended_breakpoints(regs))
> - return 0;
This check has been moved to the individual exception handling functions.
> -
> if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
> - return 0;
> + return;
>
> if (user_mode(regs)) {
> send_user_sigtrap(TRAP_TRACE);
> @@ -219,10 +212,8 @@ static int single_step_handler(unsigned long unused, unsigned long esr,
> */
> set_regs_spsr_ss(regs);
> }
> -
> - return 0;
> }
> -NOKPROBE_SYMBOL(single_step_handler);
> +NOKPROBE_SYMBOL(do_softstep);
>
> static int call_break_hook(struct pt_regs *regs, unsigned long esr)
> {
> @@ -329,8 +320,6 @@ NOKPROBE_SYMBOL(try_handle_aarch32_break);
>
> void __init debug_traps_init(void)
> {
> - hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
> - TRAP_TRACE, "single-step handler");
Right, this dynamic hook registration is no longer required.
> hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
> TRAP_BRKPT, "BRK handler");
> }
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index be2add6b4ae3..5fb636efd554 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -535,6 +535,24 @@ static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
> arm64_exit_el1_dbg(regs);
> }
>
> +static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
> +{
> + arm64_enter_el1_dbg(regs);
> + if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
Although mentioned in the commit message, this constraint is not there
in the existing code.
> + debug_exception_enter(regs);
> + /*
> + * After handling a breakpoint, we suspend the breakpoint
> + * and use single-step to move to the next instruction.
> + * If we are stepping a suspended breakpoint there's nothing more to do:
> + * the single-step is complete.
> + */
> + if (!try_step_suspended_breakpoints(regs))
> + do_softstep(esr, regs);
> + debug_exception_exit(regs);
> + }
> + arm64_exit_el1_dbg(regs);
> +}
> +
> static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
> {
> unsigned long far = read_sysreg(far_el1);
> @@ -587,6 +605,8 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
> el1_breakpt(regs, esr);
> break;
> case ESR_ELx_EC_SOFTSTP_CUR:
> + el1_softstp(regs, esr);
> + break;
Changes to EL1 exception handling as required.
> case ESR_ELx_EC_WATCHPT_CUR:
> case ESR_ELx_EC_BRK64:
> el1_dbg(regs, esr);
> @@ -793,6 +813,25 @@ static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
> exit_to_user_mode(regs);
> }
>
> +static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
> +{
> + if (!is_ttbr0_addr(regs->pc))
> + arm64_apply_bp_hardening();
> +
> + enter_from_user_mode(regs);
> + /*
> + * After handling a breakpoint, we suspend the breakpoint
> + * and use single-step to move to the next instruction.
> + * If we are stepping a suspended breakpoint there's nothing more to do:
> + * the single-step is complete.
> + */
> + if (!try_step_suspended_breakpoints(regs)) {
> + local_daif_restore(DAIF_PROCCTX);
> + do_softstep(esr, regs);
> + }
> + exit_to_user_mode(regs);
> +}
> +
> static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
> {
> /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
> @@ -875,6 +914,8 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
> el0_breakpt(regs, esr);
> break;
> case ESR_ELx_EC_SOFTSTP_LOW:
> + el0_softstp(regs, esr);
> + break;
> case ESR_ELx_EC_WATCHPT_LOW:
> case ESR_ELx_EC_BRK64:
> el0_dbg(regs, esr);
> @@ -997,6 +1038,8 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
> el0_breakpt(regs, esr);
> break;
> case ESR_ELx_EC_SOFTSTP_LOW:
> + el0_softstp(regs, esr);
> + break;
Changes to EL0 exception handling as required.
> case ESR_ELx_EC_WATCHPT_LOW:
> case ESR_ELx_EC_BKPT32:
> el0_dbg(regs, esr);
> diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
> index 309ae24d4548..8a80e13347c8 100644
> --- a/arch/arm64/kernel/hw_breakpoint.c
> +++ b/arch/arm64/kernel/hw_breakpoint.c
> @@ -854,7 +854,7 @@ bool try_step_suspended_breakpoints(struct pt_regs *regs)
> bool handled_exception = false;
>
> /*
> - * Called from single-step exception handler.
> + * Called from single-step exception entry.
Hmm, alright, but it does not make much difference though.
> * Return true if we stepped a breakpoint and can resume execution,
> * false if we need to handle a single-step.
> */