[PATCH 3/3] ARM: vfp: Use vfp_lock() in vfp_entry().

Ard Biesheuvel ardb at kernel.org
Tue Jun 27 03:22:23 PDT 2023


On Thu, 15 Jun 2023 at 12:17, Sebastian Andrzej Siewior
<bigeasy at linutronix.de> wrote:
>
> vfp_entry() is invoked from the exception handler and is fully preemptible.
> It uses local_bh_disable() to remain uninterrupted while checking the
> VFP state.
> This does not work on PREEMPT_RT because local_bh_disable() still
> synchronizes the section against softirqs, but the context remains
> fully preemptible.
>
> Use vfp_lock() for uninterrupted access.
>
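
[ For reference: vfp_lock()/vfp_unlock() are introduced by an earlier
  patch in this series and are not shown here. Presumably they look
  roughly like the sketch below -- keep local_bh_disable() where
  disabling BH still implies non-preemption, use preempt_disable() on
  PREEMPT_RT: ]

/* Sketch only -- the real helpers live in arch/arm/vfp/vfpmodule.c;
 * the exact form may differ. */
static void vfp_lock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_disable();
	else
		preempt_disable();
}

static void vfp_unlock(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_bh_enable();
	else
		preempt_enable();
}
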
> VFP_bounce() is invoked from within vfp_entry() and may send a signal.
> Sending a signal uses a spinlock_t, which becomes a sleeping lock
> on PREEMPT_RT and must not be acquired within a preempt-disabled
> section. Move the vfp_raise_sigfpe() invocations outside of the
> preempt-disabled section.
>

Isn't the latter a separate issue that predates the softirq changes?
If so, it should be fixed separately too, and backported to -stable
(unless nobody uses out-of-tree PREEMPT_RT with those kernels)
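
[ For context on the signalling path mentioned above: vfp_raise_sigfpe()
  ends up in send_sig_fault(), which takes the task's sighand->siglock --
  a spinlock_t, i.e. a sleeping lock on PREEMPT_RT. Paraphrased from
  arch/arm/vfp/vfpmodule.c; the exact body may differ between kernel
  versions: ]

static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	/*
	 * send_sig_fault() -> do_send_sig_info() acquires sighand->siglock,
	 * a spinlock_t that can sleep on PREEMPT_RT.  That is why the patch
	 * defers this call until after vfp_unlock().
	 */
	send_sig_fault(SIGFPE, sicode,
		       (void __user *)(instruction_pointer(regs) - 4),
		       current);
}
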

> Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
> ---
>  arch/arm/vfp/vfpmodule.c | 33 ++++++++++++++++++++-------------
>  1 file changed, 20 insertions(+), 13 deletions(-)
>
> diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
> index 524aec81134ba..67d7042bc3d5c 100644
> --- a/arch/arm/vfp/vfpmodule.c
> +++ b/arch/arm/vfp/vfpmodule.c
> @@ -267,7 +267,7 @@ static void vfp_panic(char *reason, u32 inst)
>  /*
>   * Process bitmask of exception conditions.
>   */
> -static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
> +static int vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr)
>  {
>         int si_code = 0;
>
> @@ -275,8 +275,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
>
>         if (exceptions == VFP_EXCEPTION_ERROR) {
>                 vfp_panic("unhandled bounce", inst);
> -               vfp_raise_sigfpe(FPE_FLTINV, regs);
> -               return;
> +               return FPE_FLTINV;
>         }
>
>         /*
> @@ -304,8 +303,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
>         RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
>         RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
>
> -       if (si_code)
> -               vfp_raise_sigfpe(si_code, regs);
> +       return si_code;
>  }
>
>  /*
> @@ -351,6 +349,8 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
>  static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
>  {
>         u32 fpscr, orig_fpscr, fpsid, exceptions;
> +       int si_code2 = 0;
> +       int si_code = 0;
>
>         pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
>
> @@ -396,8 +396,8 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
>                  * unallocated VFP instruction but with FPSCR.IXE set and not
>                  * on VFP subarch 1.
>                  */
> -                vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
> -               return;
> +               si_code = vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr);
> +               goto exit;
>         }
>
>         /*
> @@ -421,14 +421,14 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
>          */
>         exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
>         if (exceptions)
> -               vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
> +               si_code2 = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
>
>         /*
>          * If there isn't a second FP instruction, exit now. Note that
>          * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
>          */
>         if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
> -               return;
> +               goto exit;
>
>         /*
>          * The barrier() here prevents fpinst2 being read
> @@ -440,7 +440,13 @@ static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
>   emulate:
>         exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
>         if (exceptions)
> -               vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
> +               si_code = vfp_raise_exceptions(exceptions, trigger, orig_fpscr);
> +exit:
> +       vfp_unlock();
> +       if (si_code2)
> +               vfp_raise_sigfpe(si_code2, regs);
> +       if (si_code)
> +               vfp_raise_sigfpe(si_code, regs);
>  }
>
>  static void vfp_enable(void *unused)
> @@ -707,7 +713,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
>         if (!user_mode(regs))
>                 return vfp_kmode_exception(regs, trigger);
>
> -       local_bh_disable();
> +       vfp_lock();
>         fpexc = fmrx(FPEXC);
>
>         /*
> @@ -772,6 +778,7 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
>                  * replay the instruction that trapped.
>                  */
>                 fmxr(FPEXC, fpexc);
> +               vfp_unlock();
>         } else {
>                 /* Check for synchronous or asynchronous exceptions */
>                 if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
> @@ -786,17 +793,17 @@ static int vfp_support_entry(struct pt_regs *regs, u32 trigger)
>                         if (!(fpscr & FPSCR_IXE)) {
>                                 if (!(fpscr & FPSCR_LENGTH_MASK)) {
>                                         pr_debug("not VFP\n");
> -                                       local_bh_enable();
> +                                       vfp_unlock();
>                                         return -ENOEXEC;
>                                 }
>                                 fpexc |= FPEXC_DEX;
>                         }
>                 }
>  bounce:                regs->ARM_pc += 4;
> +               /* VFP_bounce() will invoke vfp_unlock() */
>                 VFP_bounce(trigger, fpexc, regs);
>         }
>
> -       local_bh_enable();
>         return 0;
>  }
>
> --
> 2.40.1
>
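
[ To summarize the locking after this patch (a condensed sketch assembled
  from the hunks above, not a literal copy of vfp_support_entry(); the
  branch conditions are placeholders): vfp_lock() is taken once on entry
  and every exit path drops it -- either directly, or, on the bounce
  path, inside VFP_bounce() before any deferred SIGFPE is delivered: ]

/* Illustrative only -- the two booleans stand in for the elided logic. */
static int vfp_support_entry_sketch(struct pt_regs *regs, u32 trigger)
{
	bool context_ready = false;	/* placeholder: VFP state restored? */
	bool not_a_vfp_insn = false;	/* placeholder: FPSCR checks failed? */
	u32 fpexc;

	vfp_lock();
	fpexc = fmrx(FPEXC);

	if (context_ready) {
		/* Exit path 1: replay the trapped instruction. */
		fmxr(FPEXC, fpexc);
		vfp_unlock();
	} else if (not_a_vfp_insn) {
		/* Exit path 2: not ours, let the caller handle it. */
		vfp_unlock();
		return -ENOEXEC;
	} else {
		regs->ARM_pc += 4;
		/*
		 * Exit path 3: VFP_bounce() calls vfp_unlock() itself and
		 * only then raises any pending SIGFPE, with preemption
		 * enabled again.
		 */
		VFP_bounce(trigger, fpexc, regs);
	}
	return 0;
}
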


