[PATCH 1/2] arm64: Move handling of erratum 1418040 into C code

Stephen Boyd <swboyd at chromium.org>
Tue Jul 28 16:04:08 EDT 2020


Quoting Marc Zyngier (2020-07-28 02:21:11)
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 35de8ba60e3d..44445d471442 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -169,19 +169,6 @@ alternative_cb_end
>         stp     x28, x29, [sp, #16 * 14]
>  
>         .if     \el == 0
> -       .if     \regsize == 32
> -       /*
> -        * If we're returning from a 32-bit task on a system affected by
> -        * 1418040 then re-enable userspace access to the virtual counter.
> -        */

Can this comment go above the function in C?
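
Something like this above the function, perhaps (just a sketch, carrying
over the gist of the assembly comment):

/*
 * Erratum 1418040 affects 32-bit EL0 reads of the virtual counter, so
 * userspace access to it is kept disabled while a 32-bit task runs and
 * re-enabled when switching back to a 64-bit task.
 */
static void erratum_1418040_thread_switch(struct task_struct *prev,
					  struct task_struct *next)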

> -#ifdef CONFIG_ARM64_ERRATUM_1418040
> -alternative_if ARM64_WORKAROUND_1418040
> -       mrs     x0, cntkctl_el1
> -       orr     x0, x0, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
> -       msr     cntkctl_el1, x0
> -alternative_else_nop_endif
> -#endif
> -       .endif
>         clear_gp_regs
>         mrs     x21, sp_el0
>         ldr_this_cpu    tsk, __entry_task, x20
> @@ -337,14 +324,6 @@ alternative_else_nop_endif
>         tst     x22, #PSR_MODE32_BIT            // native task?
>         b.eq    3f
>  
> -#ifdef CONFIG_ARM64_ERRATUM_1418040
> -alternative_if ARM64_WORKAROUND_1418040
> -       mrs     x0, cntkctl_el1
> -       bic     x0, x0, #2                      // ARCH_TIMER_USR_VCT_ACCESS_EN
> -       msr     cntkctl_el1, x0
> -alternative_else_nop_endif
> -#endif
> -
>  #ifdef CONFIG_ARM64_ERRATUM_845719
>  alternative_if ARM64_WORKAROUND_845719
>  #ifdef CONFIG_PID_IN_CONTEXTIDR
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 6089638c7d43..87c33f7c536b 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -515,6 +515,34 @@ static void entry_task_switch(struct task_struct *next)
>         __this_cpu_write(__entry_task, next);
>  }
>  
> +static void erratum_1418040_thread_switch(struct task_struct *prev,

Should this be marked __always_inline so that the cpus_have_const_cap()
check gets inlined into the caller and the branch to this function is
avoided when there's nothing to do?
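
i.e. something like (untested sketch, keeping the body as is):

static __always_inline void erratum_1418040_thread_switch(struct task_struct *prev,
							   struct task_struct *next)

The IS_ENABLED() half of the condition already folds away at compile
time, so with the inline the only cost left on unaffected systems
should be the static branch at the call site.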

> +                                         struct task_struct *next)
> +{
> +       bool prev32, next32;
> +       u64 val;
> +
> +       if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
> +             cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
> +               return;
> +
> +       prev32 = (!(prev->flags & PF_KTHREAD) &&
> +                 is_compat_thread(task_thread_info(prev)));
> +       next32 = (!(next->flags & PF_KTHREAD) &&
> +                 is_compat_thread(task_thread_info(next)));
> +
> +       if (prev32 == next32)
> +               return;
> +
> +       val = read_sysreg(cntkctl_el1);
> +
> +       if (prev32 & !next32)
> +               val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
> +       else
> +               val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
> +
> +       write_sysreg(val, cntkctl_el1);
> +}
> +
>  /*
>   * Thread switching.
>   */


