[PATCH 2/2] arm64: Configure kernel's PTR_AUTH key when it is built with PTR_AUTH.
Peter Collingbourne
pcc at google.com
Mon Dec 7 18:07:07 EST 2020
On Mon, Dec 7, 2020 at 2:46 PM Daniel Kiss <daniel.kiss at arm.com> wrote:
>
> If the kernel is not compiled with CONFIG_ARM64_PTR_AUTH_KERNEL,
> then the kernel does not need a key, and the kernel's key can be disabled.
>
> Signed-off-by: Daniel Kiss <daniel.kiss at arm.com>
> ---
> arch/arm64/include/asm/asm_pointer_auth.h | 68 ++++++++++++++++-------
> arch/arm64/include/asm/processor.h | 2 +
> arch/arm64/kernel/asm-offsets.c | 4 ++
> 3 files changed, 55 insertions(+), 19 deletions(-)
>
> diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
> index 52dead2a8640..af3d16027e8f 100644
> --- a/arch/arm64/include/asm/asm_pointer_auth.h
> +++ b/arch/arm64/include/asm/asm_pointer_auth.h
> @@ -14,6 +14,12 @@
> * thread.keys_user.ap*.
> */
> .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
> +#ifndef CONFIG_ARM64_PTR_AUTH_KERNEL
> + /* Reenable A key */
> + mrs \tmp1, sctlr_el1
> + orr \tmp1, \tmp1, SCTLR_ELx_ENIA
> + msr sctlr_el1, \tmp1
> +#endif
We should avoid an unconditional MSR on exit like this as it is
expensive (for my PR_PAC_SET_ENABLED_KEYS series I measured the cost
of entry/exit MSR as 43.7ns on Cortex-A75 and 33.0ns on Apple M1). In
that series I take care not to touch SCTLR_EL1 unless necessary.
Likewise for the MSRs on entry below.
> mov \tmp1, #THREAD_KEYS_USER
> add \tmp1, \tsk, \tmp1
> alternative_if_not ARM64_HAS_ADDRESS_AUTH
> @@ -39,6 +45,36 @@ alternative_if ARM64_HAS_GENERIC_AUTH
> alternative_else_nop_endif
> .endm
>
> + .macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
> + mrs \tmp1, id_aa64isar1_el1
> + ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
> + cbz \tmp1, .Lno_addr_auth\@
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> + mov_q \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
> + SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
> +#else
> + mov_q \tmp1, (SCTLR_ELx_ENIB | \
> + SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
> +#endif
If you leave IA enabled here then you shouldn't need to MSR on entry
and exit. If no PAC instructions are used in the kernel then it
shouldn't matter if it is left enabled.
Peter
> + mrs \tmp2, sctlr_el1
> + orr \tmp2, \tmp2, \tmp1
> + msr sctlr_el1, \tmp2
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> + __ptrauth_keys_install_kernel_nosync \tsk, \tmp1, \tmp2, \tmp3
> +#endif
> + isb
> +.Lno_addr_auth\@:
> + .endm
> +
> + .macro ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
> +alternative_if_not ARM64_HAS_ADDRESS_AUTH
> + b .Lno_addr_auth\@
> +alternative_else_nop_endif
> + __ptrauth_keys_init_cpu \tsk, \tmp1, \tmp2, \tmp3
> +.Lno_addr_auth\@:
> + .endm
> +
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
> mov \tmp1, #THREAD_KEYS_KERNEL
> add \tmp1, \tsk, \tmp1
> @@ -60,29 +96,23 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
> alternative_else_nop_endif
> .endm
>
> - .macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
> - mrs \tmp1, id_aa64isar1_el1
> - ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
> - cbz \tmp1, .Lno_addr_auth\@
> - mov_q \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
> - SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
> - mrs \tmp2, sctlr_el1
> - orr \tmp2, \tmp2, \tmp1
> - msr sctlr_el1, \tmp2
> - __ptrauth_keys_install_kernel_nosync \tsk, \tmp1, \tmp2, \tmp3
> - isb
> -.Lno_addr_auth\@:
> +#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */
> +
> + .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
> + mrs \tmp1, sctlr_el1
> + and \tmp1, \tmp1, ~SCTLR_ELx_ENIA
> + msr sctlr_el1, \tmp1
> .endm
>
> - .macro ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
> -alternative_if_not ARM64_HAS_ADDRESS_AUTH
> - b .Lno_addr_auth\@
> -alternative_else_nop_endif
> - __ptrauth_keys_init_cpu \tsk, \tmp1, \tmp2, \tmp3
> -.Lno_addr_auth\@:
> + .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
> + mrs \tmp1, sctlr_el1
> + and \tmp1, \tmp1, ~SCTLR_ELx_ENIA
> + msr sctlr_el1, \tmp1
> .endm
>
> -#else /* CONFIG_ARM64_PTR_AUTH */
> +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
> +
> +#else /* !CONFIG_ARM64_PTR_AUTH */
>
> .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
> .endm
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index fce8cbecd6bc..e20888b321e3 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -150,8 +150,10 @@ struct thread_struct {
> struct debug_info debug; /* debugging */
> #ifdef CONFIG_ARM64_PTR_AUTH
> struct ptrauth_keys_user keys_user;
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> struct ptrauth_keys_kernel keys_kernel;
> #endif
> +#endif
> #ifdef CONFIG_ARM64_MTE
> u64 sctlr_tcf0;
> u64 gcr_user_incl;
> diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
> index 7d32fc959b1a..cb7965a9f505 100644
> --- a/arch/arm64/kernel/asm-offsets.c
> +++ b/arch/arm64/kernel/asm-offsets.c
> @@ -46,7 +46,9 @@ int main(void)
> DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
> #ifdef CONFIG_ARM64_PTR_AUTH
> DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
> +#endif
> #endif
> BLANK();
> DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
> @@ -141,7 +143,9 @@ int main(void)
> DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
> DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
> DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
> +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
> DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
> +#endif
> BLANK();
> #endif
> return 0;
> --
> 2.17.1
>
More information about the linux-arm-kernel
mailing list