[PATCH] arm64: Introduce sysctl to disable pointer authentication

Steve Capper steve.capper at arm.com
Tue Jul 7 13:32:32 EDT 2020


Pointer authentication is a mandatory feature in the Armv8.3
architecture that provides protection against return oriented
programming attacks. (meaning that all Arm CPUs targeting at least
Armv8.3 will have this feature).

Once CONFIG_ARM64_PTR_AUTH=y, any systems with the hardware support for
pointer authentication will automatically have it enabled by the kernel.

There are, however, situations where end users may want to disable
pointer authentication. One could be tracking down/working around a bug
in userspace relating to pointer auth. Also, one may wish to quantify
the performance overhead of pointer auth by running a workload
with/without it.

Linux distributions operate a single binary policy wherever possible,
meaning any mechanism to disable pointer authentication as a parameter
would be preferred greatly over building/supporting multiple kernels.

For hardware with pointer auth, this patch introduces the sysctl:
abi.ptrauth_disabled=1
which will disable pointer authentication for any new processes that are
exec'ed. (Reverting to 0 will then re-enable pointer authentication for
new processes).

One can set this sysctl on the kernel command line:
sysctl.abi.ptrauth_disabled=1
And this will ensure that pointer authentication is disabled for the init
process onwards.

Note that when pointer authentication is disabled by sysctl, the
capabilities are still exposed to userspace and pointer authentication
instructions can execute (they will simply act as NOPs).

Signed-off-by: Steve Capper <steve.capper at arm.com>
---
 .../arm64/pointer-authentication.rst          | 32 ++++++++++++++++
 arch/arm64/include/asm/asm_pointer_auth.h     | 37 +++++++++++++++++++
 arch/arm64/include/asm/pointer_auth.h         | 10 +++--
 arch/arm64/include/asm/processor.h            |  1 +
 arch/arm64/kernel/asm-offsets.c               |  1 +
 arch/arm64/kernel/entry.S                     |  3 ++
 arch/arm64/kernel/process.c                   | 31 +++++++++++++++-
 7 files changed, 111 insertions(+), 4 deletions(-)

diff --git a/Documentation/arm64/pointer-authentication.rst b/Documentation/arm64/pointer-authentication.rst
index 30b2ab06526b..e8bc8336bbf2 100644
--- a/Documentation/arm64/pointer-authentication.rst
+++ b/Documentation/arm64/pointer-authentication.rst
@@ -107,3 +107,35 @@ filter out the Pointer Authentication system key registers from
 KVM_GET/SET_REG_* ioctls and mask those features from cpufeature ID
 register. Any attempt to use the Pointer Authentication instructions will
 result in an UNDEFINED exception being injected into the guest.
+
+
+Disabling Pointer Authentication
+--------------------------------
+There are situations where end users may want to disable pointer
+authentication. One could be tracking down/working around a bug in
+userspace relating to pointer authentication. Also, one may wish to
+quantify the performance overhead of pointer authentication by running a
+workload with/without it.
+
+For hardware with pointer auth, there is the following sysctl:
+
+.. code-block:: none
+
+        abi.ptrauth_disabled=1
+
+which will disable pointer authentication for any new processes that are
+exec'ed. (Reverting to 0 will then re-enable pointer authentication for
+any new processes).
+
+One can also set this sysctl on the kernel command line:
+
+.. code-block:: none
+
+        sysctl.abi.ptrauth_disabled=1
+
+And this will ensure that pointer authentication is disabled for the init
+process onwards.
+
+Note that when pointer authentication is disabled by sysctl, the
+capabilities are still exposed to userspace and pointer authentication
+instructions can execute (they will simply act as NOPs).
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index 52dead2a8640..29fdd379bf1c 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -82,6 +82,38 @@ alternative_else_nop_endif
 .Lno_addr_auth\@:
 	.endm
 
+	.macro ptrauth_switch_to_user tsk, tmp1, tmp2, tmpw
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	.Ldont_disable\@
+alternative_else_nop_endif
+	mov	\tmp2, #THREAD_PTRAUTH_DISABLED
+	ldr	\tmpw, [\tsk, \tmp2]
+	cbz	\tmpw, .Ldont_disable\@
+	mrs	\tmp1, sctlr_el1
+	mov_q	\tmp2, ~(SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+			SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
+	and	\tmp1, \tmp1, \tmp2
+	msr	sctlr_el1, \tmp1
+		// about to drop to EL0 which will give us an implicit isb
+.Ldont_disable\@:
+	.endm
+
+	.macro ptrauth_switch_to_kernel tsk, tmp1, tmp2, tmpw
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+	b	.Ldont_reenable\@
+alternative_else_nop_endif
+	mov	\tmp2, #THREAD_PTRAUTH_DISABLED
+	ldr	\tmpw, [\tsk, \tmp2]
+	cbz	\tmpw, .Ldont_reenable\@
+	mrs	\tmp1, sctlr_el1
+	mov_q	\tmp2, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+			SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
+	orr	\tmp1, \tmp1, \tmp2
+	msr	sctlr_el1, \tmp1
+	isb	// already in kernel mode, need context synchronising event
+.Ldont_reenable\@:
+	.endm
+
 #else /* CONFIG_ARM64_PTR_AUTH */
 
 	.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
@@ -93,6 +125,11 @@ alternative_else_nop_endif
 	.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
 	.endm
 
+	.macro ptrauth_switch_to_user tsk, tmp1, tmp2, tmpw
+	.endm
+
+	.macro ptrauth_switch_to_kernel tsk, tmp1, tmp2, tmpw
+	.endm
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
 #endif /* __ASM_ASM_POINTER_AUTH_H */
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index c6b4f0603024..82993561ddc4 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -76,8 +76,12 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 	return ptrauth_clear_pac(ptr);
 }
 
-#define ptrauth_thread_init_user(tsk)					\
-	ptrauth_keys_init_user(&(tsk)->thread.keys_user)
+#define ptrauth_thread_init_user(tsk, disabled)					\
+do {										\
+	ptrauth_keys_init_user(&(tsk)->thread.keys_user);			\
+	(tsk)->thread.ptrauth_disabled = disabled;				\
+} while (0)
+
 #define ptrauth_thread_init_kernel(tsk)					\
 	ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
 #define ptrauth_thread_switch_kernel(tsk)				\
@@ -86,7 +90,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
 #else /* CONFIG_ARM64_PTR_AUTH */
 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_strip_insn_pac(lr)	(lr)
-#define ptrauth_thread_init_user(tsk)
+#define ptrauth_thread_init_user(tsk, disabled)
 #define ptrauth_thread_init_kernel(tsk)
 #define ptrauth_thread_switch_kernel(tsk)
 #endif /* CONFIG_ARM64_PTR_AUTH */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 240fe5e5b720..bb0b9edb5f9d 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -150,6 +150,7 @@ struct thread_struct {
 #ifdef CONFIG_ARM64_PTR_AUTH
 	struct ptrauth_keys_user	keys_user;
 	struct ptrauth_keys_kernel	keys_kernel;
+	unsigned int			ptrauth_disabled;
 #endif
 };
 
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 0577e2142284..eb686d9cf541 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -47,6 +47,7 @@ int main(void)
 #ifdef CONFIG_ARM64_PTR_AUTH
   DEFINE(THREAD_KEYS_USER,	offsetof(struct task_struct, thread.keys_user));
   DEFINE(THREAD_KEYS_KERNEL,	offsetof(struct task_struct, thread.keys_kernel));
+  DEFINE(THREAD_PTRAUTH_DISABLED,	offsetof(struct task_struct, thread.ptrauth_disabled));
 #endif
   BLANK();
   DEFINE(S_X0,			offsetof(struct pt_regs, regs[0]));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5304d193c79d..e3985521f763 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -180,6 +180,7 @@ alternative_cb_end
 	apply_ssbd 1, x22, x23
 
 	ptrauth_keys_install_kernel tsk, x20, x22, x23
+	ptrauth_switch_to_kernel tsk, x20, x22, w23
 
 	scs_load tsk, x20
 	.else
@@ -350,6 +351,7 @@ alternative_else_nop_endif
 
 	/* No kernel C function calls after this as user keys are set. */
 	ptrauth_keys_install_user tsk, x0, x1, x2
+	ptrauth_switch_to_user tsk, x0, x1, w2
 
 	apply_ssbd 0, x0, x1
 	.endif
@@ -920,6 +922,7 @@ SYM_FUNC_START(cpu_switch_to)
 	mov	sp, x9
 	msr	sp_el0, x1
 	ptrauth_keys_install_kernel x1, x8, x9, x10
+	ptrauth_switch_to_kernel x1, x8, x9, w10
 	scs_save x0, x8
 	scs_load x1, x8
 	ret
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6089638c7d43..781f2b7bd986 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -580,6 +580,8 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ~0xf;
 }
 
+static unsigned int ptrauth_disabled;
+
 /*
  * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
  */
@@ -587,9 +589,36 @@ void arch_setup_new_exec(void)
 {
 	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
 
-	ptrauth_thread_init_user(current);
+	ptrauth_thread_init_user(current, ptrauth_disabled);
+}
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+
+static struct ctl_table ptrauth_disable_sysctl_table[] = {
+	{
+		.procname	= "ptrauth_disabled",
+		.mode		= 0644,
+		.data		= &ptrauth_disabled,
+		.maxlen		= sizeof(ptrauth_disabled),
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{}
+
+};
+
+static int __init ptrauth_disable_init(void)
+{
+	if (!register_sysctl("abi", ptrauth_disable_sysctl_table))
+		return -EINVAL;
+	return 0;
 }
 
+core_initcall(ptrauth_disable_init);
+
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
 /*
  * Control the relaxed ABI allowing tagged user addresses into the kernel.
-- 
2.25.0




More information about the linux-arm-kernel mailing list