[PATCH v8] arm64: Use static call trampolines when kCFI is enabled

Ard Biesheuvel ardb at kernel.org
Tue Mar 31 05:06:06 PDT 2026


On Tue, 31 Mar 2026, at 13:04, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb at kernel.org>
>
> Implement arm64 support for the 'unoptimized' static call variety, which
> routes all calls through a trampoline that performs a tail call to the
> chosen function, and wire it up for use when kCFI is enabled. This works
> around an issue with kCFI and generic static calls, where the prototypes
> of default handlers such as __static_call_nop() and __static_call_ret0()
> don't match the expected prototype of the call site, resulting in kCFI
> false positives [0].
>
> Since static call targets may be located in modules loaded out of direct
> branching range, this needs an ADRP/ADD pair to load the branch target

Sashiko correctly points out that this should say ADRP/LDR rather than
ADRP/ADD, and this means that the sequence is in fact different from the
one used by modules.

> into R16 and a branch-to-register (BR) instruction to perform an
> indirect call. This is the exact code sequence that is used by modules
> when the call target is out of direct branching range.
>

... so please drop this last sentence when applying.


> Unlike on x86, there is no pressing need on arm64 to avoid indirect
> calls at all costs, but hiding it from the compiler as is done here does
> have some benefits:
> - the literal is located in .rodata, which gives us the same robustness
>   advantage that code patching does;
> - no D-cache pollution from fetching hash values from .text sections.
>
> From an execution speed PoV, this is unlikely to make any difference at
> all.
>
> [0] https://lore.kernel.org/all/20260311225822.1565895-1-cmllamas@google.com/
>
> Cc: Carlos Llamas <cmllamas at google.com>
> Cc: Sami Tolvanen <samitolvanen at google.com>
> Cc: Sean Christopherson <seanjc at google.com>
> Cc: Kees Cook <kees at kernel.org>
> Cc: Peter Zijlstra <peterz at infradead.org>
> Cc: Will McVicker <willmcvicker at google.com>
> Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
> ---
> v8: Simplify the trampoline by combining the NULL and RET0 cases, and
>     dropping the conditional branch and return
> v7: https://lore.kernel.org/all/20260313061852.4025964-1-cmllamas@google.com/
>
>  arch/arm64/Kconfig                   |  1 +
>  arch/arm64/include/asm/static_call.h | 31 ++++++++++++++++++++
>  arch/arm64/kernel/Makefile           |  1 +
>  arch/arm64/kernel/static_call.c      | 23 +++++++++++++++
>  arch/arm64/kernel/vmlinux.lds.S      |  1 +
>  5 files changed, 57 insertions(+)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 38dba5f7e4d2..9ea19b74b6c3 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -252,6 +252,7 @@ config ARM64
>  	select HAVE_RSEQ
>  	select HAVE_RUST if RUSTC_SUPPORTS_ARM64
>  	select HAVE_STACKPROTECTOR
> +	select HAVE_STATIC_CALL if CFI
>  	select HAVE_SYSCALL_TRACEPOINTS
>  	select HAVE_KPROBES
>  	select HAVE_KRETPROBES
> diff --git a/arch/arm64/include/asm/static_call.h 
> b/arch/arm64/include/asm/static_call.h
> new file mode 100644
> index 000000000000..b73960c949e4
> --- /dev/null
> +++ b/arch/arm64/include/asm/static_call.h
> @@ -0,0 +1,31 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_STATIC_CALL_H
> +#define _ASM_STATIC_CALL_H
> +
> +#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, target)		    \
> +	asm("	.pushsection .static_call.text, \"ax\"		\n" \
> +	    "	.align	4					\n" \
> +	    "	.globl	" name "				\n" \
> +	    name ":						\n" \
> +	    "	hint	34	/* BTI C */			\n" \
> +	    "	adrp	x16, 1f					\n" \
> +	    "	ldr	x16, [x16, :lo12:1f]			\n" \
> +	    "	br	x16					\n" \
> +	    "	.type	" name ", %function			\n" \
> +	    "	.size	" name ", . - " name "			\n" \
> +	    "	.popsection					\n" \
> +	    "	.pushsection .rodata, \"a\"			\n" \
> +	    "	.align	3					\n" \
> +	    "1:	.quad	" #target "				\n" \
> +	    "	.popsection					\n")
> +
> +#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)			\
> +	__ARCH_DEFINE_STATIC_CALL_TRAMP(STATIC_CALL_TRAMP_STR(name), #func)
> +
> +#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
> +	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
> +
> +#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)			\
> +	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
> +
> +#endif /* _ASM_STATIC_CALL_H */
> diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
> index 76f32e424065..fe627100d199 100644
> --- a/arch/arm64/kernel/Makefile
> +++ b/arch/arm64/kernel/Makefile
> @@ -46,6 +46,7 @@ obj-$(CONFIG_MODULES)			+= module.o module-plts.o
>  obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
>  obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF)	+= watchdog_hld.o
>  obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
> +obj-$(CONFIG_HAVE_STATIC_CALL)		+= static_call.o
>  obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
>  obj-$(CONFIG_KGDB)			+= kgdb.o
>  obj-$(CONFIG_EFI)			+= efi.o efi-rt-wrapper.o
> diff --git a/arch/arm64/kernel/static_call.c 
> b/arch/arm64/kernel/static_call.c
> new file mode 100644
> index 000000000000..8b3a19e10871
> --- /dev/null
> +++ b/arch/arm64/kernel/static_call.c
> @@ -0,0 +1,23 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <linux/static_call.h>
> +#include <linux/memory.h>
> +#include <asm/text-patching.h>
> +
> +void arch_static_call_transform(void *site, void *tramp, void *func, 
> bool tail)
> +{
> +	u64 literal;
> +	int ret;
> +
> +	if (!func)
> +		func = __static_call_return0;
> +
> +	/* decode the instructions to discover the literal address */
> +	literal = ALIGN_DOWN((u64)tramp + 4, SZ_4K) +
> +		  aarch64_insn_adrp_get_offset(le32_to_cpup(tramp + 4)) +
> +		  8 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12,
> +						    le32_to_cpup(tramp + 8));
> +
> +	ret = aarch64_insn_write_literal_u64((void *)literal, (u64)func);
> +	WARN_ON_ONCE(ret);
> +}
> +EXPORT_SYMBOL_GPL(arch_static_call_transform);
> diff --git a/arch/arm64/kernel/vmlinux.lds.S 
> b/arch/arm64/kernel/vmlinux.lds.S
> index 2964aad0362e..2d1e75263f03 100644
> --- a/arch/arm64/kernel/vmlinux.lds.S
> +++ b/arch/arm64/kernel/vmlinux.lds.S
> @@ -191,6 +191,7 @@ SECTIONS
>  			LOCK_TEXT
>  			KPROBES_TEXT
>  			HYPERVISOR_TEXT
> +			STATIC_CALL_TEXT
>  			*(.gnu.warning)
>  	}
> 
> -- 
> 2.53.0.1018.g2bb0e51243-goog



More information about the linux-arm-kernel mailing list