[PATCH v4 3/3] arm64: Use BTI C directly and unconditionally

Mark Brown <broonie@kernel.org>
Tue Dec 14 07:27:14 PST 2021


Now that we have a macro for BTI C that looks like a regular
instruction, change all the users of the current BTI_C macro to just
emit a BTI C directly and remove the macro.
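
As an illustration (a rough sketch of the expansion, using a made-up
my_func symbol; the exact linkage.h output differs in detail), a
PCS-conformant assembly function now picks up its landing pad directly
from SYM_FUNC_START:

	SYM_FUNC_START(my_func)		// expands to roughly:
						//	.globl my_func
						//	.align 2
						//	my_func:
						//	bti c
		mov	x0, #0
		ret
	SYM_FUNC_END(my_func)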

This does mean that we now unconditionally BTI annotate all assembly
functions, even in kernels built without CONFIG_ARM64_BTI_KERNEL,
making them worse in this respect than code generated by the compiler,
which only emits landing pads when branch protection is enabled. The
overhead should be minimal on implementations with a reasonable HINT
implementation.
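
For reference, BTI C is allocated in the hint space (HINT #34), so
where BTI is not implemented or not enabled it simply executes as a
NOP and the only cost is the instruction itself:

	bti	c	// HINT #34: a valid branch target when BTI is
			// being enforced, a NOP otherwise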

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/linkage.h | 22 ++++++----------------
 arch/arm64/kernel/entry-ftrace.S |  8 ++------
 arch/arm64/lib/kasan_sw_tags.S   |  4 +---
 3 files changed, 9 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 1cfa8bb33edd..9065e4749b42 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -4,16 +4,6 @@
 #define __ALIGN		.align 2
 #define __ALIGN_STR	".align 2"
 
-#if defined(CONFIG_ARM64_BTI_KERNEL) && defined(__aarch64__)
-
-#define BTI_C bti c ;
-
-#else
-
-#define BTI_C
-
-#endif
-
 /*
  * When using in-kernel BTI we need to ensure that PCS-conformant
  * assembly functions have suitable annotations.  Override
@@ -23,27 +13,27 @@
  */
 #define SYM_FUNC_START(name)				\
 	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)	\
-	BTI_C
+	bti c ;
 
 #define SYM_FUNC_START_NOALIGN(name)			\
 	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)	\
-	BTI_C
+	bti c ;
 
 #define SYM_FUNC_START_LOCAL(name)			\
 	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)	\
-	BTI_C
+	bti c ;
 
 #define SYM_FUNC_START_LOCAL_NOALIGN(name)		\
 	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)	\
-	BTI_C
+	bti c ;
 
 #define SYM_FUNC_START_WEAK(name)			\
 	SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)	\
-	BTI_C
+	bti c ;
 
 #define SYM_FUNC_START_WEAK_NOALIGN(name)		\
 	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)		\
-	BTI_C
+	bti c ;
 
 /*
  * Annotate a function as position independent, i.e., safe to be called before
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 8cf970d219f5..e535480a4069 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -77,17 +77,13 @@
 	.endm
 
 SYM_CODE_START(ftrace_regs_caller)
-#ifdef BTI_C
-	BTI_C
-#endif
+	bti	c
 	ftrace_regs_entry	1
 	b	ftrace_common
 SYM_CODE_END(ftrace_regs_caller)
 
 SYM_CODE_START(ftrace_caller)
-#ifdef BTI_C
-	BTI_C
-#endif
+	bti	c
 	ftrace_regs_entry	0
 	b	ftrace_common
 SYM_CODE_END(ftrace_caller)
diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S
index 5b04464c045e..20784ce75def 100644
--- a/arch/arm64/lib/kasan_sw_tags.S
+++ b/arch/arm64/lib/kasan_sw_tags.S
@@ -38,9 +38,7 @@
  * incremented by 256 prior to return).
  */
 SYM_CODE_START(__hwasan_tag_mismatch)
-#ifdef BTI_C
-	BTI_C
-#endif
+	bti	c
 	add	x29, sp, #232
 	stp	x2, x3, [sp, #8 * 2]
 	stp	x4, x5, [sp, #8 * 4]
-- 
2.30.2