[PATCH v4 12/12] ARM: entry: Make asm coproc dispatch code NWFPE only

Ard Biesheuvel ardb at kernel.org
Mon Mar 20 06:18:45 PDT 2023


Now that we can dispatch all VFP and iWMMXT related undef exceptions
using undef hooks implemented in C code, we no longer need the asm entry
code that takes care of this unless we are using NWFPE. As this means it
is ARM only, we can also remove the Thumb2 specific decorations.

It also means the non-standard, asm-only calling convention where
returning via LR means failure and returning via R9 means success is now
only used on legacy platforms that lack any kind of function return
prediction, so the associated performance impact is avoided.

Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
---
 arch/arm/kernel/entry-armv.S | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0367c9581c1f05a6..5552179faf7a469e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -532,8 +532,9 @@ ENDPROC(__und_usr)
  * IRQs enabled, FIQs enabled.
  */
 call_fpe:
-	badr	r9, ret_from_exception
-	badr	lr, __und_usr_fault_32
+#ifdef CONFIG_FPE_NWFPE
+	adr	r9, ret_from_exception
+	adr	lr, __und_usr_fault_32
 
 	sub	r4, r2, #4			@ ARM instr at LR - 4
 USERL(	4b,	ldrt r0, [r4])
@@ -554,9 +555,7 @@ ARM_BE8(rev	r0, r0)				@ little endian instruction
 	teqeq	r5, #1 << TIF_USING_IWMMXT	@ check whether it is set
 	beq	iwmmxt_task_enable		@ branch if set
 #endif
- ARM(	add	pc, pc, r8, lsr #6	)
- THUMB(	lsr	r8, r8, #6		)
- THUMB(	add	pc, r8			)
+	add	pc, pc, r8, lsr #6
 	nop
 
 	ret.w	lr				@ CP#0
@@ -598,6 +597,7 @@ ENTRY(fp_enter)
 ENTRY(no_fp)
 	ret	lr
 ENDPROC(no_fp)
+#endif
 
 __und_usr_fault_32:
 	mov	r1, #4
-- 
2.39.2




More information about the linux-arm-kernel mailing list