[PATCH v3 13/20] arm64: entry: template the entry asm functions
Mark Rutland
mark.rutland at arm.com
Tue May 25 11:32:55 PDT 2021
Now that the majority of the exception triage logic has been converted
to C, the entry assembly functions all have a uniform structure.
Let's generate them all with an assembly macro to reduce the amount of
code and to ensure they all remain in sync if we make changes in future.
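For example, the invocation "entry_handler 0, 64, sync" should expand to
roughly the same code as the old open-coded el0_sync block (the old call
sites relied on kernel_entry defaulting regsize to 64, so passing 64
explicitly is equivalent):

	.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
	kernel_entry 0, 64
	mov	x0, sp
	bl	el0_sync_handler
	b	ret_to_user
SYM_CODE_END(el0_sync)

The EL1 invocations differ only in that the ".if \el == 0" test selects
"b ret_to_kernel" instead of "b ret_to_user". In all cases the bl targets
the existing C el<N>_<label>_handler(), which takes a pointer to the
pt_regs saved by kernel_entry (hence "mov x0, sp").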
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland at arm.com>
Reviewed-by: Joey Gouly <joey.gouly at arm.com>
Cc: Catalin Marinas <catalin.marinas at arm.com>
Cc: James Morse <james.morse at arm.com>
Cc: Marc Zyngier <maz at kernel.org>
Cc: Will Deacon <will at kernel.org>
---
arch/arm64/kernel/entry.S | 124 ++++++++++------------------------------------
1 file changed, 27 insertions(+), 97 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cab4c284c106..b4e9860a2227 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -604,114 +604,44 @@ SYM_CODE_START_LOCAL(el1_error_invalid)
inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)
-/*
- * EL1 mode handlers.
- */
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
- kernel_entry 1
- mov x0, sp
- bl el1_sync_handler
- b ret_to_kernel
-SYM_CODE_END(el1_sync)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
- kernel_entry 1
- mov x0, sp
- bl el1_irq_handler
- b ret_to_kernel
-SYM_CODE_END(el1_irq)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
- kernel_entry 1
- mov x0, sp
- bl el1_fiq_handler
- b ret_to_kernel
-SYM_CODE_END(el1_fiq)
-
+ .macro entry_handler el:req, regsize:req, label:req
.align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_error)
- kernel_entry 1
+SYM_CODE_START_LOCAL_NOALIGN(el\el\()_\label)
+ kernel_entry \el, \regsize
mov x0, sp
- bl el1_error_handler
+ bl el\el\()_\label\()_handler
+ .if \el == 0
+ b ret_to_user
+ .else
b ret_to_kernel
-SYM_CODE_END(el1_error)
-
-SYM_CODE_START_LOCAL(ret_to_kernel)
- kernel_exit 1
-SYM_CODE_END(ret_to_kernel)
+ .endif
+SYM_CODE_END(el\el\()_\label)
+ .endm
/*
- * EL0 mode handlers.
+ * Early exception handlers
*/
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
- kernel_entry 0
- mov x0, sp
- bl el0_sync_handler
- b ret_to_user
-SYM_CODE_END(el0_sync)
+ entry_handler 1, 64, sync
+ entry_handler 1, 64, irq
+ entry_handler 1, 64, fiq
+ entry_handler 1, 64, error
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
- kernel_entry 0
- mov x0, sp
- bl el0_irq_handler
- b ret_to_user
-SYM_CODE_END(el0_irq)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
- kernel_entry 0
- mov x0, sp
- bl el0_fiq_handler
- b ret_to_user
-SYM_CODE_END(el0_fiq)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_error)
- kernel_entry 0
- mov x0, sp
- bl el0_error_handler
- b ret_to_user
-SYM_CODE_END(el0_error)
+ entry_handler 0, 64, sync
+ entry_handler 0, 64, irq
+ entry_handler 0, 64, fiq
+ entry_handler 0, 64, error
#ifdef CONFIG_COMPAT
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_sync_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_sync_compat)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_irq_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_irq_compat)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_fiq_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_fiq_compat)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_error_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_error_compat)
+ entry_handler 0, 32, sync_compat
+ entry_handler 0, 32, irq_compat
+ entry_handler 0, 32, fiq_compat
+ entry_handler 0, 32, error_compat
#endif
+SYM_CODE_START_LOCAL(ret_to_kernel)
+ kernel_exit 1
+SYM_CODE_END(ret_to_kernel)
+
/*
* "slow" syscall return path.
*/
--
2.11.0