[BOOT-WRAPPER 04/11] aarch64: Always enter kernel via exception return
Mark Rutland
mark.rutland at arm.com
Mon Jul 29 09:14:54 PDT 2024
When the boot-wrapper is entered at EL3, it enters the kernel via
ERET; when entered at EL2, it branches to the kernel directly.
This is an artifact of the way the boot-wrapper was originally written
in assembly. It would be preferable to always enter the kernel via
ERET so that PSTATE is initialized to a known-good value.
Rework jump_kernel() to always enter the kernel via ERET.
Signed-off-by: Mark Rutland <mark.rutland at arm.com>
Cc: Akos Denke <akos.denke at arm.com>
Cc: Andre Przywara <andre.przywara at arm.com>
Cc: Luca Fancellu <luca.fancellu at arm.com>
Cc: Marc Zyngier <maz at kernel.org>
---
arch/aarch64/boot.S | 27 ++++++++++++---------------
1 file changed, 12 insertions(+), 15 deletions(-)
diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S
index d8d38dd..3dbf85a 100644
--- a/arch/aarch64/boot.S
+++ b/arch/aarch64/boot.S
@@ -76,10 +76,6 @@ reset_at_el2:
b.eq err_invalid_id
bl setup_stack
- mov w0, #1
- ldr x1, =flag_no_el3
- str w0, [x1]
-
bl cpu_init_bootwrapper
bl cpu_init_arch
@@ -111,25 +107,26 @@ ASM_FUNC(jump_kernel)
bl find_logical_id
bl setup_stack // Reset stack pointer
- ldr w0, flag_no_el3
- cmp w0, #0 // Prepare Z flag
-
mov x0, x20
mov x1, x21
mov x2, x22
mov x3, x23
+ mov x4, #SPSR_KERNEL
- b.eq 1f
- br x19 // No EL3
+ mrs x5, CurrentEL
+ cmp x5, #CURRENTEL_EL3
+ b.eq eret_at_el3
+ cmp x5, #CURRENTEL_EL2
+ b.eq eret_at_el2
+ b . // Not possible
-1: mov x4, #SPSR_KERNEL
+eret_at_el3:
msr elr_el3, x19
msr spsr_el3, x4
eret
+eret_at_el2:
+ msr elr_el2, x19
+ msr spsr_el2, x4
+ eret
.ltorg
-
- .data
- .align 3
-flag_no_el3:
- .long 0
--
2.30.2
More information about the linux-arm-kernel
mailing list