[boot-wrapper PATCH 5/5] aarch64: Introduce EL2 boot code for Armv8-R AArch64
Jaxson Han
jaxson.han at arm.com
Tue Apr 20 08:24:38 BST 2021
The Armv8-R AArch64 profile does not support the EL3 exception level.
The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU
at EL1, which allows running off-the-shelf Linux. However, EL2 only
supports a PMSA, which is not supported by Linux, so we need to drop
into EL1 before entering the kernel.
The boot sequence is:
If CurrentEL == EL3, then goto EL3 initialisation and drop to lower EL
before entering the kernel.
If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R AArch64),
then goto Armv8-R AArch64 initialisation and drop to EL1 before
entering the kernel.
Else, no initialisation and keep the current EL before entering the
kernel.
Signed-off-by: Jaxson Han <jaxson.han at arm.com>
---
arch/aarch64/boot.S | 51 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 51 insertions(+)
diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S
index f7dbf3f..6961a2a 100644
--- a/arch/aarch64/boot.S
+++ b/arch/aarch64/boot.S
@@ -25,16 +25,22 @@ _start:
* Boot sequence
* If CurrentEL == EL3, then goto EL3 initialisation and drop to
* lower EL before entering the kernel.
+ * If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf, then goto
+ * Armv8-R AArch64 initialisation and drop to EL1 before
+ * entering the kernel.
* Else, no initialisation and keep the current EL before
* entering the kernel.
*/
mrs x0, CurrentEL
cmp x0, #CURRENTEL_EL3
beq el3_init
+ cmp x0, #CURRENTEL_EL2
+ beq el2_init
/*
* We stay in the current EL for entering the kernel
*/
+keep_el:
mov w0, #1
ldr x1, =flag_keep_el
str w0, [x1]
@@ -112,6 +118,43 @@ el3_init:
str w0, [x1]
b el_max_init
+ /*
+ * EL2 Armv8-R AArch64 initialisation
+ */
+el2_init:
+ /* Detect Armv8-R AArch64 */
+ mrs x1, id_aa64mmfr0_el1
+ ubfx x1, x1, #48, #4 // MSA
+ /* 0xf means Armv8-R AArch64 */
+ cmp x1, 0xf
+ bne keep_el
+
+ mrs x0, midr_el1
+ msr vpidr_el2, x0
+
+ mrs x0, mpidr_el1
+ msr vmpidr_el2, x0
+
+ mov x0, #(1 << 31) // VTCR_MSA: VMSAv8-64 support
+ msr vtcr_el2, x0
+
+ /* Enable pointer authentication if present */
+ mrs x1, id_aa64isar1_el1
+ ldr x2, =(((0xff) << 24) | (0xff << 4))
+ and x1, x1, x2
+ cbz x1, 1f
+
+ mrs x0, hcr_el2
+ orr x0, x0, #(1 << 40) // AP key enable
+ orr x0, x0, #(1 << 41) // AP insn enable
+ msr hcr_el2, x0
+
+1: isb
+ mov w0, #SPSR_KERNEL_EL1
+ ldr x1, =spsr_to_elx
+ str w0, [x1]
+ b el_max_init
+
el_max_init:
ldr x0, =CNTFRQ
msr cntfrq_el0, x0
@@ -169,10 +212,18 @@ jump_kernel:
*/
bfi x4, x19, #5, #1
+ mrs x5, CurrentEL
+ cmp x5, #CURRENTEL_EL2
+ b.eq 1f
+
msr elr_el3, x19
msr spsr_el3, x4
eret
+1: msr elr_el2, x19
+ msr spsr_el2, x4
+ eret
+
.ltorg
.data
--
2.25.1
More information about the linux-arm-kernel
mailing list