[boot-wrapper 5/7] aarch64: Introduce EL2 boot code for Armv8-R AArch64

Luca Fancellu luca.fancellu at arm.com
Thu Jun 6 06:36:26 PDT 2024


The Armv8-R AArch64 profile does not support the EL3 exception level.
The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU
at EL1, which allows running off-the-shelf Linux. However EL2 only
supports a PMSA, which is not supported by Linux, so we need to drop
into EL1 before entering the kernel.

We add a new err_invalid_arch symbol as a dead loop. If we detect that the
current Armv8-R AArch64 core supports only a PMSA, meaning we cannot boot
Linux, then we jump to err_invalid_arch.

During Armv8-R AArch64 init, to make sure nothing unexpected traps into
EL2, we auto-detect and configure FIEN and EnSCXT in HCR_EL2.

The boot sequence is:
If CurrentEL == EL3, then goto EL3 initialisation and drop to lower EL
  before entering the kernel.
If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R aarch64),
  if id_aa64mmfr0_el1.MSA_frac == 0x2,
    then goto Armv8-R AArch64 initialisation and drop to EL1 before
    entering the kernel.
  else, which means VMSA unsupported and cannot boot Linux,
    goto err_invalid_arch (dead loop).
Else, no initialisation and keep the current EL before entering the
  kernel.

Signed-off-by: Luca Fancellu <luca.fancellu at arm.com>
---
 arch/aarch64/boot.S            | 63 ++++++++++++++++++++++++++++++++--
 arch/aarch64/include/asm/cpu.h | 10 ++++++
 arch/aarch64/init.c            | 24 +++++++++++++
 3 files changed, 94 insertions(+), 3 deletions(-)

diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S
index 211077af17c8..b2b9863b8d6a 100644
--- a/arch/aarch64/boot.S
+++ b/arch/aarch64/boot.S
@@ -22,7 +22,8 @@
 	 *   EL2 must be implemented.
 	 *
 	 * - EL2 (Non-secure)
-	 *   Entering at EL2 is partially supported.
+	 *   Entering at EL2 is partially supported for Armv8-A.
+	 *   Entering at EL2 is supported for Armv8-R.
 	 *   PSCI is not supported when entered in this exception level.
 	 */
 ASM_FUNC(_start)
@@ -76,7 +77,50 @@ reset_at_el2:
 	msr	sctlr_el2, x0
 	isb
 
-	b	reset_no_el3
+	/* Detect Armv8-R AArch64 */
+	mrs	x1, id_aa64mmfr0_el1
+	/*
+	 * Check MSA, bits [51:48]:
+	 * 0xf means Armv8-R AArch64.
+	 * If not 0xf, proceed in Armv8-A EL2.
+	 */
+	ubfx	x0, x1, #48, #4			// MSA
+	cmp	x0, 0xf
+	bne	reset_no_el3
+
+	/*
+	 * Armv8-R AArch64 is found, check if Linux can be booted.
+	 * Check MSA_frac, bits [55:52]:
+	 * 0x2 means EL1&0 translation regime also supports VMSAv8-64.
+	 */
+	ubfx	x0, x1, #52, #4			// MSA_frac
+	cmp	x0, 0x2
+	/*
+	 * If not 0x2, no VMSA, so cannot boot Linux and dead loop.
+	 * Also, since the architecture guarantees that those CPUID
+	 * fields never lose features when the value in a field
+	 * increases, we use blt to cover it.
+	 */
+	blt	err_invalid_arch
+
+	/* Start Armv8-R Linux at EL1 */
+	mov	w0, #SPSR_KERNEL_EL1
+	ldr	x1, =spsr_to_elx
+	str	w0, [x1]
+
+	cpuid	x0, x1
+	bl	find_logical_id
+	cmp	x0, #MPIDR_INVALID
+	b.eq	err_invalid_id
+	bl	setup_stack
+
+	bl	cpu_init_bootwrapper
+
+	bl	cpu_init_armv8r_el2
+
+	bl	gic_secure_init
+
+	b	start_bootmethod
 
 	/*
 	 * EL1 initialization
@@ -104,6 +148,7 @@ reset_no_el3:
 	b	start_bootmethod
 
 err_invalid_id:
+err_invalid_arch:
 	b	.
 
 	/*
@@ -121,10 +166,14 @@ ASM_FUNC(jump_kernel)
 	ldr	x0, =SCTLR_EL1_KERNEL
 	msr	sctlr_el1, x0
 
+	mrs	x5, CurrentEL
+	cmp	x5, #CURRENTEL_EL2
+	b.eq	1f
+
 	ldr	x0, =SCTLR_EL2_KERNEL
 	msr	sctlr_el2, x0
 
-	cpuid	x0, x1
+1:	cpuid	x0, x1
 	bl	find_logical_id
 	bl	setup_stack		// Reset stack pointer
 
@@ -147,10 +196,18 @@ ASM_FUNC(jump_kernel)
 	 */
 	bfi	x4, x19, #5, #1
 
+	mrs	x5, CurrentEL
+	cmp	x5, #CURRENTEL_EL2
+	b.eq	1f
+
 	msr	elr_el3, x19
 	msr	spsr_el3, x4
 	eret
 
+1:	msr	elr_el2, x19
+	msr	spsr_el2, x4
+	eret
+
 	.ltorg
 
 	.data
diff --git a/arch/aarch64/include/asm/cpu.h b/arch/aarch64/include/asm/cpu.h
index 846b89f8405d..6b2f5fbe4502 100644
--- a/arch/aarch64/include/asm/cpu.h
+++ b/arch/aarch64/include/asm/cpu.h
@@ -58,7 +58,13 @@
 #define SCR_EL3_TCR2EN			BIT(43)
 #define SCR_EL3_PIEN			BIT(45)
 
+#define VTCR_EL2_MSA			BIT(31)
+
 #define HCR_EL2_RES1			BIT(1)
+#define HCR_EL2_APK_NOTRAP		BIT(40)
+#define HCR_EL2_API_NOTRAP		BIT(41)
+#define HCR_EL2_FIEN_NOTRAP		BIT(47)
+#define HCR_EL2_ENSCXT_NOTRAP	BIT(53)
 
 #define ID_AA64DFR0_EL1_PMSVER		BITS(35, 32)
 #define ID_AA64DFR0_EL1_TRACEBUFFER	BITS(47, 44)
@@ -88,7 +94,9 @@
 
 #define ID_AA64PFR1_EL1_MTE		BITS(11, 8)
 #define ID_AA64PFR1_EL1_SME		BITS(27, 24)
+#define ID_AA64PFR0_EL1_RAS		BITS(31, 28)
 #define ID_AA64PFR0_EL1_SVE		BITS(35, 32)
+#define ID_AA64PFR0_EL1_CSV2	BITS(59, 56)
 
 #define ID_AA64SMFR0_EL1		s3_0_c0_c4_5
 #define ID_AA64SMFR0_EL1_FA64		BIT(63)
@@ -114,6 +122,7 @@
 #define SPSR_I			(1 << 7)	/* IRQ masked */
 #define SPSR_F			(1 << 6)	/* FIQ masked */
 #define SPSR_T			(1 << 5)	/* Thumb */
+#define SPSR_EL1H		(5 << 0)	/* EL1 Handler mode */
 #define SPSR_EL2H		(9 << 0)	/* EL2 Handler mode */
 #define SPSR_HYP		(0x1a << 0)	/* M[3:0] = hyp, M[4] = AArch32 */
 
@@ -153,6 +162,7 @@
 #else
 #define SCTLR_EL1_KERNEL	SCTLR_EL1_RES1
 #define SPSR_KERNEL		(SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H)
+#define SPSR_KERNEL_EL1		(SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL1H)
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/aarch64/init.c b/arch/aarch64/init.c
index 37cb45fde446..8006f2705193 100644
--- a/arch/aarch64/init.c
+++ b/arch/aarch64/init.c
@@ -145,6 +145,30 @@ void cpu_init_el3(void)
 	msr(CNTFRQ_EL0, COUNTER_FREQ);
 }
 
+void cpu_init_armv8r_el2(void)
+{
+	unsigned long hcr = mrs(hcr_el2);
+
+	msr(vpidr_el2, mrs(midr_el1));
+	msr(vmpidr_el2, mrs(mpidr_el1));
+
+	/* VTCR_MSA: VMSAv8-64 support */
+	msr(vtcr_el2, VTCR_EL2_MSA);
+
+	if (mrs_field(ID_AA64PFR0_EL1, CSV2) <= 2)
+		hcr |= HCR_EL2_ENSCXT_NOTRAP;
+
+	if (mrs_field(ID_AA64PFR0_EL1, RAS) <= 2)
+		hcr |= HCR_EL2_FIEN_NOTRAP;
+
+	if (cpu_has_pauth())
+		hcr |= HCR_EL2_APK_NOTRAP | HCR_EL2_API_NOTRAP;
+
+	msr(hcr_el2, hcr);
+	isb();
+	msr(CNTFRQ_EL0, COUNTER_FREQ);
+}
+
 #ifdef PSCI
 extern char psci_vectors[];
 
-- 
2.34.1




More information about the linux-arm-kernel mailing list