[RFC PATCH 2/8] arm64: head: record the MMU state at primary entry

Ard Biesheuvel ardb at kernel.org
Fri Mar 4 09:56:51 PST 2022


Prepare for dealing with primary entry with the MMU and caches enabled,
by recording in register x25 whether we entered at EL1 with the MMU on.
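
The recording is done by a small routine invoked ahead of
preserve_boot_args (see the diff below); it relies on the condition flags
surviving the branch: at EL2 the CMP leaves the Z flag set, while at EL1
the TST against SCTLR_ELx_M clears Z only when the MMU is on, so a single
CSET produces the right value on both paths. An annotated restatement of
that routine, with comments added here purely for illustration:

  SYM_CODE_START_LOCAL(record_mmu_state)
  	mrs	x25, CurrentEL
  	cmp	x25, #CurrentEL_EL2	// Z set when entered at EL2
  	b.eq	0f			// EL2: NE is false -> x25 := 0
  	mrs	x25, sctlr_el1
  	tst	x25, #SCTLR_ELx_M	// Z clear iff SCTLR_EL1.M is set
  0:	cset	w25, ne			// x25 := 1 only for EL1 with MMU on
  	ret
  SYM_CODE_END(record_mmu_state)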

While at it, add the pre_disable_mmu_workaround macro invocation to
init_kernel_el, as its manipulation of SCTLR_EL1 may amount to actually
disabling the MMU once subsequent patches are applied.
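
For reference, pre_disable_mmu_workaround is the existing helper in
arch/arm64/include/asm/assembler.h; it expands to roughly the sketch
below (an ISB issued right before an SCTLR_ELx write that may clear the
M bit, guarded by the Falkor erratum E1041 option). The sketch is shown
for context only; the tree being patched has the authoritative
definition:

  	.macro pre_disable_mmu_workaround
  #ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
  	isb			// erratum E1041: ISB before clearing SCTLR_ELx.M
  #endif
  	.endm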

Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
---
 arch/arm64/kernel/head.S | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 6a98f1a38c29..bec9c1483584 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -87,8 +87,10 @@
 	 *  x28        __create_page_tables()                   callee preserved temp register
 	 *  x19/x20    __primary_switch()                       callee preserved temp registers
 	 *  x24        __primary_switch() .. relocate_kernel()  current RELR displacement
+	 *  x25        primary_entry() .. start_kernel()        whether we entered at EL1 with the MMU on
 	 */
 SYM_CODE_START(primary_entry)
+	bl	record_mmu_state
 	bl	preserve_boot_args
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	adrp	x23, __PHYS_OFFSET
@@ -105,6 +107,16 @@ SYM_CODE_START(primary_entry)
 	b	__primary_switch
 SYM_CODE_END(primary_entry)
 
+SYM_CODE_START_LOCAL(record_mmu_state)
+	mrs	x25, CurrentEL
+	cmp	x25, #CurrentEL_EL2
+	b.eq	0f
+	mrs	x25, sctlr_el1
+	tst	x25, #SCTLR_ELx_M
+0:	cset	w25, ne
+	ret
+SYM_CODE_END(record_mmu_state)
+
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
  */
@@ -500,6 +512,7 @@ SYM_FUNC_START(init_kernel_el)
 
 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
 	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x0
 	isb
 	mov_q	x0, INIT_PSTATE_EL1
-- 
2.30.2