[RFC PATCH 4/8] arm64: head: avoid cache invalidation when entering with the MMU on

Ard Biesheuvel <ardb@kernel.org>
Fri Mar 4 09:56:53 PST 2022


The primary entry code populates memory with the MMU and caches
disabled, and therefore needs to go out of its way to invalidate dirty
but stale cachelines, which might otherwise get evicted on top of those
memory contents and corrupt them.

When entering with the MMU on, the caches are already live and coherent
with memory, so none of this is needed; skip it.

While at it, renumber some asm labels to avoid confusion.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/head.S | 23 ++++++++++++++------
 1 file changed, 16 insertions(+), 7 deletions(-)
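
For reference, this is the guard pattern applied throughout the diff
below (a minimal sketch, not part of the patch; per the patch, x25 is
expected to be nonzero when the kernel was entered with the MMU on):

	cbnz	x25, 0f			// MMU on: caches are coherent with
					// memory, no maintenance needed
	dmb	sy			// order the preceding stores before
					// dc ivac with the MMU off
	dc	ivac, x0		// invalidate the potentially stale
					// cacheline covering [x0]
0: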

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0726ce0d6fd4..b82c86fc9141 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -149,11 +149,13 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
 	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
 	stp	x2, x3, [x0, #16]
 
+	cbnz	x25, 0f				// skip cache invalidation if MMU is on
 	dmb	sy				// needed before dc ivac with
 						// MMU off
 
 	add	x1, x0, #0x20			// 4 x 8 bytes
 	b	dcache_inval_poc		// tail call
+0:	ret
 SYM_CODE_END(preserve_boot_args)
 
 /*
@@ -296,6 +298,8 @@ SYM_CODE_END(preserve_boot_args)
 SYM_FUNC_START_LOCAL(__create_page_tables)
 	mov	x28, lr
 
+	cbnz	x25, 0f			// skip cache invalidation if MMU is on
+
 	/*
 	 * Invalidate the init page tables to avoid potential dirty cache lines
 	 * being evicted. Other page tables are allocated in rodata as part of
@@ -309,7 +313,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	/*
 	 * Clear the init page tables.
 	 */
-	adrp	x0, init_pg_dir
+0:	adrp	x0, init_pg_dir
 	adrp	x1, init_pg_end
 	sub	x1, x1, x0
 1:	stp	xzr, xzr, [x0], #16
@@ -331,15 +335,16 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
 	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
 	mov	x5, #52
-	cbnz	x6, 1f
+	cbnz	x6, 2f
 #endif
 	mov	x5, #VA_BITS_MIN
-1:
+2:
 	adr_l	x6, vabits_actual
 	str	x5, [x6]
+	cbnz	x25, 3f			// skip cache invalidation if MMU is on
 	dmb	sy
 	dc	ivac, x6		// Invalidate potentially stale cache line
-
+3:
 	/*
 	 * VA_BITS may be too small to allow for an ID mapping to be created
 	 * that covers system RAM if that is located sufficiently high in the
@@ -355,12 +360,14 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	adrp	x5, __idmap_text_end
 	clz	x5, x5
 	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
-	b.ge	1f			// .. then skip VA range extension
+	b.ge	5f			// .. then skip VA range extension
 
 	adr_l	x6, idmap_t0sz
 	str	x5, [x6]
+	cbnz	x25, 4f			// skip cache invalidation if MMU is on
 	dmb	sy
 	dc	ivac, x6		// Invalidate potentially stale cache line
+4:
 
 #if (VA_BITS < 48)
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -387,7 +394,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
 	str_l	x4, idmap_ptrs_per_pgd, x5
 #endif
-1:
+5:
 	ldr_l	x4, idmap_ptrs_per_pgd
 	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
 
@@ -407,6 +414,8 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 
 	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
 
+	cbnz	x25, 4f			// skip cache invalidation if MMU is on
+
 	/*
 	 * Since the page tables have been populated with non-cacheable
 	 * accesses (MMU disabled), invalidate those tables again to
@@ -422,7 +431,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	adrp	x1, init_pg_end
 	bl	dcache_inval_poc
 
-	ret	x28
+4:	ret	x28
 SYM_FUNC_END(__create_page_tables)
 
 	/*
-- 
2.30.2
