[PATCH v7 13/50] arm64: head: Clear BSS and the kernel page tables in one go
Ard Biesheuvel
ardb+git@google.com
Tue Jan 23 06:53:12 PST 2024
From: Ard Biesheuvel <ardb@kernel.org>
We will move the CPU feature overrides into BSS in a subsequent patch,
and this requires that BSS is zeroed before the feature override
detection code runs. So let's map BSS read-write in the ID map, and zero
it via this mapping.
Since the kernel page tables sit right next to BSS and are now also
zeroed via the ID map, let's drop the separate clear_page_tables()
function and just zero everything in one go.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
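Note on the approach: after this change, __primary_switch() zeroes
everything from __bss_start up to init_pg_end with a single
__pi_memset() call, which is only valid because the linker script
places init_pg_dir/init_pg_end directly after BSS. A rough C sketch
of the combined operation (illustrative only; zero_bss_and_page_tables()
is a made-up name, and the real implementation is the assembly in the
diff below):

  #include <string.h>

  /* Symbols provided by vmlinux.lds.S: BSS starts at __bss_start and
   * the init page tables end at init_pg_end, with nothing in between
   * that must survive. */
  extern char __bss_start[], init_pg_end[];

  static void zero_bss_and_page_tables(void)
  {
          /* One contiguous range covers both BSS and the init page
           * tables, so a single memset replaces the old two-step
           * clear_page_tables() + BSS-clearing sequence. */
          memset(__bss_start, 0, init_pg_end - __bss_start);
  }

The dsb ishst that follows the memset in the assembly is still
required, so that the zeroed page-table memory is visible to the page
table walker before create_kernel_mapping() runs.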
arch/arm64/kernel/head.S | 33 +++++++-------------
arch/arm64/kernel/vmlinux.lds.S | 3 ++
2 files changed, 14 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ca5e5fbefcd3..2af518161f3a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -177,17 +177,6 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
ret
SYM_CODE_END(preserve_boot_args)
-SYM_FUNC_START_LOCAL(clear_page_tables)
- /*
- * Clear the init page tables.
- */
- adrp x0, init_pg_dir
- adrp x1, init_pg_end
- sub x2, x1, x0
- mov x1, xzr
- b __pi_memset // tail call
-SYM_FUNC_END(clear_page_tables)
-
/*
* Macro to populate page table entries, these entries can be pointers to the next level
* or last level entries pointing to physical memory.
@@ -386,9 +375,9 @@ SYM_FUNC_START_LOCAL(create_idmap)
map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
- /* Remap the kernel page tables r/w in the ID map */
+ /* Remap BSS and the kernel page tables r/w in the ID map */
adrp x1, _text
- adrp x2, init_pg_dir
+ adrp x2, __bss_start
adrp x3, _end
bic x4, x2, #SWAPPER_BLOCK_SIZE - 1
mov_q x5, SWAPPER_RW_MMUFLAGS
@@ -489,14 +478,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
mov x0, x20
bl set_cpu_boot_mode_flag
- // Clear BSS
- adr_l x0, __bss_start
- mov x1, xzr
- adr_l x2, __bss_stop
- sub x2, x2, x0
- bl __pi_memset
- dsb ishst // Make zero page visible to PTW
-
#if VA_BITS > 48
adr_l x8, vabits_actual // Set this early so KASAN early init
str x25, [x8] // ... observes the correct value
@@ -782,6 +763,15 @@ SYM_FUNC_START_LOCAL(__primary_switch)
adrp x1, reserved_pg_dir
adrp x2, init_idmap_pg_dir
bl __enable_mmu
+
+ // Clear BSS
+ adrp x0, __bss_start
+ mov x1, xzr
+ adrp x2, init_pg_end
+ sub x2, x2, x0
+ bl __pi_memset
+ dsb ishst // Make zero page visible to PTW
+
#ifdef CONFIG_RELOCATABLE
adrp x23, KERNEL_START
and x23, x23, MIN_KIMG_ALIGN - 1
@@ -796,7 +786,6 @@ SYM_FUNC_START_LOCAL(__primary_switch)
orr x23, x23, x0 // record kernel offset
#endif
#endif
- bl clear_page_tables
bl create_kernel_mapping
adrp x1, init_pg_dir
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 8dd5dda66f7c..8a3c6aacc355 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -311,12 +311,15 @@ SECTIONS
__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
_edata = .;
+ /* start of zero-init region */
BSS_SECTION(SBSS_ALIGN, 0, 0)
. = ALIGN(PAGE_SIZE);
init_pg_dir = .;
. += INIT_DIR_SIZE;
init_pg_end = .;
+ /* end of zero-init region */
+
#ifdef CONFIG_RELOCATABLE
. += SZ_4K; /* stack for the early relocation code */
early_init_stack = .;
--
2.43.0.429.g432eaa2c6b-goog