[PATCH v2] arm64: mm: fix booting with 52-bit address space
Ard Biesheuvel
ardb at kernel.org
Thu Jun 30 07:07:23 PDT 2022
Joey reports that booting 52-bit VA capable builds on 52-bit VA capable
CPUs is broken since commit 0d9b1ffefabe ("arm64: mm: make vabits_actual
a build time constant if possible"). This is because the primary CPU now
reads the vabits_actual variable before it has been assigned.

The reason for deferring the assignment of vabits_actual was to perform
as few stores to memory as possible with the MMU and caches off, due to
the cache coherency issues this creates.

Since __cpu_setup() [which is where the read of vabits_actual occurs] is
also called on the secondary boot path, we cannot just read the CPU ID
registers directly there, given that the size of the VA space is decided
by the capabilities of the primary CPU. So let's read vabits_actual only
on the secondary boot path, and read the CPU ID registers directly on
the primary boot path, by making the number of VA bits a function
parameter of __cpu_setup().
Cc: Will Deacon <will at kernel.org>
Cc: Anshuman Khandual <anshuman.khandual at arm.com>
Cc: Mark Rutland <mark.rutland at arm.com>
Fixes: 0d9b1ffefabe ("arm64: mm: make vabits_actual a build time constant if possible")
Reported-by: Joey Gouly <joey.gouly at arm.com>
Co-developed-by: Joey Gouly <joey.gouly at arm.com>
Signed-off-by: Joey Gouly <joey.gouly at arm.com>
Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
---
This is a follow-up to Joey's v1, sent after discussing the approach over IRC:
https://lore.kernel.org/linux-arm-kernel/20220630110016.31441-1-joey.gouly@arm.com/
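
For reference (and not part of the patch itself): below is a minimal
user-space C sketch of the decision this patch moves into primary_entry,
i.e., check the ID_AA64MMFR2_EL1.VARange (LVA) field at bits [19:16] and
pick 52 VA bits only when the CPU supports it, with __cpu_setup() then
programming T1SZ as 64 minus the number of VA bits. The vabits() helper
and the sample register values are illustrative only.

/*
 * Stand-alone illustration, not kernel code: mirrors the csel added to
 * primary_entry and the T1SZ computation done in __cpu_setup().
 */
#include <stdint.h>
#include <stdio.h>

#define VA_BITS			52	/* CONFIG_ARM64_VA_BITS_52 build */
#define VA_BITS_MIN		48
#define ID_AA64MMFR2_LVA_SHIFT	16

/* Number of VA bits to use, given a raw ID_AA64MMFR2_EL1 value. */
static unsigned int vabits(uint64_t id_aa64mmfr2)
{
	unsigned int lva = (id_aa64mmfr2 >> ID_AA64MMFR2_LVA_SHIFT) & 0xf;

	return lva ? VA_BITS : VA_BITS_MIN;	/* same choice as the csel */
}

int main(void)
{
	/* Hypothetical register values: LVA field clear vs. set to 1 */
	uint64_t no_lva = 0;
	uint64_t has_lva = 1ULL << ID_AA64MMFR2_LVA_SHIFT;

	printf("no LVA:  %u VA bits, T1SZ = %u\n",
	       vabits(no_lva), 64 - vabits(no_lva));
	printf("has LVA: %u VA bits, T1SZ = %u\n",
	       vabits(has_lva), 64 - vabits(has_lva));
	return 0;
}
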
arch/arm64/kernel/head.S | 10 ++++++++++
arch/arm64/mm/proc.S | 5 +++--
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index cea86b982e6a..ad67386df598 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -107,6 +107,13 @@ SYM_CODE_START(primary_entry)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
+#if VA_BITS > 48
+	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
+	tst	x0, #0xf << ID_AA64MMFR2_LVA_SHIFT
+	mov	x0, #VA_BITS_MIN
+	mov	x1, #VA_BITS
+	csel	x0, x0, x1, eq
+#endif
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
 SYM_CODE_END(primary_entry)
@@ -587,6 +594,9 @@ SYM_FUNC_START_LOCAL(secondary_startup)
 	mov	x20, x0				// preserve boot mode
 	bl	switch_to_vhe
 	bl	__cpu_secondary_check52bitva
+#if VA_BITS > 48
+	ldr_l	x0, vabits_actual
+#endif
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
 	adrp	x2, idmap_pg_dir
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5d6c494070d1..656c78f82a17 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -397,6 +397,8 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
  *
  *	Initialise the processor for turning the MMU on.
  *
+ * Input:
+ *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
  * Output:
  *	Return in x0 the value of the SCTLR_EL1 register.
  */
@@ -466,8 +468,7 @@ SYM_FUNC_START(__cpu_setup)
 	tcr_clear_errata_bits	tcr, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
-	ldr_l		x9, vabits_actual
-	sub		x9, xzr, x9
+	sub		x9, xzr, x0
 	add		x9, x9, #64
 	tcr_set_t1sz	tcr, x9
 #else
--
2.35.1