[PATCH v4 57/61] arm64: kvm: Limit HYP VA and host S2 range to 48 bits when LPA2 is in effect

Ard Biesheuvel <ardb@google.com>
Tue Sep 12 07:16:47 PDT 2023


From: Ard Biesheuvel <ardb@kernel.org>

The KVM code needs more work to support 5-level paging with LPA2, so
for the time being, limit KVM to 48-bit addressing on 4k and 16k page
size configurations. This can be reverted once LPA2 support for KVM is
merged.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2 ++
 arch/arm64/kvm/mmu.c                  | 5 ++++-
 arch/arm64/kvm/va_layout.c            | 9 +++++----
 3 files changed, 11 insertions(+), 5 deletions(-)
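As a side note, the change amounts to the clamping pattern sketched
below. This is a minimal, stand-alone user-space sketch, not kernel
code: LPA2_ENABLED stands in for the kernel's
IS_ENABLED(CONFIG_ARM64_LPA2), and the parange-derived values in main()
are illustrative examples only.

#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_ARM64_LPA2); an assumption of this
 * sketch, not a kernel symbol. */
#define LPA2_ENABLED 1

/* Cap the physical address width at 48 bits while KVM lacks LPA2
 * support, mirroring the prepare_host_vtcr() change below. */
static unsigned int clamp_phys_shift(unsigned int phys_shift)
{
	if (LPA2_ENABLED && phys_shift > 48)
		phys_shift = 48;
	return phys_shift;
}

int main(void)
{
	printf("%u\n", clamp_phys_shift(52));	/* LPA2-sized parange: clamped to 48 */
	printf("%u\n", clamp_phys_shift(40));	/* smaller widths pass through unchanged */
	return 0;
}

Clamping right where phys_shift is derived from parange keeps the rest
of the stage-2 setup path unchanged.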

diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 9d703441278b..c20b08cf1f03 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -128,6 +128,8 @@ static void prepare_host_vtcr(void)
 	/* The host stage 2 is id-mapped, so use parange for T0SZ */
 	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
 	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
+	if (IS_ENABLED(CONFIG_ARM64_LPA2) && phys_shift > 48)
+		phys_shift = 48;	/* LPA2 is not yet supported in KVM */
 
 	host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
 					  id_aa64mmfr1_el1_sys_val, phys_shift);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index a4c5d7f44e32..1cac302b92e4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -800,7 +800,8 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
 {
 	struct kvm_pgtable pgt = {
 		.pgd		= (kvm_pteref_t)kvm->mm->pgd,
-		.ia_bits	= vabits_actual,
+		.ia_bits	= IS_ENABLED(CONFIG_ARM64_LPA2) ? 48
+								: vabits_actual,
 		.start_level	= (KVM_PGTABLE_MAX_LEVELS -
 				   ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits)),
 		.mm_ops		= &kvm_user_mm_ops,
@@ -1905,6 +1906,8 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
 	idmap_bits = IDMAP_VA_BITS;
 	kernel_bits = vabits_actual;
 	*hyp_va_bits = max(idmap_bits, kernel_bits);
+	if (IS_ENABLED(CONFIG_ARM64_LPA2))
+		*hyp_va_bits = 48;	/* LPA2 is not yet supported in KVM */
 
 	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
 	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 91b22a014610..796ffc1cc529 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -59,12 +59,13 @@ static void init_hyp_physvirt_offset(void)
  */
 __init void kvm_compute_layout(void)
 {
+	u64 vabits = IS_ENABLED(CONFIG_ARM64_LPA2) ? 48 : vabits_actual; /* not yet supported */
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
 
 	/* Where is my RAM region? */
-	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
-	hyp_va_msb ^= BIT(vabits_actual - 1);
+	hyp_va_msb  = idmap_addr & BIT(vabits - 1);
+	hyp_va_msb ^= BIT(vabits - 1);
 
 	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
 			(u64)(high_memory - 1));
@@ -72,9 +73,9 @@ __init void kvm_compute_layout(void)
 	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
 	tag_val = hyp_va_msb;
 
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits - 1)) {
 		/* We have some free bits to insert a random tag. */
-		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+		tag_val |= get_random_long() & GENMASK_ULL(vabits - 2, tag_lsb);
 	}
 	tag_val >>= tag_lsb;
 
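For context on the va_layout.c hunks: the HYP VA range is derived by
flipping the top VA bit of the idmap address, so the bit position used
must match the clamped width, or HYP mappings would land outside the
48-bit range the rest of KVM now assumes. Below is a minimal user-space
sketch of that bit flip; the idmap address is a made-up example value,
not taken from the patch.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1ULL << (n))

int main(void)
{
	/* Made-up example idmap physical address, for illustration only. */
	uint64_t idmap_addr = 0x80000000ULL;
	uint64_t vabits = 48;	/* clamped width while LPA2 is unsupported */
	uint64_t hyp_va_msb;

	/* Take the top VA bit of the idmap address... */
	hyp_va_msb = idmap_addr & BIT(vabits - 1);
	/* ...and flip it, so HYP uses the half not covered by the idmap. */
	hyp_va_msb ^= BIT(vabits - 1);

	printf("hyp_va_msb = 0x%016llx\n", (unsigned long long)hyp_va_msb);
	return 0;
}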
-- 
2.42.0.283.g2d96d420d3-goog