[PATCH 1/3] arm64: VHE: Enable EL2 MMU from the idmap

Marc Zyngier <maz@kernel.org>
Wed Feb 24 04:37:36 EST 2021


Enabling the MMU requires the write to SCTLR_ELx (and the ISB
that follows) to live in some identity-mapped memory. Otherwise,
the translation will result in something totally unexpected
(either fetching the wrong instruction stream, or taking a
fault of some sort).
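
For reference, the critical sequence is roughly the following (a
minimal sketch, not the code touched by this patch; the SCTLR value
and the register choice are purely illustrative):

	// Must run from identity-mapped memory: the instruction
	// fetched after the ISB already goes through the MMU.
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON	// illustrative SCTLR value
	msr	sctlr_el1, x0			// turn the MMU on
	isb					// synchronise the instruction stream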

This is exactly what happens in mutate_to_vhe(), as this code
lives in the .hyp.text section, which isn't identity-mapped.
With the right configuration, this explodes badly.

Extract the MMU-enabling part of mutate_to_vhe() into its own
function, enter_vhe(), which lives in the idmap. This ensures the
SCTLR write and the ISB that follows are fetched from identity-mapped
memory, and nothing bad happens.
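
(For background, and not part of this patch: .idmap.text is gathered
by the arm64 linker script into the region covered by the identity
map, roughly as below, paraphrased from
arch/arm64/kernel/vmlinux.lds.S.)

	#define IDMAP_TEXT				\
		. = ALIGN(SZ_4K);			\
		__idmap_text_start = .;			\
		*(.idmap.text)				\
		__idmap_text_end = .;

The __idmap_text_{start,end} markers are what the early boot code
uses to build the 1:1 mapping, so code placed in the section via
".pushsection .idmap.text" runs at the same address whether the MMU
is off or translating through the idmap page tables.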

Fixes: f359182291c7 ("arm64: Provide an 'upgrade to VHE' stub hypercall")
Reported-by: "kernelci.org bot" <bot@kernelci.org>
Tested-by: Guillaume Tucker <guillaume.tucker@collabora.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kernel/hyp-stub.S | 39 ++++++++++++++++++++++++------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 678cd2c618ee..ae56787ea7c1 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -75,9 +75,6 @@ SYM_CODE_END(el1_sync)
 
 // nVHE? No way! Give me the real thing!
 SYM_CODE_START_LOCAL(mutate_to_vhe)
-	// Be prepared to fail
-	mov_q	x0, HVC_STUB_ERR
-
 	// Sanity check: MMU *must* be off
 	mrs	x1, sctlr_el2
 	tbnz	x1, #0, 1f
@@ -96,8 +93,11 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	cmp	x1, xzr
 	and	x2, x2, x1
 	csinv	x2, x2, xzr, ne
-	cbz	x2, 1f
+	cbnz	x2, 2f
 
+1:	mov_q	x0, HVC_STUB_ERR
+	eret
+2:
 	// Engage the VHE magic!
 	mov_q	x0, HCR_HOST_VHE_FLAGS
 	msr	hcr_el2, x0
@@ -131,6 +131,24 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	msr	mair_el1, x0
 	isb
 
+	// Hack the exception return to stay at EL2
+	mrs	x0, spsr_el1
+	and	x0, x0, #~PSR_MODE_MASK
+	mov	x1, #PSR_MODE_EL2h
+	orr	x0, x0, x1
+	msr	spsr_el1, x0
+
+	b	enter_vhe
+SYM_CODE_END(mutate_to_vhe)
+
+	// At the point where we reach enter_vhe(), we run with
+	// the MMU off (which is enforced by mutate_to_vhe()).
+	// We thus need to be in the idmap, or everything will
+	// explode when enabling the MMU.
+
+	.pushsection	.idmap.text, "ax"
+
+SYM_CODE_START_LOCAL(enter_vhe)
 	// Invalidate TLBs before enabling the MMU
 	tlbi	vmalle1
 	dsb	nsh
@@ -143,17 +161,12 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
 	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
 	msr_s	SYS_SCTLR_EL12, x0
 
-	// Hack the exception return to stay at EL2
-	mrs	x0, spsr_el1
-	and	x0, x0, #~PSR_MODE_MASK
-	mov	x1, #PSR_MODE_EL2h
-	orr	x0, x0, x1
-	msr	spsr_el1, x0
-
 	mov	x0, xzr
 
-1:	eret
-SYM_CODE_END(mutate_to_vhe)
+	eret
+SYM_CODE_END(enter_vhe)
+
+	.popsection
 
 .macro invalid_vector	label
 SYM_CODE_START_LOCAL(\label)
-- 
2.29.2