[PATCH v2 1/9] arm64: Rename the VHE switch to "finalise_el2"
Marc Zyngier
maz@kernel.org
Thu Jun 30 09:04:52 PDT 2022
As we are about to perform a lot more in 'mutate_to_vhe' than
we currently do, this function really becomes the point where
we finalise the basic EL2 configuration.
Reflect this in the code by renaming a bunch of things:
- HVC_VHE_RESTART -> HVC_FINALISE_EL2
- switch_to_vhe -> finalise_el2
- mutate_to_vhe -> __finalise_el2
No functional changes.
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
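Note: the stub call convention itself is unchanged by the rename. As a
minimal sketch of the caller side (mirroring the finalise_el2 sequence
in the hyp-stub.S hunk below), the EL1 code simply loads the hypercall
number and traps to the EL2 stub vectors, where elx_sync dispatches on
x0:

	// EL1 side: request EL2 finalisation from the stub.
	// elx_sync compares x0 against the HVC_* constants and
	// branches to __finalise_el2 on a match.
	mov	x0, #HVC_FINALISE_EL2
	hvc	#0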
Documentation/virt/kvm/arm/hyp-abi.rst | 11 ++++++-----
arch/arm64/include/asm/virt.h | 4 ++--
arch/arm64/kernel/head.S | 6 +++---
arch/arm64/kernel/hyp-stub.S | 21 ++++++++++-----------
arch/arm64/kernel/sleep.S | 2 +-
5 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/Documentation/virt/kvm/arm/hyp-abi.rst b/Documentation/virt/kvm/arm/hyp-abi.rst
index 4d43fbc25195..412b276449d3 100644
--- a/Documentation/virt/kvm/arm/hyp-abi.rst
+++ b/Documentation/virt/kvm/arm/hyp-abi.rst
@@ -60,12 +60,13 @@ these functions (see arch/arm{,64}/include/asm/virt.h):
* ::
- x0 = HVC_VHE_RESTART (arm64 only)
+ x0 = HVC_FINALISE_EL2 (arm64 only)
- Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
- the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU
- being off, and VHE not being disabled by any other means (command line
- option, for example).
+ Finish configuring EL2 depending on the command-line options,
+ including an attempt to upgrade the kernel's exception level from
+ EL1 to EL2 by enabling the VHE mode. This is conditioned by the CPU
+ supporting VHE, the EL2 MMU being off, and VHE not being disabled by
+ any other means (command line option, for example).
Any other value of r0/x0 triggers a hypervisor-specific handling,
which is not documented here.
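The conditions above map onto explicit checks at the top of the renamed
__finalise_el2. A condensed sketch of the first two checks follows; the
"fail" label is illustrative, and ID_AA64MMFR1_VHE_SHIFT comes from the
kernel's sysreg definitions of this era rather than from this patch's
context:

	// The EL2 MMU must be off: bail out if SCTLR_EL2.M (bit 0) is set.
	mrs	x1, sctlr_el2
	tbnz	x1, #0, fail
	// The CPU must implement VHE: ID_AA64MMFR1_EL1.VH must be non-zero.
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
	cbz	x1, fail
	// The command-line VHE override is checked next in the real code.

The MMU check appears verbatim in the hyp-stub.S hunk further down.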
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 0e80db4327b6..dec6eee0eda5 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -36,9 +36,9 @@
#define HVC_RESET_VECTORS 2
/*
- * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
*/
-#define HVC_VHE_RESTART 3
+#define HVC_FINALISE_EL2 3
/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 4
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 6bf685f988f1..04ebfe663eae 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -449,7 +449,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
mov x0, x22 // pass FDT address in x0
bl init_feature_override // Parse cpu feature overrides
mov x0, x20
- bl switch_to_vhe // Prefer VHE if possible
+ bl finalise_el2 // Prefer VHE if possible
ldp x29, x30, [sp], #16
bl start_kernel
ASM_BUG()
@@ -532,7 +532,7 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
eret
__cpu_stick_to_vhe:
- mov x0, #HVC_VHE_RESTART
+ mov x0, #HVC_FINALISE_EL2
hvc #0
mov x0, #BOOT_CPU_MODE_EL2
ret
@@ -582,7 +582,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
* Common entry point for secondary CPUs.
*/
mov x20, x0 // preserve boot mode
- bl switch_to_vhe
+ bl finalise_el2
bl __cpu_secondary_check52bitva
bl __cpu_setup // initialise processor
adrp x1, swapper_pg_dir
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 5bafb53fafb4..571286eb443c 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -51,8 +51,8 @@ SYM_CODE_START_LOCAL(elx_sync)
msr vbar_el2, x1
b 9f
-1: cmp x0, #HVC_VHE_RESTART
- b.eq mutate_to_vhe
+1: cmp x0, #HVC_FINALISE_EL2
+ b.eq __finalise_el2
2: cmp x0, #HVC_SOFT_RESTART
b.ne 3f
@@ -73,8 +73,8 @@ SYM_CODE_START_LOCAL(elx_sync)
eret
SYM_CODE_END(elx_sync)
-// nVHE? No way! Give me the real thing!
-SYM_CODE_START_LOCAL(mutate_to_vhe)
+SYM_CODE_START_LOCAL(__finalise_el2)
+ // nVHE? No way! Give me the real thing!
// Sanity check: MMU *must* be off
mrs x1, sctlr_el2
tbnz x1, #0, 1f
@@ -140,10 +140,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
msr spsr_el1, x0
b enter_vhe
-SYM_CODE_END(mutate_to_vhe)
+SYM_CODE_END(__finalise_el2)
// At the point where we reach enter_vhe(), we run with
- // the MMU off (which is enforced by mutate_to_vhe()).
+ // the MMU off (which is enforced by __finalise_el2()).
// We thus need to be in the idmap, or everything will
// explode when enabling the MMU.
@@ -222,11 +222,11 @@ SYM_FUNC_START(__hyp_reset_vectors)
SYM_FUNC_END(__hyp_reset_vectors)
/*
- * Entry point to switch to VHE if deemed capable
+ * Entry point to finalise EL2 and switch to VHE if deemed capable
*
* w0: boot mode, as returned by init_kernel_el()
*/
-SYM_FUNC_START(switch_to_vhe)
+SYM_FUNC_START(finalise_el2)
// Need to have booted at EL2
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
@@ -236,9 +236,8 @@ SYM_FUNC_START(switch_to_vhe)
cmp x0, #CurrentEL_EL1
b.ne 1f
- // Turn the world upside down
- mov x0, #HVC_VHE_RESTART
+ mov x0, #HVC_FINALISE_EL2
hvc #0
1:
ret
-SYM_FUNC_END(switch_to_vhe)
+SYM_FUNC_END(finalise_el2)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index e36b09d942f7..617f78ad43a1 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -100,7 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
bl init_kernel_el
- bl switch_to_vhe
+ bl finalise_el2
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
--
2.34.1