[PATCH 2/2] arm64: Cope with CPUs stuck in VHE mode
Marc Zyngier
maz at kernel.org
Tue Mar 30 18:00:18 BST 2021
On Mon, 29 Mar 2021 11:22:00 +0100,
Will Deacon <will at kernel.org> wrote:
>
> On Fri, Mar 26, 2021 at 11:20:18AM +0000, Marc Zyngier wrote:
> > I guess I could either make this code conditional on CONFIG_ARM64_VHE
> > and let the machine crash early without a word, or have some later
> > checks once the machine started booting. In the latter case, displaying
> > anything useful is going to be a challenge though (the odds of someone
> > having a serial console on this box are close to nil). Pick your poison.
>
> I think the best thing to do would be to fail to initialise KVM if the
> kernel is stuck at EL2 but we don't have VHE support compiled in. Is that
> do-able?
To quote someone, it is "a little ugly on the side".
I came up with the following hack. Can't say I'm in love with it,
especially the sprinkling of checks in the alternative callbacks, but
hey, I can boot the machine without CONFIG_ARM64_VHE, and get the
expected splat at boot time:
[ 0.033604] ------------[ cut here ]------------
[ 0.033850] CPU: CPUs started in inconsistent modes
[ 0.033854] WARNING: CPU: 0 PID: 1 at arch/arm64/kernel/smp.c:434 hyp_mode_check+0x90/0xc4
[ 0.034863] Modules linked in:
[ 0.035100] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.12.0-rc3-00103-geee3f110c447-dirty #3231
[ 0.035776] Hardware name: Apple Mac mini (M1, 2020) (DT)
[ 0.036192] pstate: 60400009 (nZCv daif +PAN -UAO -TCO BTYPE=--)
[ 0.036654] pc : hyp_mode_check+0x90/0xc4
[ 0.036963] lr : hyp_mode_check+0x90/0xc4
[ 0.037271] sp : ffff800010053e30
[ 0.037526] x29: ffff800010053e30 x28: 0000000000000000
[ 0.037935] x27: 0000000000000000 x26: 0000000000000000
[ 0.038344] x25: 0000000000000000 x24: 0000000000000000
[ 0.038754] x23: 0000000000000000 x22: 0000000000000000
[ 0.039163] x21: 0000000000000000 x20: ffffca3b2f53fc04
[ 0.039572] x19: ffffca3b2fac1000 x18: 0000000000000001
[ 0.039981] x17: 00000000cc4379d6 x16: 000000005c7b6156
[ 0.040391] x15: 0000000000000030 x14: ffffffffffffffff
[ 0.040800] x13: ffff800090053ab7 x12: ffff800010053ac0
[ 0.041209] x11: 0000000bbe2c6238 x10: ffffca3b2faa0ad8
[ 0.041618] x9 : ffffca3b2e310df0 x8 : fffffffffffe18b8
[ 0.042027] x7 : ffffca3b2fa481d8 x6 : 0000000000002ffd
[ 0.042437] x5 : 0000000000000000 x4 : 0000000000000000
[ 0.042846] x3 : 00000000ffffffff x2 : 0000000000000000
[ 0.043255] x1 : 0000000000000000 x0 : ffff4af181631280
[ 0.043665] Call trace:
[ 0.043852] hyp_mode_check+0x90/0xc4
[ 0.044134] smp_cpus_done+0x34/0x48
[ 0.044409] smp_init+0x80/0x90
[ 0.044651] kernel_init_freeable+0x108/0x160
[ 0.044986] kernel_init+0x20/0x12c
[ 0.045254] ret_from_fork+0x10/0x3c
[ 0.045530] ---[ end trace 0736417247c9e9a3 ]---
[...]
[ 0.616800] kvm [1]: HYP mode not available
I'll wrap that up in a separate patch, and we can then discuss whether
we really want it...
Thanks,
M.
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7379f35ae2c6..69bc4e26aa26 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -72,6 +72,11 @@ void __hyp_reset_vectors(void);
DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ return read_sysreg(CurrentEL) == CurrentEL_EL2;
+}
+
/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
@@ -83,6 +88,10 @@ static inline bool is_hyp_mode_available(void)
static_branch_likely(&kvm_protected_mode_initialized))
return true;
+ /* Catch braindead CPUs */
+ if (!IS_ENABLED(CONFIG_ARM64_VHE) && is_kernel_in_hyp_mode())
+ return false;
+
return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
}
@@ -98,12 +107,11 @@ static inline bool is_hyp_mode_mismatched(void)
static_branch_likely(&kvm_protected_mode_initialized))
return false;
- return __boot_cpu_mode[0] != __boot_cpu_mode[1];
-}
+ /* Catch braindead CPUs */
+ if (!IS_ENABLED(CONFIG_ARM64_VHE) && is_kernel_in_hyp_mode())
+ return true;
-static inline bool is_kernel_in_hyp_mode(void)
-{
- return read_sysreg(CurrentEL) == CurrentEL_EL2;
+ return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
static __always_inline bool has_vhe(void)
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 978301392d67..edb048654e00 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -156,6 +156,9 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
{
int i;
+ if (!is_hyp_mode_available())
+ return;
+
BUG_ON(nr_inst != 5);
for (i = 0; i < nr_inst; i++) {
@@ -191,6 +194,9 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
u64 addr;
u32 insn;
+ if (!is_hyp_mode_available())
+ return;
+
BUG_ON(nr_inst != 4);
if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
@@ -244,6 +250,9 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst
{
u32 insn, oinsn, rd;
+ if (!is_hyp_mode_available())
+ return;
+
BUG_ON(nr_inst != 4);
/* Compute target register */
--
Without deviation from the norm, progress is not possible.
More information about the linux-arm-kernel
mailing list