[PATCH v2 04/12] KVM: arm64: nVHE: Simplify __guest_exit_panic path
From: Pierre-Clément Tosi <ptosi at google.com>
Date: Wed Apr 10 01:28:39 PDT 2024

Immediately jump to __guest_exit_panic when taking an invalid EL2
exception with the nVHE host vector table, instead of first duplicating
the vCPU context check that __guest_exit_panic will perform anyway.

Fix the incorrect (probably bitrotten) __guest_exit_panic ABI comment to
reflect how the routine is used by VHE and (now) nVHE, and rename it to
__hyp_panic to better convey that it might not exit through the guest
but will always (directly or indirectly) end up executing hyp_panic().

Use CPU_LR_OFFSET to clarify that the routine returns to hyp_panic().

Signed-off-by: Pierre-Clément Tosi <ptosi at google.com>
---
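For reviewers, here is a rough, self-contained C model of the control flow
that __hyp_panic implements after this patch. It is purely illustrative:
the stub types and the helpers hyp_panic_model() and guest_exit_to_saved_lr()
are made up for this sketch and do not exist in the kernel; the authoritative
logic is the assembly in the diff below.

/*
 * Toy model (userspace C, not kernel code) of the __hyp_panic decision:
 * if no vCPU context is loaded, panic directly; otherwise plant
 * hyp_panic() in the saved LR slot (CPU_LR_OFFSET in the assembly) so
 * that restoring the hyp context "returns" into hyp_panic().
 */
#include <stdio.h>

struct kvm_vcpu { int id; };			/* stub */

struct kvm_cpu_context {			/* stub of kvm_hyp_ctxt */
	void (*lr)(void);			/* stands in for the CPU_LR_OFFSET slot */
	struct kvm_vcpu *__hyp_running_vcpu;	/* what get_loaded_vcpu reads */
};

static void hyp_panic(void)
{
	puts("hyp_panic()");
}

/* Stand-in for the __guest_exit tail: restore hyp regs, then ret to lr. */
static void guest_exit_to_saved_lr(struct kvm_cpu_context *hyp_ctxt)
{
	hyp_ctxt->lr();
}

static void hyp_panic_model(struct kvm_cpu_context *hyp_ctxt)
{
	/* "get_loaded_vcpu; cbnz": no running vCPU means hyp context is loaded. */
	if (!hyp_ctxt->__hyp_running_vcpu) {
		hyp_panic();
		return;
	}

	/* Guest loaded: exit it first, then end up in hyp_panic() anyway. */
	hyp_ctxt->lr = hyp_panic;
	guest_exit_to_saved_lr(hyp_ctxt);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };
	struct kvm_cpu_context ctxt = { .__hyp_running_vcpu = &vcpu };

	hyp_panic_model(&ctxt);		/* guest loaded: exit, then panic */
	ctxt.__hyp_running_vcpu = NULL;
	hyp_panic_model(&ctxt);		/* hyp context loaded: panic directly */
	return 0;
}
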
arch/arm64/kvm/hyp/entry.S | 14 +++++++++-----
arch/arm64/kvm/hyp/hyp-entry.S | 2 +-
arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++--
arch/arm64/kvm/hyp/nvhe/host.S | 8 +-------
4 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index bcaaf1a11b4e..6a1ce9d21e5b 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -83,7 +83,7 @@ alternative_else_nop_endif
eret
sb
-SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
+SYM_INNER_LABEL(__hyp_restore_elr_and_panic, SYM_L_GLOBAL)
// x0-x29,lr: hyp regs
stp x0, x1, [sp, #-16]!
@@ -92,13 +92,15 @@ SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
msr elr_el2, x0
ldp x0, x1, [sp], #16
-SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
- // x2-x29,lr: vcpu regs
- // vcpu x0-x1 on the stack
+SYM_INNER_LABEL(__hyp_panic, SYM_L_GLOBAL)
+ // x0-x29,lr: vcpu regs
+
+ stp x0, x1, [sp, #-16]!
// If the hyp context is loaded, go straight to hyp_panic
get_loaded_vcpu x0, x1
cbnz x0, 1f
+ ldp x0, x1, [sp], #16
b hyp_panic
1:
@@ -110,10 +112,12 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// accurate if the guest had been completely restored.
adr_this_cpu x0, kvm_hyp_ctxt, x1
adr_l x1, hyp_panic
- str x1, [x0, #CPU_XREG_OFFSET(30)]
+ str x1, [x0, #CPU_LR_OFFSET]
get_vcpu_ptr x1, x0
+ // Keep x0-x1 on the stack for __guest_exit
+
SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// x0: return code
// x1: vcpu
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 03f97d71984c..7e65ef738ec9 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -122,7 +122,7 @@ el2_error:
eret
sb
-.macro invalid_vector label, target = __guest_exit_panic
+.macro invalid_vector label, target = __hyp_panic
.align 2
SYM_CODE_START_LOCAL(\label)
b \target
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 19a7ca2c1277..9387e3a0b680 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -753,7 +753,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline void __kvm_unexpected_el2_exception(void)
{
- extern char __guest_exit_restore_elr_and_panic[];
+ extern char __hyp_restore_elr_and_panic[];
unsigned long addr, fixup;
struct kvm_exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
@@ -776,7 +776,7 @@ static inline void __kvm_unexpected_el2_exception(void)
/* Trigger a panic after restoring the hyp context. */
this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
- write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
+ write_sysreg(__hyp_restore_elr_and_panic, elr_el2);
}
#endif /* __ARM64_KVM_HYP_SWITCH_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 135cfb294ee5..7397b4f1838a 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -196,19 +196,13 @@ SYM_FUNC_END(__host_hvc)
tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
-
/* If a guest is loaded, panic out of it. */
- stp x0, x1, [sp, #-16]!
- get_loaded_vcpu x0, x1
- cbnz x0, __guest_exit_panic
- add sp, sp, #16
-
/*
* The panic may not be clean if the exception is taken before the host
* context has been saved by __host_exit or after the hyp context has
* been partially clobbered by __host_enter.
*/
- b hyp_panic
+ b __hyp_panic
.L__hyp_sp_overflow\@:
/* Switch to the overflow stack */
--
2.44.0.478.gd926399ef9-goog
--
Pierre