[PATCH v4 07/27] KVM: arm64: Pull ctxt_has_ helpers to start of sysreg-sr.h
Mark Brown
broonie@kernel.org
Thu Feb 13 17:57:50 PST 2025
Rather than adding earlier prototypes of specific ctxt_has_ helpers, pull all of their definitions to the top of sysreg-sr.h so they are available to all the individual save/restore functions.
Signed-off-by: Mark Brown <broonie@kernel.org>
---
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 62 +++++++++++++++---------------
1 file changed, 30 insertions(+), 32 deletions(-)
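
As an illustration of the pattern (not kernel code: demo_ctx, demo_has_feature and
demo_save are hypothetical stand-ins for kvm_cpu_context, the ctxt_has_ helpers and
the save/restore functions), the standalone C sketch below shows how defining a
static inline predicate above its callers removes the need for the forward
prototype that this patch deletes:

/* demo.c: minimal sketch of the header reorganisation pattern. */
#include <stdbool.h>
#include <stdio.h>

struct demo_ctx {
        unsigned long features;
        unsigned long saved_reg;
};

#define DEMO_FEATURE_X  (1UL << 0)

/*
 * The predicate is defined before every function that uses it, so no
 * forward prototype is needed anywhere below this point.
 */
static inline bool demo_has_feature(const struct demo_ctx *ctxt)
{
        return ctxt->features & DEMO_FEATURE_X;
}

static inline void demo_save(struct demo_ctx *ctxt, unsigned long hw_reg)
{
        /* Only save the register when the feature is present. */
        if (demo_has_feature(ctxt))
                ctxt->saved_reg = hw_reg;
}

int main(void)
{
        struct demo_ctx ctxt = { .features = DEMO_FEATURE_X };

        demo_save(&ctxt, 0x1234);
        printf("saved_reg = 0x%lx\n", ctxt.saved_reg);
        return 0;
}

After the move in the hunks below, __sysreg_save_common_state() can call
ctxt_has_s1poe() directly without the prototype that is being removed.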
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 76ff095c6b6ebf336aab3018ec1f384a5f19949e..7a4ad8a4c3727e4628b375cbefc5e0d3533687de 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -16,8 +16,6 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
-
static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
@@ -28,36 +26,6 @@ static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
return vcpu;
}
-static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
-{
- return host_data_ptr(host_ctxt) != ctxt;
-}
-
-static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
-{
- struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
-
- if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu))
- return &vcpu->arch.external_mdscr_el1;
-
- return &ctxt_sys_reg(ctxt, MDSCR_EL1);
-}
-
-static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
-{
- *ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);
-
- // POR_EL0 can affect uaccess, so must be saved/restored early.
- if (ctxt_has_s1poe(ctxt))
- ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0);
-}
-
-static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
-{
- ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
- ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
-}
-
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
@@ -98,6 +66,36 @@ static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
return kvm_has_s1poe(kern_hyp_va(vcpu->kvm));
}
+static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
+{
+ return host_data_ptr(host_ctxt) != ctxt;
+}
+
+static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
+ if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu))
+ return &vcpu->arch.external_mdscr_el1;
+
+ return &ctxt_sys_reg(ctxt, MDSCR_EL1);
+}
+
+static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+{
+ *ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);
+
+ // POR_EL0 can affect uaccess, so must be saved/restored early.
+ if (ctxt_has_s1poe(ctxt))
+ ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0);
+}
+
+static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
+{
+ ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
+ ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
--
2.39.5