[RFC PATCH v6 24/35] KVM: arm64: Handle SPE hardware maintenance interrupts
Alexandru Elisei
alexandru.elisei at arm.com
Fri Nov 14 08:07:05 PST 2025
Re-inject all maintenance interrupts raised by SPE while the guest was
running.

Save the value of the hardware PMBSR_EL1 register in a separate
variable, instead of updating the VCPU sysreg directly, so KVM can tell
when SPE asserted the interrupt, as opposed to the guest writing 1 to
the PMBSR_EL1.S bit.
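
The resulting flow looks roughly like this (a simplified sketch; the
function names match the diff below):

    guest exits
      -> __kvm_spe_save_guest_buffer()   (hyp, VHE)
           reads PMBSR_EL1, clears it, and stashes the value in
           vcpu->arch.vcpu_spe.hw_pmbsr_el1
    back in the run loop:
      -> kvm_spe_sync_hwstate()
           if PMBSR_EL1.S is set in hw_pmbsr_el1:
             copy hw_pmbsr_el1 into the emulated PMBSR_EL1, clear
             hw_pmbsr_el1 and raise the virtual buffer management
             interrupt via kvm_spe_update_irq_level()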
Signed-off-by: Alexandru Elisei <alexandru.elisei at arm.com>
---
arch/arm64/include/asm/kvm_spe.h | 7 +++++++
arch/arm64/kvm/arm.c | 2 ++
arch/arm64/kvm/hyp/vhe/spe-sr.c | 2 +-
arch/arm64/kvm/spe.c | 27 +++++++++++++++++++++++++++
4 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_spe.h b/arch/arm64/include/asm/kvm_spe.h
index a61c1c1de76f..7d8becf76314 100644
--- a/arch/arm64/include/asm/kvm_spe.h
+++ b/arch/arm64/include/asm/kvm_spe.h
@@ -17,9 +17,11 @@ struct kvm_spe {
};
struct kvm_vcpu_spe {
+ u64 hw_pmbsr_el1; /* Updated on hardware management event */
u64 host_pmscr_el2; /* Host PMSCR_EL2 register, context switched. */
int irq_num; /* Buffer management interrupt number */
bool initialized; /* SPE initialized for the VCPU */
+ bool irq_level; /* Virtual buffer management interrupt level */
};
DECLARE_STATIC_KEY_FALSE(kvm_spe_available);
@@ -56,6 +58,8 @@ bool kvm_spe_has_feat_spe_fds(struct kvm *kvm);
void kvm_vcpu_spe_load(struct kvm_vcpu *vcpu);
void kvm_vcpu_spe_put(struct kvm_vcpu *vcpu);
+
+void kvm_spe_sync_hwstate(struct kvm_vcpu *vcpu);
#else
struct kvm_spe {
};
@@ -111,6 +115,9 @@ static inline void kvm_vcpu_spe_load(struct kvm_vcpu *vcpu)
static inline void kvm_vcpu_spe_put(struct kvm_vcpu *vcpu)
{
}
+static inline void kvm_spe_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+}
#endif /* CONFIG_KVM_ARM_SPE */
#endif /* __ARM64_KVM_SPE_H__ */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c5f5d5dbd695..a2c97daece24 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1272,6 +1272,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (kvm_vcpu_has_pmu(vcpu))
kvm_pmu_sync_hwstate(vcpu);
+ kvm_spe_sync_hwstate(vcpu);
+
/*
* Sync the vgic state before syncing the timer state because
* the timer code needs to know if the virtual timer
diff --git a/arch/arm64/kvm/hyp/vhe/spe-sr.c b/arch/arm64/kvm/hyp/vhe/spe-sr.c
index fb8614435069..0fb625eadfa1 100644
--- a/arch/arm64/kvm/hyp/vhe/spe-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/spe-sr.c
@@ -75,6 +75,6 @@ void __kvm_spe_save_guest_buffer(struct kvm_vcpu *vcpu, struct kvm_cpu_context *
write_sysreg_s(0, SYS_PMBSR_EL1);
isb();
/* PMBSR_EL1 changed while the VCPU was running, save it */
- ctxt_sys_reg(guest_ctxt, PMBSR_EL1) = pmbsr_el1;
+ vcpu->arch.vcpu_spe.hw_pmbsr_el1 = pmbsr_el1;
}
NOKPROBE_SYMBOL(__kvm_spe_save_guest_buffer);
diff --git a/arch/arm64/kvm/spe.c b/arch/arm64/kvm/spe.c
index 85a1ac8bb57f..d163ddfdd8e2 100644
--- a/arch/arm64/kvm/spe.c
+++ b/arch/arm64/kvm/spe.c
@@ -178,6 +178,33 @@ u64 kvm_spe_read_sysreg(struct kvm_vcpu *vcpu, int reg, u32 encoding)
return val;
}
+static void kvm_spe_update_irq_level(struct kvm_vcpu *vcpu, bool level)
+{
+ struct kvm_vcpu_spe *vcpu_spe = &vcpu->arch.vcpu_spe;
+ int ret;
+
+ if (vcpu_spe->irq_level == level)
+ return;
+
+ vcpu_spe->irq_level = level;
+ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu, vcpu_spe->irq_num, level, vcpu_spe);
+ WARN_ONCE(ret, "kvm_vgic_inject_irq");
+}
+
+void kvm_spe_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_spe *vcpu_spe = &vcpu->arch.vcpu_spe;
+
+ if (!vcpu_has_spe(vcpu))
+ return;
+
+ if (FIELD_GET(PMBSR_EL1_S, vcpu_spe->hw_pmbsr_el1)) {
+ __vcpu_assign_sys_reg(vcpu, PMBSR_EL1, vcpu_spe->hw_pmbsr_el1);
+ vcpu_spe->hw_pmbsr_el1 = 0;
+ kvm_spe_update_irq_level(vcpu, true);
+ }
+}
+
static void kvm_spe_save_sampling_regs(struct kvm_vcpu *vcpu, struct kvm_cpu_context *ctxt)
{
struct kvm *kvm = vcpu->kvm;
--
2.51.2