[PATCH v3 4/7] KVM: arm/arm64: Allow user injection of unsupported exclusive/atomic DABT
Yicong Yang
yangyicong at huawei.com
Thu Jun 26 01:09:03 PDT 2025
From: Yicong Yang <yangyicong at hisilicon.com>
The unsupported exclusive/atomic DABT exception is handed to
userspace. Provide a way for userspace to inject this DABT into
the guest if it wants to imitate how the fault is handled on the
host.

Signed-off-by: Yicong Yang <yangyicong at hisilicon.com>
---
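Not part of the patch: a minimal userspace sketch of how a VMM might use
the new field, assuming a vcpu_fd obtained from KVM_CREATE_VCPU and that
the vCPU thread has just returned from the exit reporting the unsupported
exclusive/atomic access (the helper name is illustrative only):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Inject the unsupported exclusive/atomic DABT back into the guest. */
static int inject_excl_atomic_dabt(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	/* reserved[] and exception.pad[] must be zero */
	memset(&events, 0, sizeof(events));
	events.exception.ext_dabt_excl_atom_pending = 1;

	/* KVM takes the faulting address from the vCPU's HxFAR itself */
	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
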
 arch/arm64/include/asm/kvm_emulate.h |  1 +
 arch/arm64/include/uapi/asm/kvm.h    |  3 ++-
 arch/arm64/kvm/guest.c               |  4 ++++
 arch/arm64/kvm/inject_fault.c        | 29 ++++++++++++++++++++++++++++
 4 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 0720898f563e..df141ae77019 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -47,6 +47,7 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index ed5f3892674c..69985acda668 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -184,8 +184,9 @@ struct kvm_vcpu_events {
 		__u8 serror_pending;
 		__u8 serror_has_esr;
 		__u8 ext_dabt_pending;
+		__u8 ext_dabt_excl_atom_pending;
 		/* Align it to 8 bytes */
-		__u8 pad[5];
+		__u8 pad[4];
 		__u64 serror_esr;
 	} exception;
 	__u32 reserved[12];
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2196979a24a3..47bc09ea50c3 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -839,6 +839,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
+	bool ext_dabt_excl_atom_pending = events->exception.ext_dabt_excl_atom_pending;
 
 	if (serror_pending && has_esr) {
 		if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
@@ -855,6 +856,9 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	if (ext_dabt_pending)
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
+	if (ext_dabt_excl_atom_pending)
+		kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+
 	return 0;
 }
 
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index a640e839848e..d64650a1aefe 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -171,6 +171,35 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 		inject_abt64(vcpu, false, addr);
 }
 
+/**
+ * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
+ *				 or atomic access
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	u64 esr = 0;
+
+	/* Reuse the general DABT injection routine and modify the DFSC */
+	kvm_inject_dabt(vcpu, addr);
+
+	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
+		esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+		esr &= ~ESR_ELx_FSC;
+		esr |= ESR_ELx_FSC_EXCL_ATOMIC;
+		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+	} else {
+		esr = vcpu_read_sys_reg(vcpu, ESR_EL2);
+		esr &= ~ESR_ELx_FSC;
+		esr |= ESR_ELx_FSC_EXCL_ATOMIC;
+		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
+	}
+}
+
 /**
  * kvm_inject_pabt - inject a prefetch abort into the guest
  * @vcpu: The VCPU to receive the prefetch abort
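
Not part of the patch: a standalone sketch of the DFSC rewrite done by
kvm_inject_dabt_excl_atomic() above. The ESR_ELx_FSC_EXCL_ATOMIC value
(0x35, the "Unsupported Exclusive or Atomic access" DFSC) is assumed to
be what an earlier patch in this series defines; the sample ESR is
representative of an injected external data abort:

#include <stdio.h>
#include <stdint.h>

#define ESR_ELx_FSC		0x3fUL	/* DFSC field, ESR_ELx[5:0] */
#define ESR_ELx_FSC_EXCL_ATOMIC	0x35UL	/* assumed series definition */

int main(void)
{
	/* representative ESR: EC=0x25 (DABT), IL=1, DFSC=0x10 (ext abort) */
	uint64_t esr = 0x96000010UL;

	esr &= ~ESR_ELx_FSC;		/* clear the external-abort DFSC */
	esr |= ESR_ELx_FSC_EXCL_ATOMIC;	/* mark unsupported excl/atomic */

	printf("ESR_EL1 = %#lx, DFSC = %#lx\n",
	       (unsigned long)esr, (unsigned long)(esr & ESR_ELx_FSC));
	return 0;
}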
--
2.24.0