[PATCH v6 10/25] KVM: arm64: iommu: Support DABT for IOMMU
Mostafa Saleh
smostafa at google.com
Fri May 1 04:19:12 PDT 2026
The pKVM SMMUv3 driver needs to trap and emulate access to the MMIO
space of the SMMUv3 to provide emulation for the kernel driver.
Add a DABT handler so that IOMMU drivers are able to perform this emulation.
When the host causes a data abort, first check whether it is part of
IOMMU emulation.
Signed-off-by: Mostafa Saleh <smostafa at google.com>
---
arch/arm64/kvm/hyp/include/nvhe/iommu.h | 3 ++-
arch/arm64/kvm/hyp/nvhe/iommu/iommu.c | 15 +++++++++++++++
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 15 +++++++++++++++
3 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
index eba94b4f6050..e1b6f16391cc 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -8,6 +8,7 @@
struct kvm_iommu_ops {
int (*init)(void);
int (*host_stage2_idmap)(phys_addr_t start, phys_addr_t end, int prot);
+ bool (*dabt_handler)(struct user_pt_regs *regs, u64 esr, u64 addr);
};
int kvm_iommu_init(void *pool_base, unsigned int nr_pages);
@@ -18,5 +19,5 @@ int kvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,
/* Returns zeroed memory. */
void *kvm_iommu_donate_pages(u8 order);
void kvm_iommu_reclaim_pages(void *ptr);
-
+bool kvm_iommu_host_dabt_handler(struct user_pt_regs *regs, u64 esr, u64 addr);
#endif /* __ARM64_KVM_NVHE_IOMMU_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
index 53cb5e4b0aac..b1474db016e5 100644
--- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
@@ -4,6 +4,10 @@
*
* Copyright (C) 2022 Linaro Ltd.
*/
+#include <asm/kvm_hyp.h>
+
+#include <hyp/adjust_pc.h>
+
#include <linux/iommu.h>
#include <nvhe/iommu.h>
@@ -139,3 +143,14 @@ void kvm_iommu_reclaim_pages(void *ptr)
{
hyp_put_page(&iommu_pages_pool, ptr);
}
+
+bool kvm_iommu_host_dabt_handler(struct user_pt_regs *regs, u64 esr, u64 addr)
+{
+ if (kvm_iommu_ops && kvm_iommu_ops->dabt_handler &&
+ kvm_iommu_ops->dabt_handler(regs, esr, addr)) {
+ /* DABT handled by the driver, skip to next instruction. */
+ kvm_skip_host_instr();
+ return true;
+ }
+ return false;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index b54cb72ed88c..5c64007dba4d 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -788,6 +788,12 @@ static void host_inject_mem_abort(struct kvm_cpu_context *host_ctxt)
inject_host_exception(esr);
}
+static bool is_mmio_dabt(u64 esr)
+{
+ return (ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_LOW) &&
+ (esr & ESR_ELx_ISV);
+}
+
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
struct kvm_vcpu_fault_info fault;
@@ -810,6 +816,15 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;
+ /*
+ * Emulate data aborts for IOMMU drivers, other access will be denied
+ * by host_stage2_adjust_range()
+ */
+ if (is_mmio_dabt(esr) && !addr_is_memory(addr) &&
+ kvm_iommu_host_dabt_handler(&host_ctxt->regs,
+ esr, addr | FAR_TO_FIPA_OFFSET(fault.far_el2)))
+ return;
+
switch (host_stage2_idmap(addr)) {
case -EPERM:
host_inject_mem_abort(host_ctxt);
--
2.54.0.545.g6539524ca2-goog
More information about the linux-arm-kernel
mailing list