[PATCH v6 14/25] iommu/arm-smmu-v3-kvm: Add MMIO emulation
Mostafa Saleh
smostafa at google.com
Fri May 1 04:19:16 PDT 2026
Add a data abort handler for the SMMUs; at the moment most registers
are just passed through.
The next patches will add CMDQ/STE emulation, which inserts extra
logic into some of these register accesses.
Signed-off-by: Mostafa Saleh <smostafa at google.com>
---
.../iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c | 143 ++++++++++++++++++
.../iommu/arm/arm-smmu-v3/pkvm/arm_smmu_v3.h | 10 ++
2 files changed, 153 insertions(+)
diff --git a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
index d9945db9e102..cce5a51b4656 100644
--- a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
@@ -8,6 +8,7 @@
#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>
+#include <nvhe/trap_handler.h>
#include "arm_smmu_v3.h"
#include "../arm-smmu-v3.h"
@@ -106,6 +107,7 @@ static int smmu_init_device(struct hyp_arm_smmu_v3_device *smmu)
if (!PAGE_ALIGNED(smmu->mmio_addr | smmu->mmio_size))
return -EINVAL;
+ hyp_spin_lock_init(&smmu->lock);
ret = __pkvm_host_donate_hyp_mmio(smmu->mmio_addr, smmu->mmio_size, &haddr);
if (ret)
return ret;
@@ -144,6 +146,8 @@ static int smmu_init(void)
goto out_reclaim_smmu;
}
+ BUILD_BUG_ON(sizeof(hyp_spinlock_t) != sizeof(u32));
+
return 0;
out_reclaim_smmu:
@@ -153,6 +157,144 @@ static int smmu_init(void)
return ret;
}
+/*
+ * Emulate a single host MMIO access to an SMMU register page.
+ * @smmu: the SMMU whose window faulted (caller holds smmu->lock)
+ * @regs: host GPRs — source register for writes, destination for reads
+ * @esr:  abort syndrome; supplies direction (WNR), size (SAS) and Rt (SRT)
+ * @off:  offset of the faulting access within the SMMU register space
+ *
+ * Decodes @off into an access mask (no access / read-only / read-write),
+ * then forwards the access to the hardware with the mask applied.
+ * Always returns true: once the abort reached this function it is
+ * consumed; disallowed or wrongly-sized accesses are WARNed and dropped
+ * rather than forwarded.
+ */
+static bool smmu_dabt_device(struct hyp_arm_smmu_v3_device *smmu,
+ struct user_pt_regs *regs,
+ u64 esr, u32 off)
+{
+ bool is_write = esr & ESR_ELx_WNR;
+ /* Access size in bytes: BIT(SAS) = 1, 2, 4 or 8. */
+ unsigned int len = BIT((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+ int rd = (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ const u64 read_write = -1ULL;
+ const u64 no_access = 0;
+ u64 mask = no_access;
+ /* Collapses to no_access when the host attempts a write. */
+ const u64 read_only = is_write ? no_access : read_write;
+ /* Rt == 31 encodes xzr/wzr, which has no backing GPR slot. */
+ bool is_xzr = (rd == 31);
+ u64 val = is_xzr ? 0 : regs->regs[rd];
+
+ switch (off) {
+ case ARM_SMMU_IDR0:
+ if (len != sizeof(u32))
+ break;
+ /* Clear stage-2 support, hide MSI to avoid write back to cmdq */
+ mask = read_only & ~(IDR0_S2P | IDR0_VMID16 | IDR0_MSI | IDR0_HYP);
+ break;
+ /* Passthrough the register access for bisectability, handled later */
+ case ARM_SMMU_CMDQ_BASE:
+ case ARM_SMMU_CMDQ_PROD:
+ case ARM_SMMU_CMDQ_CONS:
+ case ARM_SMMU_STRTAB_BASE:
+ case ARM_SMMU_STRTAB_BASE_CFG:
+ case ARM_SMMU_GBPA:
+ mask = read_write;
+ break;
+ case ARM_SMMU_CR0:
+ if (len != sizeof(u32))
+ break;
+ mask = read_write;
+ break;
+ case ARM_SMMU_CR1: {
+ /* Based on Linux implementation */
+ u64 cr1_template = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
+ FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
+ FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
+ FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
+ if (len != sizeof(u32))
+ break;
+ /* Don't mess with shareability/cacheability. */
+ if (is_write) {
+ /* Warn on unexpected values, then force the known-good template. */
+ WARN_ON(val != cr1_template);
+ val = cr1_template;
+ }
+ mask = read_write;
+ break;
+ }
+
+ /* Allowed 32 bit registers. */
+ case ARM_SMMU_EVTQ_PROD + SZ_64K:
+ case ARM_SMMU_EVTQ_CONS + SZ_64K:
+ case ARM_SMMU_EVTQ_IRQ_CFG1:
+ case ARM_SMMU_EVTQ_IRQ_CFG2:
+ case ARM_SMMU_PRIQ_PROD + SZ_64K:
+ case ARM_SMMU_PRIQ_CONS + SZ_64K:
+ case ARM_SMMU_PRIQ_IRQ_CFG1:
+ case ARM_SMMU_PRIQ_IRQ_CFG2:
+ case ARM_SMMU_GERRORN:
+ case ARM_SMMU_GERROR_IRQ_CFG1:
+ case ARM_SMMU_GERROR_IRQ_CFG2:
+ case ARM_SMMU_IRQ_CTRLACK:
+ case ARM_SMMU_IRQ_CTRL:
+ case ARM_SMMU_CR0ACK:
+ case ARM_SMMU_CR2:
+ if (len != sizeof(u32))
+ break;
+ mask = read_write;
+ break;
+ /* Allowed 64 bit registers. */
+ case ARM_SMMU_EVTQ_BASE:
+ case ARM_SMMU_EVTQ_IRQ_CFG0:
+ case ARM_SMMU_PRIQ_BASE:
+ case ARM_SMMU_PRIQ_IRQ_CFG0:
+ case ARM_SMMU_GERROR_IRQ_CFG0:
+ if (len != sizeof(u64))
+ break;
+ mask = read_write;
+ break;
+ /* Allowed RO 32 bit registers. */
+ case ARM_SMMU_IIDR:
+ case ARM_SMMU_IDR5:
+ case ARM_SMMU_IDR3:
+ case ARM_SMMU_IDR1:
+ case ARM_SMMU_GERROR:
+ if (len != sizeof(u32))
+ break;
+ mask = read_only;
+ };
+
+ /* mask == 0: unknown offset, bad size, or write to a RO register. */
+ if (WARN_ON(!mask))
+ goto out_ret;
+
+ if (is_write) {
+ if (len == sizeof(u64))
+ writeq_relaxed(val & mask, smmu->base + off);
+ else
+ writel_relaxed(val & mask, smmu->base + off);
+
+ return true;
+ }
+
+ if (len == sizeof(u64))
+ val = readq_relaxed(smmu->base + off) & mask;
+ else
+ val = readl_relaxed(smmu->base + off) & mask;
+ /*
+ * Device might be read sensitive, so do it but ignore writing
+ * back for xzr.
+ */
+ if (!is_xzr)
+ regs->regs[rd] = val;
+
+out_ret:
+ return true;
+}
+
+/*
+ * Host data-abort hook: claim aborts that land inside any SMMU MMIO
+ * window and emulate them under that SMMU's lock.  Returns false when
+ * @addr belongs to no SMMU so the abort can be handled elsewhere.
+ */
+static bool smmu_dabt_handler(struct user_pt_regs *regs, u64 esr, u64 addr)
+{
+	struct hyp_arm_smmu_v3_device *smmu;
+
+	for_each_smmu(smmu) {
+		bool handled;
+
+		/* Unsigned compare covers both addr < base and addr beyond end. */
+		if (addr - smmu->mmio_addr >= smmu->mmio_size)
+			continue;
+
+		hyp_spin_lock(&smmu->lock);
+		handled = smmu_dabt_device(smmu, regs, esr, addr - smmu->mmio_addr);
+		hyp_spin_unlock(&smmu->lock);
+		return handled;
+	}
+
+	return false;
+}
+
static int smmu_host_stage2_idmap(phys_addr_t start, phys_addr_t end, int prot)
{
return 0;
@@ -162,4 +304,5 @@ static int smmu_host_stage2_idmap(phys_addr_t start, phys_addr_t end, int prot)
struct kvm_iommu_ops smmu_ops = {
.init = smmu_init,
.host_stage2_idmap = smmu_host_stage2_idmap,
+ .dabt_handler = smmu_dabt_handler,
};
diff --git a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm_smmu_v3.h b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm_smmu_v3.h
index 82b84673e85b..263b0fef262d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm_smmu_v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm_smmu_v3.h
@@ -4,6 +4,10 @@
#include <asm/kvm_asm.h>
+#ifdef __KVM_NVHE_HYPERVISOR__
+#include <nvhe/spinlock.h>
+#endif
+
/*
* Parameters from the trusted host:
* @mmio_addr base address of the SMMU registers
@@ -15,6 +19,7 @@
* @oas PA size
* @pgsize_bitmap Supported page sizes
* @sid_bits Max number of SID bits supported
+ * @lock Lock to protect SMMU
*/
struct hyp_arm_smmu_v3_device {
phys_addr_t mmio_addr;
@@ -24,6 +29,11 @@ struct hyp_arm_smmu_v3_device {
unsigned long oas;
unsigned long pgsize_bitmap;
unsigned int sid_bits;
+#ifdef __KVM_NVHE_HYPERVISOR__
+ hyp_spinlock_t lock;
+#else
+ u32 lock;
+#endif
};
extern size_t kvm_nvhe_sym(kvm_hyp_arm_smmu_v3_count);
--
2.54.0.545.g6539524ca2-goog
More information about the linux-arm-kernel
mailing list