[PATCH v6 17/25] iommu/arm-smmu-v3-kvm: Emulate CMDQ for host
Mostafa Saleh
smostafa at google.com
Fri May 1 04:19:19 PDT 2026
Don’t allow access to the command queue from the host:
- ARM_SMMU_CMDQ_BASE: Only allowed to be written when the CMDQ is disabled; we
use it to keep track of the host command queue base.
Reads return the saved value.
- ARM_SMMU_CMDQ_PROD: Writes trigger command queue emulation, which sanitises
and filters the whole range. Reads return the host copy.
- ARM_SMMU_CMDQ_CONS: Writes move the sw copy of the cons, but the host
can’t skip commands once submitted. Reads return the emulated value and
the error bits in the actual cons.
Signed-off-by: Mostafa Saleh <smostafa at google.com>
---
.../iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c | 128 +++++++++++++++++-
1 file changed, 124 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
index aac455599728..1633a3cf8a3b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
@@ -100,7 +100,6 @@ static int smmu_unshare_pages(phys_addr_t addr, size_t size)
return 0;
}
-__maybe_unused
static bool smmu_cmdq_has_space(struct arm_smmu_queue *cmdq, u32 n)
{
struct arm_smmu_ll_queue *llq = &cmdq->llq;
@@ -351,6 +350,92 @@ static int smmu_init(void)
return ret;
}
+static bool smmu_filter_command(struct hyp_arm_smmu_v3_device *smmu, u64 *command)
+{
+ u64 command0 = le64_to_cpu(command[0]);
+ u64 command1 = le64_to_cpu(command[1]);
+ u64 type = FIELD_GET(CMDQ_0_OP, command0);
+
+ switch (type) {
+ case CMDQ_OP_CFGI_STE:
+ /* TBD: SHADOW_STE — currently passed through unfiltered. */
+ break;
+ case CMDQ_OP_CFGI_ALL:
+ {
+ /*
+ * Linux doesn't use ranged STE invalidation, and only issues this
+ * command as CFGI_ALL (range == 31), which happens on reset and
+ * not when a new STE is being used.
+ * Although this is not architectural, we rely on the current
+ * Linux implementation and filter any narrower range.
+ */
+ if ((FIELD_GET(CMDQ_CFGI_1_RANGE, command1) != 31))
+ return true;
+ break;
+ }
+ case CMDQ_OP_TLBI_NH_ASID:
+ case CMDQ_OP_TLBI_NH_VA:
+ case 0x13: /* CMD_TLBI_NH_VAA: Not used by Linux */
+ {
+ /* Only allow VMID 0 (the host); filter any other VMID. */
+ if (FIELD_GET(CMDQ_TLBI_0_VMID, command0) != 0)
+ return true;
+ break;
+ }
+ case 0x10: /* CMD_TLBI_NH_ALL: Not used by Linux */
+ case CMDQ_OP_TLBI_EL2_ALL:
+ case CMDQ_OP_TLBI_EL2_VA:
+ case CMDQ_OP_TLBI_EL2_ASID:
+ case CMDQ_OP_TLBI_S12_VMALL:
+ case CMDQ_OP_TLBI_S2_IPA:
+ case 0x23: /* CMD_TLBI_EL2_VAA: Not used by Linux */
+ return true;
+ case CMDQ_OP_CMD_SYNC:
+ if (FIELD_GET(CMDQ_SYNC_0_CS, command0) == CMDQ_SYNC_0_CS_IRQ) {
+ /* Downgrade IRQ completion to SEV and drop the MSI address; the host just times out. */
+ command0 &= ~CMDQ_SYNC_0_CS;
+ command0 |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
+ command1 &= ~CMDQ_SYNC_1_MSIADDR_MASK;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static int smmu_emulate_cmdq_insert(struct hyp_arm_smmu_v3_device *smmu)
+{
+ u64 *host_cmdq = hyp_phys_to_virt(smmu->cmdq_host.q_base & Q_BASE_ADDR_MASK);
+ bool use_wfe = smmu->features & ARM_SMMU_FEAT_SEV, skip;
+ u64 cmd[CMDQ_ENT_DWORDS];
+ int idx, ret;
+ u32 space;
+
+ if (!is_cmdq_enabled(smmu)) /* nothing to emulate while the CMDQ is off */
+ return 0;
+
+ space = (1 << (smmu->cmdq_host.llq.max_n_shift)) - queue_space(&smmu->cmdq_host.llq);
+ /* Wait until the real command queue can take all pending host commands. */
+ ret = smmu_wait(use_wfe, smmu_cmdq_has_space(&smmu->cmdq, space));
+ if (ret)
+ return ret;
+
+ while (space--) {
+ idx = Q_IDX(&smmu->cmdq_host.llq, smmu->cmdq_host.llq.cons);
+ queue_inc_cons(&smmu->cmdq_host.llq);
+
+ memcpy(cmd, &host_cmdq[idx * CMDQ_ENT_DWORDS], CMDQ_ENT_DWORDS << 3);
+ skip = smmu_filter_command(smmu, cmd);
+ if (WARN_ON(skip)) /* drop disallowed commands loudly */
+ continue;
+ smmu_add_cmd_raw(smmu, cmd);
+ }
+
+ writel(smmu->cmdq.llq.prod, smmu->cmdq.prod_reg); /* publish new prod to the hardware */
+
+ return smmu_wait(use_wfe, smmu_cmdq_empty(&smmu->cmdq));
+}
+
static void smmu_emulate_cmdq_enable(struct hyp_arm_smmu_v3_device *smmu)
{
u32 shift = smmu->cmdq_host.q_base & Q_BASE_LOG2SIZE;
@@ -388,18 +473,51 @@ static bool smmu_dabt_device(struct hyp_arm_smmu_v3_device *smmu,
/* Clear stage-2 support, hide MSI to avoid write back to cmdq */
mask = read_only & ~(IDR0_S2P | IDR0_VMID16 | IDR0_MSI | IDR0_HYP);
break;
- /* Passthrough the register access for bisectiblity, handled later */
case ARM_SMMU_CMDQ_BASE:
+ /*
+ * Although smaller accesses are architecturally allowed, we rely on
+ * the SMMUv3 driver using a single 64-bit store for simplicity.
+ */
+ if (len != sizeof(u64))
+ break;
if (is_write) {
/* Not allowed by the architecture */
if (WARN_ON(is_cmdq_enabled(smmu)))
break;
smmu->cmdq_host.q_base = val;
+ goto out_ret;
+ } else {
+ val = smmu->cmdq_host.q_base;
+ goto out_update_regs;
}
- mask = read_write;
- break;
case ARM_SMMU_CMDQ_PROD:
+ if (len != sizeof(u32))
+ break;
+ if (is_write) {
+ smmu->cmdq_host.llq.prod = val;
+ WARN_ON(smmu_emulate_cmdq_insert(smmu));
+ goto out_ret;
+ } else {
+ val = smmu->cmdq_host.llq.prod;
+ goto out_update_regs;
+ }
case ARM_SMMU_CMDQ_CONS:
+ if (len != sizeof(u32))
+ break;
+ if (is_write) {
+ if (WARN_ON(is_cmdq_enabled(smmu)))
+ break;
+
+ smmu->cmdq_host.llq.cons = val;
+ goto out_ret;
+ } else {
+ /* Propagate errors back to the host. */
+ u32 cons = readl_relaxed(smmu->base + ARM_SMMU_CMDQ_CONS);
+
+ val = smmu->cmdq_host.llq.cons | (CMDQ_CONS_ERR & cons);
+ goto out_update_regs;
+ }
+ /* Passthrough the register access for bisectability, handled later */
case ARM_SMMU_STRTAB_BASE:
case ARM_SMMU_STRTAB_BASE_CFG:
case ARM_SMMU_GBPA:
@@ -495,6 +613,8 @@ static bool smmu_dabt_device(struct hyp_arm_smmu_v3_device *smmu,
val = readq_relaxed(smmu->base + off) & mask;
else
val = readl_relaxed(smmu->base + off) & mask;
+
+out_update_regs:
/*
* Device might be read senstive, so do it but ignore writing
* back for xzr.
--
2.54.0.545.g6539524ca2-goog
More information about the linux-arm-kernel
mailing list