[PATCH v9 4/6] iommu/arm-smmu-v3: Add CS_NONE quirk for CONFIG_TEGRA241_CMDQV
Nicolin Chen
nicolinc at nvidia.com
Fri Jul 5 11:10:42 PDT 2024
Hi Will,
On Fri, Jul 05, 2024 at 04:27:21PM +0100, Will Deacon wrote:
> On Tue, Jul 02, 2024 at 01:10:19PM -0700, Nicolin Chen wrote:
> > On Tue, Jul 02, 2024 at 12:47:14PM -0700, Nicolin Chen wrote:
> > > @@ -345,6 +345,11 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
> > > FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH) |
> > > FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
> > >
> > > + if (cmdq->type == TEGRA241_VCMDQ) {
> > > + cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_NONE);
> > > + return;
> > > + }
> > > +
> > > if (!(smmu->options & ARM_SMMU_OPT_MSIPOLL)) {
> > > cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
> > > return;
> > > @@ -690,7 +695,8 @@ static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
> > > struct arm_smmu_cmdq *cmdq,
> > > struct arm_smmu_ll_queue *llq)
> > > {
> > > - if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
> > > + if (smmu->options & ARM_SMMU_OPT_MSIPOLL &&
> > > +     cmdq->type != TEGRA241_VCMDQ)
> > > return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
> > >
> > > --------------------------------------------------------------
> > >
> > > Would you prefer this one? I feel CMDQ_QUIRK_SYNC_CS_NONE_ONLY
> > > looks more general though..
> >
> > And we would need some additional comments for the two pieces
> > above, explaining why the TEGRA241_VCMDQ type needs the first one
> > while bypassing the second. Again, it feels even worse :(
>
> I hacked the code around a bit this afternoon. Please can you see if:
>
> https://git.kernel.org/pub/scm/linux/kernel/git/will/linux.git/log/?h=for-nicolin/grace-vcmdq-wip
>
> does roughly what you need?
I appreciate the patch. Yet, we cannot use IORT's model field. For
one thing, that would have to go through the IORT documentation
process. For another, we had a very long discussion with ARM (Robin
was there) years ago and concluded that this CMDQV would not be a
model in IORT but a DSDT node acting as an extension. So, this is
firm...
With that, we cannot avoid an unconditional, hard-coded tegra
function call even if we switch to an impl design:
+static int acpi_smmu_impl_init(u32 model, struct arm_smmu_device *smmu)
+{
+	/*
+	 * Unconditionally go through the ACPI tables to detect whether there
+	 * is a tegra241 implementation that extends the SMMU with a CMDQV.
+	 * The probe() will fill the smmu->impl pointer upon success;
+	 * otherwise, fall back to the regular SMMU CMDQ.
+	 */
+ tegra241_impl_acpi_probe(smmu);
+ return 0;
+}
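For illustration only, here is a rough sketch of what that probe
could do with the DSDT node approach. The "NVDA200C" _HID and the
tegra241_cmdqv_impl_init() helper are assumptions of mine for the
sketch, not the actual implementation:

static void tegra241_impl_acpi_probe(struct arm_smmu_device *smmu)
{
	struct acpi_device *adev;

	/* Look up the CMDQV DSDT node by _HID ("NVDA200C" is assumed) */
	adev = acpi_dev_get_first_match_dev("NVDA200C", NULL, -1);
	if (!adev)
		return; /* No CMDQV node: keep using the regular SMMU CMDQ */

	/* Hypothetical helper that maps registers and sets smmu->impl */
	tegra241_cmdqv_impl_init(smmu, adev);
	acpi_dev_put(adev);
}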
As for arm_smmu_cmdq_needs_busy_polling(), it doesn't really look
optimal to me. But if you insist on having an smmu option, we still
have to take PATCH-3 in this series, enforcing an
arm_smmu_cmdq_build_sync_cmd() call in the IRQ handler too. So it
would eventually look like the attachment below.
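To spell out what the attachment does to the CMD_SYNC CS field, here
is a paraphrase of the resulting selection in
arm_smmu_cmdq_build_sync_cmd() rather than a literal excerpt
(CMDQ_SYNC_0_CS_IRQ being the value the existing MSI path uses):

	u32 cs;

	if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
		cs = CMDQ_SYNC_0_CS_NONE; /* VCMDQ: no MSI/SEV, busy-poll CONS */
	else if (!(smmu->options & ARM_SMMU_OPT_MSIPOLL))
		cs = CMDQ_SYNC_0_CS_SEV;  /* wait with WFE/SEV */
	else
		cs = CMDQ_SYNC_0_CS_IRQ;  /* wait for the MSI write */
	cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, cs);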
Thanks!
Nicolin
-------------- next part --------------
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 3e2eb88535de..e57ea8d39c98 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -352,15 +352,26 @@ arm_smmu_get_cmdq(struct arm_smmu_device *smmu, u8 opcode)
return &smmu->cmdq;
}
+static bool arm_smmu_cmdq_needs_busy_polling(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq)
+{
+ if (cmdq == &smmu->cmdq)
+ return false;
+
+ return smmu->options & ARM_SMMU_OPT_SECONDARY_CMDQ_CS_NONE_ONLY;
+}
+
static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
- struct arm_smmu_queue *q, u32 prod)
+ struct arm_smmu_cmdq *cmdq, u32 prod)
{
+ struct arm_smmu_queue *q = &cmdq->q;
+
cmd[1] = 0;
cmd[0] = FIELD_PREP(CMDQ_0_OP, CMDQ_OP_CMD_SYNC) |
FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH) |
FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
- if (q->quirks & CMDQ_QUIRK_SYNC_CS_NONE_ONLY) {
+ if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq)) {
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_NONE);
return;
}
@@ -380,7 +391,7 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
}
void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
- struct arm_smmu_queue *q)
+ struct arm_smmu_cmdq *cmdq)
{
static const char * const cerror_str[] = {
[CMDQ_ERR_CERROR_NONE_IDX] = "No error",
@@ -388,6 +399,7 @@ void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
[CMDQ_ERR_CERROR_ABT_IDX] = "Abort on command fetch",
[CMDQ_ERR_CERROR_ATC_INV_IDX] = "ATC invalidate timeout",
};
+ struct arm_smmu_queue *q = &cmdq->q;
int i;
u64 cmd[CMDQ_ENT_DWORDS];
@@ -426,14 +438,14 @@ void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
/* Convert the erroneous command into a CMD_SYNC */
- arm_smmu_cmdq_build_sync_cmd(cmd, smmu, q, cons);
+ arm_smmu_cmdq_build_sync_cmd(cmd, smmu, cmdq, cons);
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
- __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
+ __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq);
}
/*
@@ -711,7 +723,7 @@ static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
struct arm_smmu_ll_queue *llq)
{
if (smmu->options & ARM_SMMU_OPT_MSIPOLL &&
- !(cmdq->q.quirks & CMDQ_QUIRK_SYNC_CS_NONE_ONLY))
+ !arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
@@ -797,7 +809,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
if (sync) {
prod = queue_inc_prod_n(&llq, n);
- arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
+ arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, cmdq, prod);
queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
/*
@@ -3985,6 +3997,8 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
smmu->features |= ARM_SMMU_FEAT_COHERENCY;
smmu->tegra241_cmdqv = tegra241_cmdqv_acpi_probe(smmu, node);
+ if (smmu->tegra241_cmdqv)
+ smmu->options |= ARM_SMMU_OPT_SECONDARY_CMDQ_CS_NONE_ONLY;
return 0;
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 2c1fe7e129cd..0962aa839080 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -654,10 +654,11 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
u32 features;
-#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
-#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
-#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
-#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
+#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
+#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
+#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
+#define ARM_SMMU_OPT_SECONDARY_CMDQ_CS_NONE_ONLY (1 << 4)
u32 options;
struct arm_smmu_cmdq cmdq;
@@ -805,7 +806,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long iova, size_t size);
void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
- struct arm_smmu_queue *q);
+ struct arm_smmu_cmdq *cmdq);
int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q, void __iomem *page,
unsigned long prod_off, unsigned long cons_off,
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index bb696c66e56d..4b1de8517bec 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -266,7 +266,7 @@ static void tegra241_vintf0_handle_error(struct tegra241_vintf *vintf)
u32 gerror = readl_relaxed(REG_VCMDQ_PAGE0(vcmdq, GERROR));
__arm_smmu_cmdq_skip_err(vintf->cmdqv->smmu,
- &vcmdq->cmdq.q);
+ &vcmdq->cmdq);
writel(gerror, REG_VCMDQ_PAGE0(vcmdq, GERRORN));
map &= ~BIT_ULL(lidx);
}