[PATCH v6 04/25] iommu/arm-smmu-v3: Move TLB range invalidation into common code
Mostafa Saleh
smostafa at google.com
Fri May 1 04:19:06 PDT 2026
Range TLB invalidation has a very specific algorithm. Instead of
re-writing it for the hypervisor, move it to a function that can
be re-used.
Signed-off-by: Mostafa Saleh <smostafa at google.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 65 ++++--------------
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 76 +++++++++++++++++++++
2 files changed, 88 insertions(+), 53 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index cb64f88989f0..c22832d26495 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2362,68 +2362,27 @@ static void arm_smmu_tlb_inv_context(void *cookie)
arm_smmu_domain_inv(smmu_domain);
}
+static void __arm_smmu_cmdq_batch_add(void *__opaque,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ struct arm_smmu_device *smmu = (struct arm_smmu_device *)__opaque;
+
+ arm_smmu_cmdq_batch_add(smmu, cmds, cmd);
+}
+
static void arm_smmu_cmdq_batch_add_range(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_batch *cmds,
struct arm_smmu_cmdq_ent *cmd,
unsigned long iova, size_t size,
size_t granule, size_t pgsize)
{
- unsigned long end = iova + size, num_pages = 0, tg = pgsize;
- size_t inv_range = granule;
-
if (WARN_ON_ONCE(!size))
return;
- if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
- num_pages = size >> tg;
-
- /* Convert page size of 12,14,16 (log2) to 1,2,3 */
- cmd->tlbi.tg = (tg - 10) / 2;
-
- /*
- * Determine what level the granule is at. For non-leaf, both
- * io-pgtable and SVA pass a nominal last-level granule because
- * they don't know what level(s) actually apply, so ignore that
- * and leave TTL=0. However for various errata reasons we still
- * want to use a range command, so avoid the SVA corner case
- * where both scale and num could be 0 as well.
- */
- if (cmd->tlbi.leaf)
- cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
- else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
- num_pages++;
- }
-
- while (iova < end) {
- if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
- /*
- * On each iteration of the loop, the range is 5 bits
- * worth of the aligned size remaining.
- * The range in pages is:
- *
- * range = (num_pages & (0x1f << __ffs(num_pages)))
- */
- unsigned long scale, num;
-
- /* Determine the power of 2 multiple number of pages */
- scale = __ffs(num_pages);
- cmd->tlbi.scale = scale;
-
- /* Determine how many chunks of 2^scale size we have */
- num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
- cmd->tlbi.num = num - 1;
-
- /* range is num * 2^scale * pgsize */
- inv_range = num << (scale + tg);
-
- /* Clear out the lower order bits for the next iteration */
- num_pages -= num << scale;
- }
-
- cmd->tlbi.addr = iova;
- arm_smmu_cmdq_batch_add(smmu, cmds, cmd);
- iova += inv_range;
- }
+ arm_smmu_tlb_inv_build(cmd, iova, size, granule,
+ pgsize, smmu->features & ARM_SMMU_FEAT_RANGE_INV,
+ smmu, __arm_smmu_cmdq_batch_add, cmds);
}
static bool arm_smmu_inv_size_too_big(struct arm_smmu_device *smmu, size_t size,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 9b8c5fb7282b..7be41dbe5aaa 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -1204,6 +1204,82 @@ static inline void arm_smmu_write_strtab_l1_desc(struct arm_smmu_strtab_l1 *dst,
WRITE_ONCE(dst->l2ptr, cpu_to_le64(val));
}
+/**
+ * arm_smmu_tlb_inv_build - Build and issue range invalidation command(s)
+ * @cmd: Base command initialized with OPCODE (S1, S2..), vmid and asid
+ * @iova: Start IOVA to invalidate
+ * @size: Size of range
+ * @granule: Granule of invalidation
+ * @pgsize: Page size of the invalidation
+ * @is_range: Use range invalidation commands
+ * @opaque: Pointer to pass to add_cmd
+ * @add_cmd: Function to send/batch the invalidation command
+ * @cmds: In case of batching, includes the pointer to the batch
+ */
+static inline void arm_smmu_tlb_inv_build(struct arm_smmu_cmdq_ent *cmd,
+ unsigned long iova, size_t size,
+ size_t granule, unsigned long pgsize,
+ bool is_range, void *opaque,
+ void (*add_cmd)(void *_opaque,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd),
+ struct arm_smmu_cmdq_batch *cmds)
+{
+ unsigned long end = iova + size, num_pages = 0, tg = pgsize;
+ size_t inv_range = granule;
+
+ if (is_range) {
+ num_pages = size >> tg;
+
+ /* Convert page size of 12,14,16 (log2) to 1,2,3 */
+ cmd->tlbi.tg = (tg - 10) / 2;
+
+ /*
+ * Determine what level the granule is at. For non-leaf, both
+ * io-pgtable and SVA pass a nominal last-level granule because
+ * they don't know what level(s) actually apply, so ignore that
+ * and leave TTL=0. However for various errata reasons we still
+ * want to use a range command, so avoid the SVA corner case
+ * where both scale and num could be 0 as well.
+ */
+ if (cmd->tlbi.leaf)
+ cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+ else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+ num_pages++;
+ }
+
+ while (iova < end) {
+ if (is_range) {
+ /*
+ * On each iteration of the loop, the range is 5 bits
+ * worth of the aligned size remaining.
+ * The range in pages is:
+ *
+ * range = (num_pages & (0x1f << __ffs(num_pages)))
+ */
+ unsigned long scale, num;
+
+ /* Determine the power of 2 multiple number of pages */
+ scale = __ffs(num_pages);
+ cmd->tlbi.scale = scale;
+
+ /* Determine how many chunks of 2^scale size we have */
+ num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
+ cmd->tlbi.num = num - 1;
+
+ /* range is num * 2^scale * pgsize */
+ inv_range = num << (scale + tg);
+
+ /* Clear out the lower order bits for the next iteration */
+ num_pages -= num << scale;
+ }
+
+ cmd->tlbi.addr = iova;
+ add_cmd(opaque, cmds, cmd);
+ iova += inv_range;
+ }
+}
+
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
void arm_smmu_sva_notifier_synchronize(void);
--
2.54.0.545.g6539524ca2-goog
More information about the linux-arm-kernel
mailing list