[PATCH v5 3/7] iommu/arm-smmu-v3: Introduce a per-domain arm_smmu_invs array

Jason Gunthorpe jgg at nvidia.com
Tue Nov 25 05:43:21 PST 2025


On Mon, Nov 24, 2025 at 08:14:39PM -0800, Nicolin Chen wrote:
> On Mon, Nov 24, 2025 at 09:42:31PM +0000, Will Deacon wrote:
> > On Sat, Nov 08, 2025 at 12:08:04AM -0800, Nicolin Chen wrote:
> > > +VISIBLE_IF_KUNIT
> > > +struct arm_smmu_invs *arm_smmu_invs_merge(struct arm_smmu_invs *invs,
> > > +					  struct arm_smmu_invs *to_merge)
> > > +{
> > > +	struct arm_smmu_invs *new_invs;
> > > +	struct arm_smmu_inv *new;
> > > +	size_t num_trashes = 0;
> > > +	size_t num_adds = 0;
> > > +	size_t i, j;
> > > +
> > > +	for (i = j = 0; i < invs->num_invs || j < to_merge->num_invs;) {
> > 
> > Maybe worth having a simple iterator macro for this?
> 
> I added two macros:
> 
> +#define arm_smmu_invs_for_each_inv(invs, idx, inv)              \
> +	for (idx = 0, inv = &invs->inv[0]; idx < invs->num_invs; \
> +	     inv = &invs->inv[++idx])
> +#define arm_smmu_invs_for_each_idx_dual(invs1, idx1, invs2, idx2) \
> +	for (idx1 = idx2 = 0; idx1 < invs1->num_invs || idx2 < invs2->num_invs;)

I think you can pull more stuff in. Something like this:

static inline struct arm_smmu_inv *
arm_smmu_invs_iter_next(struct arm_smmu_invs *invs, size_t next,
			size_t *idx)
{
	while (true) {
		if (next >= invs->num_invs) {
			*idx = next;
			return NULL;
		}
		/* A zero users refcount marks a trash entry, skip it */
		if (!refcount_read(&invs->inv[next].users)) {
			next++;
			continue;
		}
		*idx = next;
		return &invs->inv[next];
	}
}

static int arm_smmu_inv_cmp(const struct arm_smmu_inv *l,
			    const struct arm_smmu_inv *r)
{
	if (l->smmu != r->smmu)
		return cmp_int((uintptr_t)l->smmu, (uintptr_t)r->smmu);
	if (l->type != r->type)
		return cmp_int(l->type, r->type);
	return cmp_int(l->id, r->id);
}

static inline int arm_smmu_invs_iter_next_cmp(struct arm_smmu_invs *invs_lhs,
					      size_t next_lhs, size_t *idx_lhs,
					      struct arm_smmu_invs *invs_rhs,
					      size_t next_rhs, size_t *idx_rhs)
{
	struct arm_smmu_inv *cur_lhs =
		arm_smmu_invs_iter_next(invs_lhs, next_lhs, idx_lhs);

	*idx_rhs = next_rhs;

	/*
	 * Compare the items of two sorted arrays. If one side is past the
	 * end of its array, return in favour of the other side so that it
	 * runs out the rest of the iteration.
	 */
	if (!cur_lhs)
		return 1;
	if (next_rhs >= invs_rhs->num_invs)
		return -1;
	return arm_smmu_inv_cmp(cur_lhs, &invs_rhs->inv[next_rhs]);
}

/*
 * Iterate over all non-trash entries in invs. idx is a stack variable
 * storing the index; cur is a stack variable of 'struct arm_smmu_inv *'.
 */
#define arm_smmu_invs_for_each_inv(invs, idx, cur)              \
	for (cur = arm_smmu_invs_iter_next(invs, 0, &(idx)); cur; \
	     cur = arm_smmu_invs_iter_next(invs, idx + 1, &(idx)))
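
With this, a caller walking a single array only ever sees the live
entries. A minimal sketch (arm_smmu_inv_flush() is a made-up name for
whatever the per-entry invalidation helper ends up being):

	size_t idx;
	struct arm_smmu_inv *cur;

	arm_smmu_invs_for_each_inv(invs, idx, cur)
		arm_smmu_inv_flush(cur);	/* hypothetical per-entry op */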

/*
 * Iterate over two sorted arrays as in the merge step of a merge sort.
 * cmp indicates which side the current step consumed (0 means both).
 */
#define arm_smmu_invs_for_each_merge(invs_lhs, idx_lhs, invs_rhs, idx_rhs, \
				     cmp)                                  \
	for (cmp = arm_smmu_invs_iter_next_cmp(invs_lhs, 0, &(idx_lhs),    \
					       invs_rhs, 0, &(idx_rhs));   \
	     idx_lhs < invs_lhs->num_invs || idx_rhs < invs_rhs->num_invs; \
	     cmp = arm_smmu_invs_iter_next_cmp(                            \
		     invs_lhs, idx_lhs + (cmp <= 0 ? 1 : 0), &(idx_lhs),   \
		     invs_rhs, idx_rhs + (cmp >= 0 ? 1 : 0), &(idx_rhs)))



And then change the loops computing num_trashes/num_adds to work directly on the real entries, ignoring trash:

	arm_smmu_invs_for_each_merge(invs, i, to_merge, j, cmp)
		new_size++;
	new_invs = arm_smmu_invs_alloc(new_size);
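
The second pass that fills the new array can reuse the same macro. A
rough sketch, assuming a match simply takes another reference (whether
that is the right users accounting depends on the rest of the series):

	size_t i, j, n = 0;
	int cmp;

	arm_smmu_invs_for_each_merge(invs, i, to_merge, j, cmp) {
		if (cmp < 0) {			/* only in the old array */
			new_invs->inv[n] = invs->inv[i];
		} else if (cmp > 0) {		/* only in to_merge */
			new_invs->inv[n] = to_merge->inv[j];
		} else {			/* in both, take a reference */
			new_invs->inv[n] = invs->inv[i];
			refcount_inc(&new_invs->inv[n].users);
		}
		n++;
	}
	new_invs->num_invs = n;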


The names should probably be for_each_..., though.

Jason


