[PATCH V2 3/6] iommu: iova: add support for 'first-fit' algorithm
Ajay Kumar
ajaykumar.rs at samsung.com
Wed May 11 05:15:41 PDT 2022
From: Marek Szyprowski <m.szyprowski at samsung.com>
Add support for the 'first-fit' allocation algorithm, which allocates
IOVAs from the lowest free address upwards. It will be used only for
the special case of implementing DMA_ATTR_LOW_ADDRESS, so this path
doesn't use the IOVA rcache.
Signed-off-by: Marek Szyprowski <m.szyprowski at samsung.com>
Signed-off-by: Ajay Kumar <ajaykumar.rs at samsung.com>
---
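Note: a rough sketch (not part of this patch) of how a caller might use
the new API for a DMA_ATTR_LOW_ADDRESS-style allocation. The helper
name and the 32-bit limit are illustrative assumptions only:

	/*
	 * Hypothetical caller: place 'size' page frames as low as
	 * possible, entirely below the 32-bit boundary.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/iova.h>

	static unsigned long low_iova_alloc_sketch(struct iova_domain *iovad,
						   unsigned long size)
	{
		unsigned long limit = DMA_BIT_MASK(32) >> iova_shift(iovad);

		/* returns IOVA_BAD_ADDR when no slot below 'limit' fits */
		return alloc_iova_first_fit(iovad, size, limit);
	}

A caller could check for IOVA_BAD_ADDR (defined earlier in this series)
and fall back to the regular allocator when the low range is exhausted.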
drivers/iommu/iova.c | 72 ++++++++++++++++++++++++++++++++++++++++
include/linux/iova.h | 2 ++
2 files changed, 74 insertions(+)
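For reference, the __iova_get_aligned_start() helper below rounds a
candidate start pfn up to the power-of-two alignment of the allocation
size, in line with how the existing allocator places size-aligned
allocations. A worked example (values invented for illustration):

	/*
	 * size = 5 pfns: __roundup_pow_of_two(5) = 8, mask = 7, so a
	 * candidate start of 0x1003 becomes (0x1003 + 7) & ~7 = 0x1008,
	 * i.e. the slot starts on an 8-pfn boundary.
	 *
	 * First-fit walk with start_pfn = 0x100 and reserved ranges
	 * [0x100..0x1ff], [0x200..0x2ff], [0x400..0x7ff]: a request
	 * for 0x100 pfns lands at 0x300, the lowest gap large enough,
	 * rather than in the bigger gap above 0x7ff.
	 */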
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index ae0fe0a6714e..89f9338f83a3 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -231,6 +231,53 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
return -ENOMEM;
}
+static unsigned long
+__iova_get_aligned_start(unsigned long start, unsigned long size)
+{
+ unsigned long mask = __roundup_pow_of_two(size) - 1;
+
+ return (start + mask) & ~mask;
+}
+
+static int __alloc_and_insert_iova_range_forward(struct iova_domain *iovad,
+		unsigned long size, unsigned long limit_pfn,
+		struct iova *new)
+{
+	struct rb_node *curr;
+	unsigned long flags;
+	unsigned long start;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+
+	/* walk the tree from the lowest entry upwards */
+	curr = rb_first(&iovad->rbroot);
+	start = __iova_get_aligned_start(iovad->start_pfn, size);
+
+	while (curr) {
+		struct iova *curr_iova = rb_entry(curr, struct iova, node);
+
+		/* does [start, start + size) fit below the current entry? */
+		if (start + size - 1 < curr_iova->pfn_lo)
+			break; /* found a free slot */
+		start = __iova_get_aligned_start(curr_iova->pfn_hi + 1, size);
+		curr = rb_next(curr);
+	}
+
+	/* the first (lowest) fit must still respect limit_pfn */
+	if (!curr || start + size - 1 > limit_pfn) {
+		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+		return -ENOMEM;
+	}
+
+	new->pfn_lo = start;
+	new->pfn_hi = new->pfn_lo + size - 1;
+	iova_insert_rbtree(&iovad->rbroot, new, curr);
+
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	return 0;
+}
+
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);
@@ -420,6 +467,31 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
}
EXPORT_SYMBOL_GPL(free_iova);
+/**
+ * alloc_iova_first_fit - allocates an iova from the beginning of the address space
+ * @iovad: - iova domain in question
+ * @size: - number of page frames to allocate
+ * @limit_pfn: - max limit address
+ * Returns the pfn the allocated iova starts at, or IOVA_BAD_ADDR in case
+ * of failure.
+ */
+unsigned long
+alloc_iova_first_fit(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn)
+{
+ struct iova *new_iova = alloc_iova_mem();
+
+ if (!new_iova)
+ return IOVA_BAD_ADDR;
+
+ if (__alloc_and_insert_iova_range_forward(iovad, size, limit_pfn, new_iova)) {
+ free_iova_mem(new_iova);
+ return IOVA_BAD_ADDR;
+ }
+ return new_iova->pfn_lo;
+}
+EXPORT_SYMBOL_GPL(alloc_iova_first_fit);
+
/**
* alloc_iova_fast - allocates an iova from rcache
* @iovad: - iova domain in question
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 46b5b10c532b..45ed6d41490a 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -89,6 +89,8 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
+unsigned long alloc_iova_first_fit(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
--
2.17.1