[RFC PATCH v1 09/18] RDMA/umem: Preallocate and cache IOVA for UMEM ODP
Leon Romanovsky
leon at kernel.org
Tue Jul 2 02:09:39 PDT 2024
From: Leon Romanovsky <leonro at nvidia.com>
As a preparation for providing a two-step interface to map pages,
preallocate the IOVA when the UMEM is initialized.
Signed-off-by: Leon Romanovsky <leonro at nvidia.com>
---
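
A minimal sketch of the lifecycle this patch prepares for, assuming the
struct dma_iova_attrs and the dma_alloc_iova()/dma_free_iova() helpers
introduced earlier in this series. The wrapper function below is purely
illustrative (not part of the patch), and the fault-time page linking it
mentions only arrives in later patches:

	/*
	 * Illustrative only: reserve one IOVA range covering the whole
	 * umem at initialization time and release it on teardown.
	 */
	static int umem_odp_iova_example(struct ib_umem_odp *umem_odp,
					 unsigned long start,
					 unsigned long end)
	{
		struct ib_device *dev = umem_odp->umem.ibdev;
		int ret;

		/* Describe the range this umem will need on this device. */
		umem_odp->iova.dev = dev->dma_device;
		umem_odp->iova.size = end - start;
		umem_odp->iova.dir = DMA_BIDIRECTIONAL;

		/* Step 1: reserve the IOVA once, up front. */
		ret = dma_alloc_iova(&umem_odp->iova);
		if (ret)
			return ret;

		/*
		 * Step 2 (later patches): on page fault, link the faulted
		 * pages into the reserved IOVA rather than mapping each
		 * page individually; unlink them again on invalidation.
		 */

		/* On release (see ib_umem_odp_release): drop the reservation. */
		dma_free_iova(&umem_odp->iova);
		return 0;
	}
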
 drivers/infiniband/core/umem_odp.c | 14 +++++++++++++-
 include/rdma/ib_umem_odp.h         |  1 +
 2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e9fa22d31c23..955bf338b1bf 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -50,6 +50,7 @@
 static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
				    const struct mmu_interval_notifier_ops *ops)
 {
+	struct ib_device *dev = umem_odp->umem.ibdev;
 	int ret;
 
 	umem_odp->umem.is_odp = 1;
@@ -87,15 +88,25 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 			goto out_pfn_list;
 		}
 
+		umem_odp->iova.dev = dev->dma_device;
+		umem_odp->iova.size = end - start;
+		umem_odp->iova.dir = DMA_BIDIRECTIONAL;
+		ret = dma_alloc_iova(&umem_odp->iova);
+		if (ret)
+			goto out_dma_list;
+
+
 		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
						    umem_odp->umem.owning_mm,
						    start, end - start, ops);
 		if (ret)
-			goto out_dma_list;
+			goto out_free_iova;
 	}
 
 	return 0;
 
+out_free_iova:
+	dma_free_iova(&umem_odp->iova);
 out_dma_list:
 	kvfree(umem_odp->dma_list);
 out_pfn_list:
@@ -274,6 +285,7 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
					     ib_umem_end(umem_odp));
 		mutex_unlock(&umem_odp->umem_mutex);
 		mmu_interval_notifier_remove(&umem_odp->notifier);
+		dma_free_iova(&umem_odp->iova);
 		kvfree(umem_odp->dma_list);
 		kvfree(umem_odp->pfn_list);
 	}
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 0844c1d05ac6..bb2d7f2a5b04 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -23,6 +23,7 @@ struct ib_umem_odp {
 	 * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
 	 */
 	dma_addr_t *dma_list;
+	struct dma_iova_attrs iova;
 	/*
 	 * The umem_mutex protects the page_list and dma_list fields of an ODP
 	 * umem, allowing only a single thread to map/unmap pages. The mutex
--
2.45.2