[RFC PATCH v1 12/18] RDMA/umem: Prevent UMEM ODP creation with SWIOTLB
Leon Romanovsky
leon at kernel.org
Tue Jul 2 02:09:42 PDT 2024
From: Leon Romanovsky <leonro at nvidia.com>
RDMA UMEM has never supported DMA addresses returned from SWIOTLB, as
these addresses are programmed into hardware that is not aware it is
accessing bounce buffers rather than the real pages.

Instead of silently leaving the system broken for users who are not
aware of this, be explicit and return an error to them.
Signed-off-by: Leon Romanovsky <leonro at nvidia.com>
---
drivers/infiniband/core/umem_odp.c | 81 +++++++++++++++---------------
1 file changed, 41 insertions(+), 40 deletions(-)
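
A note on the core of the change: the new early return rejects devices
whose DMA may bounce through SWIOTLB before any ODP state is built. A
minimal standalone sketch of that pattern (the helper name is
illustrative only; dev_use_swiotlb() is private to the DMA core
upstream, so this assumes the series exposes it, as the hunk below
does):

	#include <linux/dma-map-ops.h>
	#include <linux/swiotlb.h>

	static int sketch_reject_swiotlb(struct device *dma_dev,
					 size_t page_size)
	{
		/*
		 * RDMA HW reads and writes the mapped address on its own
		 * schedule, so nothing can issue the dma_sync_*() calls
		 * that bounce buffering relies on; fail loudly instead.
		 */
		if (dev_use_swiotlb(dma_dev, page_size, DMA_BIDIRECTIONAL) ||
		    is_swiotlb_force_bounce(dma_dev))
			return -EOPNOTSUPP;

		return 0;
	}

Returning -EOPNOTSUPP here surfaces the problem at ODP registration
time, which is far easier to debug than silent data corruption at I/O
time.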
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6e170cb5110c..12186717a892 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -42,7 +42,8 @@
#include <linux/interval_tree.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
-
+#include <linux/dma-map-ops.h>
+#include <linux/swiotlb.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
@@ -51,50 +52,50 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
const struct mmu_interval_notifier_ops *ops)
{
struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t page_size = 1UL << umem_odp->page_shift;
+ unsigned long start, end;
+ size_t ndmas, npfns;
int ret;
umem_odp->umem.is_odp = 1;
mutex_init(&umem_odp->umem_mutex);
+ if (umem_odp->is_implicit_odp)
+ return 0;
+
+ if (dev_use_swiotlb(dev->dma_device, page_size, DMA_BIDIRECTIONAL) ||
+ is_swiotlb_force_bounce(dev->dma_device))
+ return -EOPNOTSUPP;
+
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
+
+ ndmas = (end - start) >> umem_odp->page_shift;
+ if (!ndmas)
+ return -EINVAL;
+
+ npfns = (end - start) >> PAGE_SHIFT;
+ umem_odp->pfn_list =
+ kvcalloc(npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
+ if (!umem_odp->pfn_list)
+ return -ENOMEM;
+
+ umem_odp->iova.dev = dev->dma_device;
+ umem_odp->iova.size = end - start;
+ umem_odp->iova.dir = DMA_BIDIRECTIONAL;
+ ret = dma_alloc_iova(&umem_odp->iova);
+ if (ret)
+ goto out_pfn_list;
- if (!umem_odp->is_implicit_odp) {
- size_t page_size = 1UL << umem_odp->page_shift;
- unsigned long start;
- unsigned long end;
- size_t ndmas, npfns;
-
- start = ALIGN_DOWN(umem_odp->umem.address, page_size);
- if (check_add_overflow(umem_odp->umem.address,
- (unsigned long)umem_odp->umem.length,
- &end))
- return -EOVERFLOW;
- end = ALIGN(end, page_size);
- if (unlikely(end < page_size))
- return -EOVERFLOW;
-
- ndmas = (end - start) >> umem_odp->page_shift;
- if (!ndmas)
- return -EINVAL;
-
- npfns = (end - start) >> PAGE_SHIFT;
- umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
- if (!umem_odp->pfn_list)
- return -ENOMEM;
-
-
- umem_odp->iova.dev = dev->dma_device;
- umem_odp->iova.size = end - start;
- umem_odp->iova.dir = DMA_BIDIRECTIONAL;
- ret = dma_alloc_iova(&umem_odp->iova);
- if (ret)
- goto out_pfn_list;
-
- ret = mmu_interval_notifier_insert(&umem_odp->notifier,
- umem_odp->umem.owning_mm,
- start, end - start, ops);
- if (ret)
- goto out_free_iova;
- }
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_iova;
return 0;
--
2.45.2
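
Aside on the range math in ib_init_umem_odp(): it guards against two
distinct wraparounds, one in the addition and one in the rounding up.
A minimal sketch of the same logic (helper and parameter names are
illustrative only):

	#include <linux/align.h>
	#include <linux/compiler.h>
	#include <linux/overflow.h>

	static int sketch_odp_range(unsigned long address, size_t length,
				    unsigned int page_shift,
				    unsigned long *start, unsigned long *end)
	{
		size_t page_size = 1UL << page_shift;

		*start = ALIGN_DOWN(address, page_size);

		/* address + length itself can overflow unsigned long. */
		if (check_add_overflow(address, (unsigned long)length, end))
			return -EOVERFLOW;

		/*
		 * ALIGN() rounds up and can wrap past zero when the
		 * unaligned end lies within page_size of ULONG_MAX; a
		 * wrapped result is always smaller than page_size.
		 */
		*end = ALIGN(*end, page_size);
		if (unlikely(*end < page_size))
			return -EOVERFLOW;

		return 0;
	}

Without the second check, a wrapped end would make end - start
underflow, and the ndmas/npfns sizing that follows would be garbage.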