[RFC PATCH v1 08/18] iommu/dma: Implement link/unlink ranges callbacks
Leon Romanovsky
leon at kernel.org
Tue Jul 2 02:09:38 PDT 2024
From: Leon Romanovsky <leonro at nvidia.com>
Add an implementation of the link/unlink range callbacks, which map and
unmap pages on the fast path into an IOVA range that was pre-allocated
by the alloc_iova callback. The start_range callback resolves the IOMMU
domain and handles deferred attach before any linking takes place.
Signed-off-by: Leon Romanovsky <leonro at nvidia.com>
---
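A note for reviewers: the expected caller flow looks roughly like the
sketch below. The wrapper names (dma_alloc_iova(), dma_start_range(),
dma_link_range(), dma_unlink_range(), dma_free_iova()), the
dma_iova_attrs fields (.size, .addr) and the driver_map_pages() helper
are all illustrative; they mirror the callback signatures in this patch
rather than documenting the final API.

/*
 * Illustrative sketch only: map npages discontiguous pages back to back
 * into a single pre-allocated IOVA range. iova.addr is assumed to hold
 * the base IOVA after allocation.
 */
static int driver_map_pages(struct device *dev, struct page **pages,
                            size_t npages)
{
        struct dma_iova_attrs iova = {
                .dev = dev,
                .dir = DMA_TO_DEVICE,
                .size = npages * PAGE_SIZE,
        };
        struct dma_iova_state state = { .iova = &iova };
        size_t i;
        int ret;

        /* Reserve one contiguous IOVA range up front */
        ret = dma_alloc_iova(&iova);
        if (ret)
                return ret;

        /* Ends up in iommu_dma_start_range() */
        ret = dma_start_range(&state);
        if (ret)
                goto err_free_iova;

        for (i = 0; i < npages; i++) {
                /* Each call ends up in iommu_dma_link_range() */
                ret = dma_link_range(&state, page_to_phys(pages[i]),
                                     iova.addr + i * PAGE_SIZE, PAGE_SIZE);
                if (ret)
                        goto err_unlink;
        }
        return 0;

err_unlink:
        /* Tears the partially linked range down via iommu_dma_unlink_range() */
        if (i)
                dma_unlink_range(&state, iova.addr, i * PAGE_SIZE);
err_free_iova:
        dma_free_iova(&iova);
        return ret;
}

Note that iommu_dma_unlink_range() walks the IOVA range page by page
through iommu_iova_to_phys() for the CPU cache sync, since the physical
pages behind a linked range are not necessarily contiguous.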
drivers/iommu/dma-iommu.c | 79 +++++++++++++++++++++++++++++++++++++++
1 file changed, 79 insertions(+)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 0b5ca6961940..7425d155a14e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1731,6 +1731,82 @@ static void iommu_dma_free_iova(struct device *dev, dma_addr_t iova,
 	__iommu_dma_free_iova(cookie, iova, size, &iotlb_gather);
 }
 
+static int iommu_dma_start_range(struct dma_iova_state *state)
+{
+	struct device *dev = state->iova->dev;
+
+	state->domain = iommu_get_dma_domain(dev);
+
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled))
+		return iommu_deferred_attach(dev, state->domain);
+
+	return 0;
+}
+
+static int iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+				dma_addr_t addr, size_t size)
+{
+	struct iommu_domain *domain = state->domain;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	struct device *dev = state->iova->dev;
+	enum dma_data_direction dir = state->iova->dir;
+	bool coherent = dev_is_dma_coherent(dev);
+	unsigned long attrs = state->iova->attrs;
+	int prot = dma_info_to_prot(dir, coherent, attrs);
+
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(phys, size, dir);
+
+	size = iova_align(iovad, size);
+	return iommu_map(domain, addr, phys, size, prot, GFP_ATOMIC);
+}
+
+static void iommu_sync_dma_for_cpu(struct iommu_domain *domain,
+				   dma_addr_t start, size_t size,
+				   enum dma_data_direction dir)
+{
+	size_t sync_size, unmapped = 0;
+	phys_addr_t phys;
+
+	do {
+		phys = iommu_iova_to_phys(domain, start + unmapped);
+		if (WARN_ON(!phys))
+			break;
+
+		/* Sync at most one page per iteration */
+		sync_size = min_t(size_t, size - unmapped, PAGE_SIZE);
+		arch_sync_dma_for_cpu(phys, sync_size, dir);
+		unmapped += sync_size;
+	} while (unmapped < size);
+}
+
+static void iommu_dma_unlink_range(struct dma_iova_state *state,
+				   dma_addr_t start, size_t size)
+{
+	struct device *dev = state->iova->dev;
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	struct iommu_iotlb_gather iotlb_gather;
+	bool coherent = dev_is_dma_coherent(dev);
+	unsigned long attrs = state->iova->attrs;
+	size_t unmapped;
+
+	iommu_iotlb_gather_init(&iotlb_gather);
+	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
+
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_sync_dma_for_cpu(domain, start, size, state->iova->dir);
+
+	size = iova_align(iovad, size);
+	unmapped = iommu_unmap_fast(domain, start, size, &iotlb_gather);
+	WARN_ON(unmapped != size);
+
+	if (!iotlb_gather.queued)
+		iommu_iotlb_sync(domain, &iotlb_gather);
+}
+
 static const struct dma_map_ops iommu_dma_ops = {
 	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED |
 				  DMA_F_CAN_SKIP_SYNC,
@@ -1757,6 +1833,9 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.max_mapping_size	= iommu_dma_max_mapping_size,
 	.alloc_iova		= iommu_dma_alloc_iova,
 	.free_iova		= iommu_dma_free_iova,
+	.link_range		= iommu_dma_link_range,
+	.unlink_range		= iommu_dma_unlink_range,
+	.start_range		= iommu_dma_start_range,
 };
 
 void iommu_setup_dma_ops(struct device *dev)
--
2.45.2