[PATCH 10/10] ARM/dma-mapping: merge IOMMU ops
Christoph Hellwig
hch at lst.de
Mon May 2 09:43:30 PDT 2022
From: Robin Murphy <robin.murphy at arm.com>
The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. With some minor tweaks to make
those operations safe for coherent devices at minimal overhead, we can
condense down to a single set of DMA ops.
Signed-off-by: Robin Murphy <robin.murphy at arm.com>
Signed-off-by: Christoph Hellwig <hch at lst.de>
---
arch/arm/mm/dma-mapping.c | 37 +++++++++++++------------------------
1 file changed, 13 insertions(+), 24 deletions(-)
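For illustration, the pattern the patch relies on is sketched below.
This is a simplified, hypothetical example and not kernel code: the
names sketch_sync_single_for_cpu, sketch_cache_sync and
sketch_iommu_ops are made up (the real callbacks are the arm_iommu_*
ones in the diff), and it assumes the usual kernel headers providing
struct device, dma_addr_t, enum dma_data_direction and struct
dma_map_ops. The idea is that each dma_sync_* callback bails out early
when dev->dma_coherent is set, so a single ops table can serve both
coherent and non-coherent devices.

	/*
	 * Hypothetical stand-in for the arch cache maintenance helpers
	 * (e.g. __dma_page_dev_to_cpu()/__dma_page_cpu_to_dev() on ARM).
	 */
	static void sketch_cache_sync(struct device *dev, dma_addr_t handle,
			size_t size, enum dma_data_direction dir)
	{
		/* ...perform CPU cache maintenance for the buffer... */
	}

	static void sketch_sync_single_for_cpu(struct device *dev,
			dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
	{
		/* Coherent devices need no cache maintenance: return early. */
		if (dev->dma_coherent)
			return;

		/* Non-coherent path keeps doing the usual maintenance. */
		sketch_cache_sync(dev, handle, size, dir);
	}

	/*
	 * Because the sync callbacks are now safe for coherent devices,
	 * one ops table can be installed unconditionally via set_dma_ops().
	 */
	static const struct dma_map_ops sketch_iommu_ops = {
		.sync_single_for_cpu	= sketch_sync_single_for_cpu,
		/* ...map/unmap/alloc/free callbacks shared by both cases... */
	};

With that pattern in place, the separate coherent ops table becomes
redundant, which is what the hunk removing iommu_coherent_ops below
does.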
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 10e5e5800d784..dd46cce61579a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *s;
int i;
+ if (dev->dma_coherent)
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *s;
int i;
+ if (dev->dma_coherent)
+ return;
+
for_each_sg(sg, s, nents, i)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
- if (!iova)
+ if (dev->dma_coherent || !iova)
return;
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
}
@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
- struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ struct page *page;
unsigned int offset = handle & ~PAGE_MASK;
- if (!iova)
+ if (dev->dma_coherent || !iova)
return;
+ page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_cpu_to_dev(page, offset, size, dir);
}
@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
.unmap_resource = arm_iommu_unmap_resource,
};
-static const struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_iommu_alloc_attrs,
- .free = arm_iommu_free_attrs,
- .mmap = arm_iommu_mmap_attrs,
- .get_sgtable = arm_iommu_get_sgtable,
-
- .map_page = arm_iommu_map_page,
- .unmap_page = arm_iommu_unmap_page,
-
- .map_sg = arm_iommu_map_sg,
- .unmap_sg = arm_iommu_unmap_sg,
-
- .map_resource = arm_iommu_map_resource,
- .unmap_resource = arm_iommu_unmap_resource,
-};
-
/**
* arm_iommu_create_mapping
* @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
return;
}
- if (coherent)
- set_dma_ops(dev, &iommu_coherent_ops);
- else
- set_dma_ops(dev, &iommu_ops);
+ set_dma_ops(dev, &iommu_ops);
}
static void arm_teardown_iommu_dma_ops(struct device *dev)
--
2.30.2