[PATCH 5/6] dma-mapping: Allow batched DMA sync operations if supported by the arch
Barry Song
21cnbao at gmail.com
Sat Dec 20 21:15:23 PST 2025
>
> All errors (new ones prefixed by >>):
>
> >> kernel/dma/direct.c:456:4: error: call to undeclared function 'dma_direct_unmap_phys_batch_add'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
> 456 | dma_direct_unmap_phys_batch_add(dev, sg->dma_address,
> | ^
> kernel/dma/direct.c:456:4: note: did you mean 'dma_direct_unmap_phys'?
> kernel/dma/direct.h:188:20: note: 'dma_direct_unmap_phys' declared here
> 188 | static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
> | ^
> >> kernel/dma/direct.c:484:22: error: call to undeclared function 'dma_direct_map_phys_batch_add'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
> 484 | sg->dma_address = dma_direct_map_phys_batch_add(dev, sg_phys(sg),
> | ^
> 2 errors generated.
>
>
Thanks very much for the report.
Can you please check whether the diff below fixes the build issue?
From 5541aa1efa19777e435c9f3cca7cd2c6a490d9f1 Mon Sep 17 00:00:00 2001
From: Barry Song <v-songbaohua at oppo.com>
Date: Sun, 21 Dec 2025 13:09:36 +0800
Subject: [PATCH] kernel/dma: Fix build errors for dma_direct_map_phys
Reported-by: kernel test robot <lkp at intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202512201836.f6KX6WMH-lkp@intel.com/
Signed-off-by: Barry Song <v-songbaohua at oppo.com>
---
kernel/dma/direct.h | 38 ++++++++++++++++++++++++++------------
1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index a211bab26478..bcc398b5aa6b 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -138,8 +138,7 @@ static inline dma_addr_t __dma_direct_map_phys(struct device *dev,
return DMA_MAPPING_ERROR;
}
-#ifdef CONFIG_ARCH_WANT_BATCHED_DMA_SYNC
-static inline dma_addr_t dma_direct_map_phys_batch_add(struct device *dev,
+static inline dma_addr_t dma_direct_map_phys(struct device *dev,
phys_addr_t phys, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
@@ -147,13 +146,13 @@ static inline dma_addr_t dma_direct_map_phys_batch_add(struct device *dev,
if (dma_addr != DMA_MAPPING_ERROR && !dev_is_dma_coherent(dev) &&
!(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
- arch_sync_dma_for_device_batch_add(phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return dma_addr;
}
-#endif
-static inline dma_addr_t dma_direct_map_phys(struct device *dev,
+#ifdef CONFIG_ARCH_WANT_BATCHED_DMA_SYNC
+static inline dma_addr_t dma_direct_map_phys_batch_add(struct device *dev,
phys_addr_t phys, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
@@ -161,13 +160,20 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
if (dma_addr != DMA_MAPPING_ERROR && !dev_is_dma_coherent(dev) &&
!(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
- arch_sync_dma_for_device(phys, size, dir);
+ arch_sync_dma_for_device_batch_add(phys, size, dir);
return dma_addr;
}
+#else
+static inline dma_addr_t dma_direct_map_phys_batch_add(struct device *dev,
+ phys_addr_t phys, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return dma_direct_map_phys(dev, phys, size, dir, attrs);
+}
+#endif
-#ifdef CONFIG_ARCH_WANT_BATCHED_DMA_SYNC
-static inline void dma_direct_unmap_phys_batch_add(struct device *dev, dma_addr_t addr,
+static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t phys;
@@ -178,14 +184,14 @@ static inline void dma_direct_unmap_phys_batch_add(struct device *dev, dma_addr_
phys = dma_to_phys(dev, addr);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_single_for_cpu_batch_add(dev, addr, size, dir);
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
swiotlb_tbl_unmap_single(dev, phys, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
-#endif
-static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
+#ifdef CONFIG_ARCH_WANT_BATCHED_DMA_SYNC
+static inline void dma_direct_unmap_phys_batch_add(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t phys;
@@ -196,9 +202,17 @@ static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
phys = dma_to_phys(dev, addr);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+ dma_direct_sync_single_for_cpu_batch_add(dev, addr, size, dir);
swiotlb_tbl_unmap_single(dev, phys, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
+#else
+static inline void dma_direct_unmap_phys_batch_add(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ dma_direct_unmap_phys(dev, addr, size, dir, attrs);
+}
+#endif
+
#endif /* _KERNEL_DMA_DIRECT_H */
--
2.39.3 (Apple Git-146)
Thanks
Barry
More information about the linux-arm-kernel
mailing list