[PATCH] arm64: Make ARCH_DMA_MINALIGN configurable

From: Vincent Whitchurch <vincent.whitchurch at axis.com>
Date: Mon May 17 00:43:32 PDT 2021


ARCH_DMA_MINALIGN is hardcoded to 128 bytes, but this wastes memory if
the kernel is only intended to run on platforms with cache line sizes of
64 bytes.

Make this configurable (hidden under CONFIG_EXPERT).  Setting this to 64
bytes reduces the slab memory usage of my Cortex-A53-based system by
~6%, measured right after startup.
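
To illustrate where the savings come from, here is a rough userspace
sketch (not part of the patch; the request sizes below are made up, and
it models plain rounding rather than the kernel's actual kmalloc size
classes): every object is padded out to a multiple of the minimum
alignment, so small allocations occupy roughly twice the memory at
128-byte alignment that they would at 64.

/* Illustrative only: model per-object slab footprint when allocation
 * sizes are rounded up to ARCH_DMA_MINALIGN.  Real kmalloc rounds to
 * its size classes instead, so treat the numbers as a rough guide.
 */
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	const unsigned long sizes[] = { 8, 32, 64, 96, 192 };	/* hypothetical requests */
	const unsigned long aligns[] = { 128, 64 };
	unsigned int i, j;

	for (i = 0; i < sizeof(aligns) / sizeof(aligns[0]); i++) {
		unsigned long total = 0;

		printf("minalign %lu:\n", aligns[i]);
		for (j = 0; j < sizeof(sizes) / sizeof(sizes[0]); j++) {
			unsigned long obj = align_up(sizes[j], aligns[i]);

			printf("  request %3lu -> object %3lu bytes\n",
			       sizes[j], obj);
			total += obj;
		}
		printf("  total %lu bytes\n", total);
	}
	return 0;
}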

Signed-off-by: Vincent Whitchurch <vincent.whitchurch at axis.com>
---
 arch/arm64/Kconfig             | 10 ++++++++++
 arch/arm64/include/asm/cache.h |  2 +-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9f1d8566bbf9..c8716aa18001 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -274,6 +274,16 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
 config ARCH_MMAP_RND_COMPAT_BITS_MAX
 	default 16
 
+config ARM64_DMA_MINALIGN
+	int "DMA alignment for slab buffers" if EXPERT
+	default 128
+	help
+	  Set the number of bytes to which buffers allocated with kmalloc()
+	  need to be aligned to ensure that they will be usable for DMA
+	  (ARCH_DMA_MINALIGN).  Lower values may save memory on platforms with
+	  smaller cache line sizes, but may also make the kernel non-functional
+	  on platforms with cache line sizes larger than the chosen value.
+
 config NO_IOPORT_MAP
 	def_bool y if !PCI
 
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a074459f8f2f..5fa68820ff57 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -47,7 +47,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_DMA_MINALIGN	(128)
+#define ARCH_DMA_MINALIGN	(CONFIG_ARM64_DMA_MINALIGN)
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
-- 
2.28.0
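
For anyone considering a value below 128: one rough way to check the
hardware before flipping the option is to read the cache line size the
kernel exposes through cacheinfo.  This is a sketch assuming the
standard sysfs layout; the architectural limit that actually matters for
non-coherent DMA is CTR_EL0.CWG, so treat it only as a first check.

/* Sketch: print cpu0's coherency_line_size from sysfs cacheinfo.
 * Setting CONFIG_ARM64_DMA_MINALIGN below the real cache writeback
 * granule on a system with non-coherent DMA risks data corruption.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size";
	FILE *f = fopen(path, "r");
	int line_size;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &line_size) != 1) {
		fprintf(stderr, "could not parse %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("coherency_line_size: %d bytes\n", line_size);
	return 0;
}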