[RFC PATCH 2/6] arm64: Support ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
Jonathan Cameron
Jonathan.Cameron at huawei.com
Thu Mar 20 10:41:14 PDT 2025
From: Yicong Yang <yangyicong at hisilicon.com>
ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION provides the mechanism for
invalidating certain memory regions in a cache-incoherent manner.
It is currently used for NVDIMM and CXL memory. On arm64 this is
mainly done by a system component and is implementation defined
per the relevant specifications. Provide a way for platforms to
register their own invalidate method and thereby implement
ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION.
Signed-off-by: Yicong Yang <yangyicong at hisilicon.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron at huawei.com>
---
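Note for reviewers: a minimal sketch of how a platform/SoC driver might
register its invalidation callback through the interface added here. The
foo_* names and the empty callback body are placeholders for whatever
implementation-defined mechanism a given platform provides; they are not
part of this series.

#include <linux/module.h>
#include <asm/cacheflush.h>

/* Placeholder: kick the platform's system-level cache maintenance
 * engine for [start, start + len) and wait for completion. */
static int foo_invalidate_memregion(int res_desc, phys_addr_t start,
				    size_t len)
{
	return 0;
}

static const struct system_cache_flush_method foo_scfm = {
	.invalidate_memregion = foo_invalidate_memregion,
};

static int __init foo_scfm_init(void)
{
	/* Registration is ignored if another method is already set. */
	arm64_set_sys_cache_flush_method(&foo_scfm);
	return 0;
}
module_init(foo_scfm_init);

static void __exit foo_scfm_exit(void)
{
	arm64_clr_sys_cache_flush_method(&foo_scfm);
}
module_exit(foo_scfm_exit);

MODULE_LICENSE("GPL");

As the patch below implements it, arm64_set_sys_cache_flush_method()
silently drops a second registration, so only the first registered
method takes effect.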
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/cacheflush.h | 14 ++++++++++
arch/arm64/mm/flush.c | 42 +++++++++++++++++++++++++++++
3 files changed, 57 insertions(+)
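For context, a sketch of the consumer side, modelled on how existing
users of the "DEVMEM" exports (e.g. CXL/NVDIMM code on x86) call this
interface. foo_region_invalidate() and the choice of IORES_DESC_CXL as
the resource descriptor are illustrative only.

#include <linux/ioport.h>
#include <linux/memregion.h>
#include <linux/module.h>

/* The symbols below are exported in the "DEVMEM" namespace. */
MODULE_IMPORT_NS("DEVMEM");

static int foo_region_invalidate(phys_addr_t base, size_t size)
{
	/* Bail out if no platform method has been registered. */
	if (!cpu_cache_has_invalidate_memregion())
		return -ENXIO;

	return cpu_cache_invalidate_memregion(IORES_DESC_CXL, base, size);
}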
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 940343beb3d4..11ecd20ec3b8 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,6 +21,7 @@ config ARM64
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CC_PLATFORM
+ select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
select ARCH_HAS_CRC32
select ARCH_HAS_CRC_T10DIF if KERNEL_MODE_NEON
select ARCH_HAS_CURRENT_STACK_POINTER
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 28ab96e808ef..b8eb8738c965 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -139,6 +139,20 @@ static __always_inline void icache_inval_all_pou(void)
dsb(ish);
}
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+
+#include <linux/memregion.h>
+
+struct system_cache_flush_method {
+ int (*invalidate_memregion)(int res_desc,
+ phys_addr_t start, size_t len);
+};
+
+void arm64_set_sys_cache_flush_method(const struct system_cache_flush_method *method);
+void arm64_clr_sys_cache_flush_method(const struct system_cache_flush_method *method);
+
+#endif /* CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION */
+
#include <asm-generic/cacheflush.h>
#endif /* __ASM_CACHEFLUSH_H */
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 013eead9b695..d822406d925d 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/libnvdimm.h>
#include <linux/pagemap.h>
+#include <linux/memregion.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
@@ -100,3 +101,44 @@ void arch_invalidate_pmem(void *addr, size_t size)
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
+
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+
+static const struct system_cache_flush_method *scfm_data;
+DEFINE_SPINLOCK(scfm_lock);
+
+void arm64_set_sys_cache_flush_method(const struct system_cache_flush_method *method)
+{
+ guard(spinlock_irqsave)(&scfm_lock);
+ if (scfm_data || !method || !method->invalidate_memregion)
+ return;
+
+ scfm_data = method;
+}
+EXPORT_SYMBOL_GPL(arm64_set_sys_cache_flush_method);
+
+void arm64_clr_sys_cache_flush_method(const struct system_cache_flush_method *method)
+{
+ guard(spinlock_irqsave)(&scfm_lock);
+ if (scfm_data && scfm_data == method)
+ scfm_data = NULL;
+}
+
+int cpu_cache_invalidate_memregion(int res_desc, phys_addr_t start, size_t len)
+{
+ guard(spinlock_irqsave)(&scfm_lock);
+ if (!scfm_data)
+ return -EOPNOTSUPP;
+
+ return scfm_data->invalidate_memregion(res_desc, start, len);
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");
+
+bool cpu_cache_has_invalidate_memregion(void)
+{
+ guard(spinlock_irqsave)(&scfm_lock);
+ return !!scfm_data;
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
+
+#endif /* CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION */
--
2.43.0