[PATCH 2/5] kvx: Implement dcache invalidation primitive
Jules Maselbas
jmaselbas at kalray.eu
Mon Mar 1 15:58:48 GMT 2021
From: Yann Sionneau <ysionneau at kalray.eu>
Signed-off-by: Yann Sionneau <ysionneau at kalray.eu>
Signed-off-by: Jules Maselbas <jmaselbas at kalray.eu>
---
arch/kvx/Kconfig | 1 +
arch/kvx/include/asm/cache.h | 13 ++++++++++
arch/kvx/lib/Makefile | 2 +-
arch/kvx/lib/cache.c | 50 ++++++++++++++++++++++++++++++++++++
4 files changed, 65 insertions(+), 1 deletion(-)
create mode 100644 arch/kvx/lib/cache.c
diff --git a/arch/kvx/Kconfig b/arch/kvx/Kconfig
index 8483ae6e6..4e02613ec 100644
--- a/arch/kvx/Kconfig
+++ b/arch/kvx/Kconfig
@@ -12,6 +12,7 @@ config KVX
select FLEXIBLE_BOOTARGS
select GENERIC_FIND_NEXT_BIT
select HAS_ARCH_SJLJ
+ select HAS_CACHE
select LIBFDT
select MFD_SYSCON
select OF_BAREBOX_DRIVERS
diff --git a/arch/kvx/include/asm/cache.h b/arch/kvx/include/asm/cache.h
index 3be176725..efdbcc630 100644
--- a/arch/kvx/include/asm/cache.h
+++ b/arch/kvx/include/asm/cache.h
@@ -15,4 +15,17 @@ static inline void sync_caches_for_execution(void)
__builtin_kvx_barrier();
}
+void kvx_dcache_invalidate_mem_area(uint64_t addr, int size);
+
+/* Make instructions written through the dcache visible to the icache;
+ * simply delegates to sync_caches_for_execution() above.
+ */
+static inline void sync_dcache_icache(void)
+{
+ sync_caches_for_execution();
+}
+
+/* Invalidate the entire L1 data cache. The fence is issued first so that
+ * in-flight memory accesses complete before the invalidate — NOTE(review):
+ * assumes a write-through dcache where invalidation cannot lose dirty data;
+ * confirm against the KVX core documentation.
+ */
+static inline void dcache_inval(void)
+{
+ __builtin_kvx_fence();
+ __builtin_kvx_dinval();
+}
+
#endif /* __KVX_CACHE_H */
diff --git a/arch/kvx/lib/Makefile b/arch/kvx/lib/Makefile
index cee08b0fa..d271ebccf 100644
--- a/arch/kvx/lib/Makefile
+++ b/arch/kvx/lib/Makefile
@@ -3,4 +3,4 @@
# Copyright (C) 2019 Kalray Inc.
#
-obj-y += cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o
+obj-y += cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o
diff --git a/arch/kvx/lib/cache.c b/arch/kvx/lib/cache.c
new file mode 100644
index 000000000..4e128891a
--- /dev/null
+++ b/arch/kvx/lib/cache.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau at kalray.eu>, Kalray Inc.
+
+#include <asm/cache.h>
+
+#define K1_DCACHE_REFILL (12)
+#define K1_DCACHE_HWLOOP (1)
+#define K1_DCACHE_REFILL_PERCENT (80)
+#define K1_DCACHE_LINE_SIZE (64)
+#define K1_DCACHE_SIZE (128*1024)
+
+/*
+ * kvx_dcache_invalidate_mem_area - invalidate the dcache lines covering
+ * a memory range.
+ * @addr: start address of the range (need not be cache-line aligned)
+ * @size: length of the range in bytes
+ *
+ * Small ranges are invalidated line by line; for large ranges it is
+ * cheaper to invalidate the whole cache and pay the refill cost.
+ */
+void kvx_dcache_invalidate_mem_area(uint64_t addr, int size)
+{
+ /* if hwloop iteration cost < K1_DCACHE_REFILL_PERCENT of a full cache
+ * refill, use a hwloop, otherwise invalidate the whole cache
+ */
+ if (size <
+ (K1_DCACHE_REFILL_PERCENT * (K1_DCACHE_REFILL * K1_DCACHE_SIZE))
+ / (100 * (K1_DCACHE_REFILL + K1_DCACHE_HWLOOP))) {
+ /* byte span from the line-aligned start of the range to its
+ * end; converted into a line count (rounded up) just below
+ */
+ int invalid_lines = ((addr + size) -
+ (addr & (~(K1_DCACHE_LINE_SIZE - 1))));
+
+ invalid_lines = invalid_lines / K1_DCACHE_LINE_SIZE
+ + (0 != (invalid_lines % K1_DCACHE_LINE_SIZE));
+ if (__builtin_constant_p(invalid_lines) && invalid_lines <= 2) {
+ /* when inlining (and doing constant folding),
+ * gcc is able to unroll small loops
+ */
+ int i;
+
+ for (i = 0; i < invalid_lines; i++) {
+ __builtin_kvx_dinvall((void *)(addr
+ + i * K1_DCACHE_LINE_SIZE));
+ }
+ } else if (invalid_lines > 0) {
+ /* hardware loop: one dinvall per iteration, stepping
+ * addr by a line size; the ls/le/lc hwloop registers
+ * are clobbered
+ */
+ __asm__ __volatile__ (
+ "loopdo %1, 0f\n;;\n"
+ "dinvall 0[%0]\n"
+ "addd %0 = %0, %2\n;;\n"
+ "0:\n"
+ : "+r"(addr)
+ : "r" (invalid_lines),
+ "i" (K1_DCACHE_LINE_SIZE)
+ : "ls", "le", "lc", "memory");
+ }
+ } else {
+ /* range too large: invalidate the entire dcache instead */
+ __builtin_kvx_dinval();
+ }
+}
--
2.17.1
More information about the barebox
mailing list