[PATCH v9 3/6] riscv: mm: dma-noncoherent: nonstandard cache operations support
Prabhakar
prabhakar.csengg at gmail.com
Wed Jun 14 03:47:56 PDT 2023
From: Lad Prabhakar <prabhakar.mahadev-lad.rj at bp.renesas.com>
Introduce support for nonstandard noncoherent systems in the RISC-V
architecture: allow such systems to register function pointers that
handle their cache management operations.

Add a new configuration option, RISCV_NONSTANDARD_CACHE_OPS, a boolean
that depends on RISCV_DMA_NONCOHERENT and enables function pointer
support for cache management on nonstandard noncoherent systems.
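
As an illustration (not part of this patch), a platform with vendor-specific
CMOs could register its callbacks roughly as sketched below. The my_soc_*
names are hypothetical; only struct riscv_cache_ops and
riscv_noncoherent_register_cache_ops() are introduced here:

/* Hypothetical vendor code, for illustration only */
#include <linux/init.h>
#include <asm/dma-noncoherent.h>

static void my_soc_cache_clean(phys_addr_t paddr, unsigned long size)
{
        /* write back the cache lines covering [paddr, paddr + size) */
}

static void my_soc_cache_inval(phys_addr_t paddr, unsigned long size)
{
        /* invalidate the cache lines covering [paddr, paddr + size) */
}

static void my_soc_cache_flush(phys_addr_t paddr, unsigned long size)
{
        /* write back and invalidate the cache lines covering [paddr, paddr + size) */
}

static const struct riscv_cache_ops my_soc_cache_ops = {
        .clean = my_soc_cache_clean,
        .inval = my_soc_cache_inval,
        .flush = my_soc_cache_flush,
};

static int __init my_soc_cache_init(void)
{
        /* registered ops take precedence over the ALT_CMO_OP() path below */
        riscv_noncoherent_register_cache_ops(&my_soc_cache_ops);
        return 0;
}
early_initcall(my_soc_cache_init);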
Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj at bp.renesas.com>
---
v8 -> v9
* New patch
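
Note: RISCV_NONSTANDARD_CACHE_OPS has no prompt, so a platform is expected
to enable it via select from its own Kconfig entry, along these lines
(ERRATA_MYSOC_CMO is a made-up symbol, shown only to illustrate the wiring):

config ERRATA_MYSOC_CMO
        bool "MySoC non-standard cache maintenance operations"
        depends on RISCV_DMA_NONCOHERENT
        select RISCV_NONSTANDARD_CACHE_OPS
        help
          Provide vendor-specific cache management callbacks instead of
          the standard CMO path on MySoC parts.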
---
arch/riscv/Kconfig                       |  7 ++++
arch/riscv/include/asm/dma-noncoherent.h | 28 +++++++++++++++
arch/riscv/mm/dma-noncoherent.c          | 43 ++++++++++++++++++++++++
arch/riscv/mm/pmem.c                     | 13 +++++++
4 files changed, 91 insertions(+)
create mode 100644 arch/riscv/include/asm/dma-noncoherent.h
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 628aad4fb6e2..325ab2124f0a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -261,6 +261,13 @@ config RISCV_DMA_NONCOHERENT
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select DMA_DIRECT_REMAP

+config RISCV_NONSTANDARD_CACHE_OPS
+       bool
+       depends on RISCV_DMA_NONCOHERENT
+       help
+         This enables function pointer support for non-standard noncoherent
+         systems to handle cache management.
+
config AS_HAS_INSN
        def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)
diff --git a/arch/riscv/include/asm/dma-noncoherent.h b/arch/riscv/include/asm/dma-noncoherent.h
new file mode 100644
index 000000000000..f4e9bb2d3800
--- /dev/null
+++ b/arch/riscv/include/asm/dma-noncoherent.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#ifndef __ASM_DMA_NONCOHERENT_H
+#define __ASM_DMA_NONCOHERENT_H
+
+#include <linux/dma-direct.h>
+
+/*
+ * struct riscv_cache_ops - Structure for CMO function pointers
+ *
+ * @clean: Function pointer for clean cache
+ * @inval: Function pointer for invalidate cache
+ * @flush: Function pointer for flushing the cache
+ */
+struct riscv_cache_ops {
+       void (*clean)(phys_addr_t paddr, unsigned long size);
+       void (*inval)(phys_addr_t paddr, unsigned long size);
+       void (*flush)(phys_addr_t paddr, unsigned long size);
+};
+
+extern struct riscv_cache_ops noncoherent_cache_ops;
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_cache_ops *ops);
+
+#endif /* __ASM_DMA_NONCOHERENT_H */
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index b9a9f57e02be..4cdaa879839a 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -9,13 +9,26 @@
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
+#include <asm/dma-noncoherent.h>

static bool noncoherent_supported;

+struct riscv_cache_ops noncoherent_cache_ops = {
+       .clean = NULL,
+       .inval = NULL,
+       .flush = NULL,
+};
+
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
        void *vaddr = phys_to_virt(paddr);

+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+       if (unlikely(noncoherent_cache_ops.clean)) {
+               noncoherent_cache_ops.clean(paddr, size);
+               return;
+       }
+#endif
        ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}
@@ -23,6 +36,13 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
        void *vaddr = phys_to_virt(paddr);

+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+       if (unlikely(noncoherent_cache_ops.inval)) {
+               noncoherent_cache_ops.inval(paddr, size);
+               return;
+       }
+#endif
+
        ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}
@@ -30,6 +50,13 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
        void *vaddr = phys_to_virt(paddr);

+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+       if (unlikely(noncoherent_cache_ops.flush)) {
+               noncoherent_cache_ops.flush(paddr, size);
+               return;
+       }
+#endif
+
        ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}
@@ -50,6 +77,13 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
        void *flush_addr = page_address(page);

+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+       if (unlikely(noncoherent_cache_ops.flush)) {
+               noncoherent_cache_ops.flush(page_to_phys(page), size);
+               return;
+       }
+#endif
+
        ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}
@@ -75,3 +109,12 @@ void riscv_noncoherent_supported(void)
                   "Non-coherent DMA support enabled without a block size\n");
        noncoherent_supported = true;
}
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_cache_ops *ops)
+{
+       if (!ops)
+               return;
+
+       noncoherent_cache_ops = *ops;
+}
+EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c
index 089df92ae876..fb481f5b930a 100644
--- a/arch/riscv/mm/pmem.c
+++ b/arch/riscv/mm/pmem.c
@@ -7,15 +7,28 @@
#include <linux/libnvdimm.h>

#include <asm/cacheflush.h>
+#include <asm/dma-noncoherent.h>

void arch_wb_cache_pmem(void *addr, size_t size)
{
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+       if (unlikely(noncoherent_cache_ops.clean)) {
+               noncoherent_cache_ops.clean(virt_to_phys(addr), size);
+               return;
+       }
+#endif
        ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

void arch_invalidate_pmem(void *addr, size_t size)
{
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+       if (unlikely(noncoherent_cache_ops.inval)) {
+               noncoherent_cache_ops.inval(virt_to_phys(addr), size);
+               return;
+       }
+#endif
        ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
--
2.25.1