[PATCH 18/20] clocksource: add driver for RISC-V CLINT timer

Ahmad Fatoum a.fatoum at pengutronix.de
Sun Mar 14 12:28:02 GMT 2021


Linux selects this driver on NoMMU RISC-V machines. It's also used on the
QEMU Virt machine added in a follow-up commit.

Signed-off-by: Ahmad Fatoum <a.fatoum at pengutronix.de>
---
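A note for reviewers: the 0xbff8 offset used below is the mtime register of
the standard SiFive CLINT register map. A rough sketch of that layout, with
macro names chosen here purely for illustration (the driver only needs the
mtime offset, since barebox neither programs mtimecmp nor sends IPIs):

/* Standard SiFive CLINT layout, offsets relative to the CLINT base */
#define CLINT_MSIP(hart)	(0x0000 + 4 * (hart))	/* per-hart software interrupt pending */
#define CLINT_MTIMECMP(hart)	(0x4000 + 8 * (hart))	/* per-hart timer compare value */
#define CLINT_MTIME		0xbff8			/* free-running 64-bit counter */
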
 drivers/clocksource/Kconfig       |  7 +++
 drivers/clocksource/Makefile      |  1 +
 drivers/clocksource/timer-clint.c | 98 +++++++++++++++++++++++++++++++
 3 files changed, 106 insertions(+)
 create mode 100644 drivers/clocksource/timer-clint.c

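Once init_clock() has registered the clocksource, barebox's generic
timekeeping (get_time_ns(), is_timeout(), the *delay() helpers) is backed by
the CLINT's mtime counter. A minimal sketch of a consumer, with the function
name and the polled status register purely hypothetical:

#include <common.h>
#include <clock.h>
#include <errno.h>
#include <io.h>

/* Poll a (hypothetical) ready bit for up to 100 ms, using the
 * CLINT-backed clocksource registered by this driver.
 */
static int wait_until_ready(void __iomem *status_reg)
{
	uint64_t start = get_time_ns();

	while (!(readl(status_reg) & 0x1)) {
		if (is_timeout(start, 100 * MSECOND))
			return -ETIMEDOUT;
	}

	return 0;
}
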
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 6dfe6151ac98..7f829490e108 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -99,4 +99,11 @@ config CLOCKSOURCE_TI_DM
 config CLOCKSOURCE_TI_32K
 	bool
 
+config CLINT_TIMER
+	bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST
+	depends on OFDEVICE
+	help
+	  This option enables the CLINT timer driver for RISC-V systems. The
+	  CLINT timer is usually used on NoMMU (M-mode) RISC-V systems.
+
 endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index b4607f787fcf..268ce16800a7 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_CLOCKSOURCE_IMX_GPT) += timer-imx-gpt.o
 obj-$(CONFIG_CLOCKSOURCE_DW_APB_TIMER) += dw_apb_timer.o
 obj-$(CONFIG_CLOCKSOURCE_TI_DM) += timer-ti-dm.o
 obj-$(CONFIG_CLOCKSOURCE_TI_32K) += timer-ti-32k.o
+obj-$(CONFIG_CLINT_TIMER) += timer-clint.o
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
new file mode 100644
index 000000000000..620222b51226
--- /dev/null
+++ b/drivers/clocksource/timer-clint.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Most M-mode (i.e. NoMMU) RISC-V systems have a CLINT MMIO timer
+ * device.
+ */
+
+#define pr_fmt(fmt) "clint: " fmt
+
+#include <common.h>
+#include <init.h>
+#include <clock.h>
+#include <errno.h>
+#include <of.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <notifier.h>
+#include <io.h>
+
+#define CLINT_TIMER_VAL_OFF	0xbff8
+
+#ifdef CONFIG_64BIT
+#define clint_get_cycles()	readq(clint_timer_val)
+#else
+#define clint_get_cycles()	readl(clint_timer_val)
+#define clint_get_cycles_hi()	readl(((u32 *)clint_timer_val) + 1)
+#endif
+
+static void __iomem *clint_timer_val;
+
+#ifdef CONFIG_64BIT
+static u64 notrace clint_get_cycles64(void)
+{
+	return clint_get_cycles();
+}
+#else /* CONFIG_64BIT */
+static u64 notrace clint_get_cycles64(void)
+{
+	u32 hi, lo;
+
+	do {
+		hi = clint_get_cycles_hi();
+		lo = clint_get_cycles();
+	} while (hi != clint_get_cycles_hi());
+
+	return ((u64)hi << 32) | lo;
+}
+#endif /* CONFIG_64BIT */
+
+static u64 clint_rdtime(void)
+{
+	return clint_get_cycles64();
+}
+
+static struct clocksource clint_clocksource = {
+	.read		= clint_rdtime,
+	.mask		= CLOCKSOURCE_MASK(64),
+};
+
+static int clint_timer_init_dt(struct device_d *dev)
+{
+	struct device_node *cpu;
+	struct resource *iores;
+	u32 riscv_timebase;
+
+	/* one timer is enough */
+	if (clint_timer_val)
+		return 0;
+
+	iores = dev_request_mem_resource(dev, 0);
+	if (IS_ERR(iores))
+		return PTR_ERR(iores);
+	clint_timer_val = IOMEM(iores->start) + CLINT_TIMER_VAL_OFF;
+
+	cpu = of_find_node_by_path("/cpus");
+	if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &riscv_timebase))
+		panic("RISC-V system with no 'timebase-frequency' in DTS\n");
+
+	dev_info(dev, "running at %u Hz\n", riscv_timebase);
+
+	clint_clocksource.mult = clocksource_hz2mult(riscv_timebase, clint_clocksource.shift);
+
+	return init_clock(&clint_clocksource);
+}
+
+static struct of_device_id time_clint_dt_ids[] = {
+	{ .compatible = "riscv,clint0", },
+	{ .compatible = "sifive,clint0" },
+	{ /* sentinel */ },
+};
+
+static struct driver_d clint_timer_driver = {
+	.name = "clint-timer",
+	.probe = clint_timer_init_dt,
+	.of_compatible = time_clint_dt_ids,
+};
+postcore_platform_driver(clint_timer_driver);
-- 
2.29.2