[PATCH v3 4/6] RISC-V: Allow marking IPIs as suitable for remote FENCEs
Anup Patel
apatel at ventanamicro.com
Sat Feb 19 21:08:52 PST 2022
To do remote FENCEs (i.e. remote TLB flushes) using IPI calls in the
RISC-V kernel, we need a hardware mechanism that lets the kernel inject
IPIs directly instead of going through SBI calls.

The upcoming ACLINT [M|S]SWI devices and AIA IMSIC devices allow
direct IPI injection from the RISC-V kernel. To support this, we
extend the riscv_ipi_set_virq_range() function so that irqchip
drivers can mark IPIs as suitable for remote FENCEs.
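
The riscv_use_ipi_for_rfence() check added here is what the rest of the
series is expected to use when choosing between IPI-based and SBI-based
remote fences. A minimal sketch of that calling pattern follows; it is
illustrative only (example_remote_icache_flush() and the
ipi_remote_fence_i() callback are placeholders, not code added by this
patch, and it assumes the cpumask-based sbi_remote_fence_i() interface):

#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/sbi.h>
#include <asm/smp.h>

/* Placeholder IPI callback: execute FENCE.I on the receiving hart. */
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}

/* Placeholder caller: flush the instruction cache of every hart in @mask. */
static void example_remote_icache_flush(const struct cpumask *mask)
{
	if (riscv_use_ipi_for_rfence())
		/* IPIs can be injected directly, so use an IPI cross-call. */
		on_each_cpu_mask(mask, ipi_remote_fence_i, NULL, 1);
	else
		/* Otherwise fall back to the SBI remote FENCE.I call. */
		sbi_remote_fence_i(mask);
}
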
Signed-off-by: Anup Patel <apatel at ventanamicro.com>
---
 arch/riscv/include/asm/ipi-mux.h  |  2 ++
 arch/riscv/include/asm/smp.h      | 18 ++++++++++++++++--
 arch/riscv/kernel/ipi-mux.c       |  3 ++-
 arch/riscv/kernel/sbi.c           |  3 ++-
 arch/riscv/kernel/smp.c           | 11 ++++++++++-
 drivers/clocksource/timer-clint.c |  2 +-
 6 files changed, 33 insertions(+), 6 deletions(-)
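
For irqchip drivers the opt-in is a single flag: the timer-clint change
below passes use_for_rfence = true because the driver injects IPIs by
writing the device registers directly, while sbi.c keeps it false since
those IPIs still go through SBI calls. A driver for one of the upcoming
direct-injection devices would follow the same pattern; the sketch below
is a placeholder (the aclint_sswi_* names are invented for illustration
and are not part of this series):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <asm/ipi-mux.h>

/* Placeholder: clear the pending software interrupt of this hart. */
static void aclint_sswi_clear_ipi(void)
{
}

/* Placeholder: raise the software interrupt of every hart in @mask. */
static void aclint_sswi_send_ipi(const struct cpumask *mask)
{
}

static int __init aclint_sswi_probe(struct device_node *np)
{
	/*
	 * These IPIs are injected directly by the kernel, so mark them
	 * as suitable for remote FENCEs (use_for_rfence = true).
	 */
	if (!riscv_ipi_mux_create(true, true,
				  aclint_sswi_clear_ipi, aclint_sswi_send_ipi))
		return -ENODEV;

	return 0;
}
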
diff --git a/arch/riscv/include/asm/ipi-mux.h b/arch/riscv/include/asm/ipi-mux.h
index 988e2bba372a..3a5acbf51806 100644
--- a/arch/riscv/include/asm/ipi-mux.h
+++ b/arch/riscv/include/asm/ipi-mux.h
@@ -15,6 +15,7 @@ void riscv_ipi_mux_handle_irq(void);
/* Create irq_domain for muxed IPIs */
struct irq_domain *riscv_ipi_mux_create(bool use_soft_irq,
+					bool use_for_rfence,
					void (*clear_ipi)(void),
					void (*send_ipi)(const struct cpumask *mask));
@@ -28,6 +29,7 @@ static inline void riscv_ipi_mux_handle_irq(void)
}
static inline struct irq_domain *riscv_ipi_mux_create(bool use_soft_irq,
+						bool use_for_rfence,
						void (*clear_ipi)(void),
						void (*send_ipi)(const struct cpumask *mask))
{
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 178fe4ada592..ddd3be1c77b6 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -16,6 +16,9 @@ struct seq_file;
extern unsigned long boot_cpu_hartid;
#ifdef CONFIG_SMP
+
+#include <linux/jump_label.h>
+
/*
 * Mapping between linux logical cpu index and hartid.
 */
@@ -46,7 +49,12 @@ void riscv_ipi_disable(void);
bool riscv_ipi_have_virq_range(void);
/* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
-void riscv_ipi_set_virq_range(int virq, int nr_irqs);
+void riscv_ipi_set_virq_range(int virq, int nr_irqs, bool use_for_rfence);
+
+/* Check if we can use IPIs for remote FENCEs */
+DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
+#define riscv_use_ipi_for_rfence() \
+	static_branch_unlikely(&riscv_ipi_for_rfence)
/* Secondary hart entry */
asmlinkage void smp_callin(void);
@@ -93,10 +101,16 @@ static inline bool riscv_ipi_have_virq_range(void)
return false;
}
-static inline void riscv_ipi_set_virq_range(int virq, int nr)
+static inline void riscv_ipi_set_virq_range(int virq, int nr,
+					     bool use_for_rfence)
{
}
+static inline bool riscv_use_ipi_for_rfence(void)
+{
+	return false;
+}
+
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
diff --git a/arch/riscv/kernel/ipi-mux.c b/arch/riscv/kernel/ipi-mux.c
index 3a0405f0e0de..544074ea3ead 100644
--- a/arch/riscv/kernel/ipi-mux.c
+++ b/arch/riscv/kernel/ipi-mux.c
@@ -144,6 +144,7 @@ static int ipi_mux_starting_cpu(unsigned int cpu)
}
struct irq_domain *riscv_ipi_mux_create(bool use_soft_irq,
+					bool use_for_rfence,
					void (*clear_ipi)(void),
					void (*send_ipi)(const struct cpumask *mask))
{
@@ -198,7 +199,7 @@ struct irq_domain *riscv_ipi_mux_create(bool use_soft_irq,
"irqchip/riscv/ipi-mux:starting",
ipi_mux_starting_cpu, ipi_mux_dying_cpu);
- riscv_ipi_set_virq_range(virq, BITS_PER_LONG);
+ riscv_ipi_set_virq_range(virq, BITS_PER_LONG, use_for_rfence);
return ipi_mux_priv.domain;
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index fa3d92fce9f8..210d23524771 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -630,7 +630,8 @@ static void sbi_ipi_clear(void)
void __init sbi_ipi_init(void)
{
-	if (riscv_ipi_mux_create(true, sbi_ipi_clear, sbi_send_cpumask_ipi))
+	if (riscv_ipi_mux_create(true, false,
+				 sbi_ipi_clear, sbi_send_cpumask_ipi))
		pr_info("providing IPIs using SBI IPI extension\n");
}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index a9f1aca38358..b98d9c319f6f 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -149,7 +149,10 @@ bool riscv_ipi_have_virq_range(void)
return (ipi_virq_base) ? true : false;
}
-void riscv_ipi_set_virq_range(int virq, int nr)
+DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
+EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);
+
+void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
{
int i, err;
@@ -172,6 +175,12 @@ void riscv_ipi_set_virq_range(int virq, int nr)
/* Enabled IPIs for boot CPU immediately */
riscv_ipi_enable();
+
+	/* Update RFENCE static key */
+	if (use_for_rfence)
+		static_branch_enable(&riscv_ipi_for_rfence);
+	else
+		static_branch_disable(&riscv_ipi_for_rfence);
}
EXPORT_SYMBOL_GPL(riscv_ipi_set_virq_range);
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index b05a9e946633..607d47dab896 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -224,7 +224,7 @@ static int __init clint_timer_init_dt(struct device_node *np)
goto fail_free_irq;
}
-	riscv_ipi_mux_create(true, clint_clear_ipi, clint_send_ipi);
+	riscv_ipi_mux_create(true, true, clint_clear_ipi, clint_send_ipi);
clint_clear_ipi();
return 0;
--
2.25.1