[PATCH 6/7] riscv: mm: Always flush a single MM context by ASID
Samuel Holland
samuel at sholland.org
Sat Sep 9 13:16:34 PDT 2023
Even if ASIDs are not supported, using the single-ASID variant of the
sfence.vma instruction preserves TLB entries for global (kernel)
pages, whereas the all-ASID variant flushes them as well. So it is
always most efficient to use the single-ASID code path. When the ASID
allocator is disabled, every mm's context ID is zero, so the
single-ASID path simply flushes ASID 0, which all mms then share.
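
For reference, the single-ASID flush this relies on boils down to one
instruction. A minimal sketch of local_flush_tlb_all_asid(), assuming
the usual inline-asm definition in asm/tlbflush.h (details may differ):

static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	/*
	 * rs1 = x0: all addresses; rs2 = asid: only that ASID.
	 * Entries for global (G-bit) mappings are left intact,
	 * unlike a bare "sfence.vma", which flushes them too.
	 */
	__asm__ __volatile__ ("sfence.vma x0, %0"
			      :
			      : "r" (asid)
			      : "memory");
}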
Signed-off-by: Samuel Holland <samuel at sholland.org>
---
 arch/riscv/include/asm/mmu_context.h |  2 -
 arch/riscv/include/asm/tlbflush.h    | 11 +++--
 arch/riscv/mm/context.c              |  3 +-
 arch/riscv/mm/tlbflush.c             | 68 ++++++----------------------
4 files changed, 24 insertions(+), 60 deletions(-)
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 7030837adc1a..b0659413a080 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,8 +33,6 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
-DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
-
 #include <asm-generic/mmu_context.h>
 
 #endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index e55831edfc19..ba27cf68b170 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -54,13 +54,18 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
 
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	unsigned long asid = cntx2asid(atomic_long_read(&mm->context.id));
+
+	local_flush_tlb_all_asid(asid);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	local_flush_tlb_all();
+	flush_tlb_mm(vma->vm_mm);
 }
 
-#define flush_tlb_mm(mm) flush_tlb_all()
 
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
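
flush_tlb_mm() above keys off cntx2asid() from earlier in this series.
A sketch, assuming the fixed context ID layout where the low satp.ASID
bits of mm->context.id hold the ASID:

#define cntx2asid(cntx)		((cntx) & SATP_ASID_MASK)

With the ASID allocator disabled, mm->context.id stays zero, so this
path issues an ASID-0 flush: still correct, since every mm then runs
with satp.ASID == 0, and cheaper than local_flush_tlb_all() because
global kernel entries survive.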
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 3ca9b653df7d..20057085ab8a 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,8 +18,7 @@
 
 #ifdef CONFIG_MMU
 
-DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
-
+static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 static unsigned long num_asids;
 
 static atomic_long_t current_version;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 54c3e70ccd81..56c2d40681a2 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -6,15 +6,6 @@
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 
-static inline void local_flush_tlb_range(unsigned long start,
-		unsigned long size, unsigned long stride)
-{
-	if (size <= stride)
-		local_flush_tlb_page(start);
-	else
-		local_flush_tlb_all();
-}
-
 static inline void local_flush_tlb_range_asid(unsigned long start,
 		unsigned long size, unsigned long stride, unsigned long asid)
 {
@@ -51,62 +42,33 @@ static void __ipi_flush_tlb_range_asid(void *info)
 	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __ipi_flush_tlb_range(void *info)
-{
-	struct flush_tlb_range_data *d = info;
-
-	local_flush_tlb_range(d->start, d->size, d->stride);
-}
-
 static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long size, unsigned long stride)
 {
+	unsigned long asid = cntx2asid(atomic_long_read(&mm->context.id));
 	struct flush_tlb_range_data ftd;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
-	bool broadcast;
 
 	if (cpumask_empty(cmask))
 		return;
 
 	cpuid = get_cpu();
 	/* check if the tlbflush needs to be sent to other CPUs */
-	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-	if (static_branch_unlikely(&use_asid_allocator)) {
-		unsigned long asid = cntx2asid(atomic_long_read(&mm->context.id));
-
-		if (broadcast) {
-			if (riscv_use_ipi_for_rfence()) {
-				ftd.asid = asid;
-				ftd.start = start;
-				ftd.size = size;
-				ftd.stride = stride;
-				on_each_cpu_mask(cmask,
-						 __ipi_flush_tlb_range_asid,
-						 &ftd, 1);
-			} else
-				sbi_remote_sfence_vma_asid(cmask,
-							   start, size, asid);
-		} else {
-			local_flush_tlb_range_asid(start, size, stride, asid);
-		}
-	} else {
-		if (broadcast) {
-			if (riscv_use_ipi_for_rfence()) {
-				ftd.asid = 0;
-				ftd.start = start;
-				ftd.size = size;
-				ftd.stride = stride;
-				on_each_cpu_mask(cmask,
-						 __ipi_flush_tlb_range,
-						 &ftd, 1);
-			} else
-				sbi_remote_sfence_vma(cmask, start, size);
-		} else {
-			local_flush_tlb_range(start, size, stride);
-		}
-	}
-
+	if (cpumask_any_but(cmask, cpuid) < nr_cpu_ids) {
+		if (riscv_use_ipi_for_rfence()) {
+			ftd.asid = asid;
+			ftd.start = start;
+			ftd.size = size;
+			ftd.stride = stride;
+			on_each_cpu_mask(cmask,
+					 __ipi_flush_tlb_range_asid,
+					 &ftd, 1);
+		} else
+			sbi_remote_sfence_vma_asid(cmask,
+						   start, size, asid);
+	} else
+		local_flush_tlb_range_asid(start, size, stride, asid);
 	put_cpu();
 }
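
Both the IPI path and the local path now funnel into
local_flush_tlb_range_asid(). Its body sits outside these hunks; a
sketch of how it picks the flush granularity, assuming a
local_flush_tlb_page_asid() counterpart to the all-ASID helper:

static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	/*
	 * A single (huge)page of size "stride": flush just that
	 * address within the ASID; anything larger: flush the
	 * whole ASID, still sparing global kernel entries.
	 */
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else
		local_flush_tlb_all_asid(asid);
}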
--
2.41.0