[RFC PATCH v2 2/9] riscv: mm: Apply a threshold to the number of active ASIDs on each CPU
Xu Lu
luxu.kernel at bytedance.com
Thu Nov 27 06:11:10 PST 2025
Since each CPU has a limited number of TLB entries, only a limited number
of ASIDs can be active in each CPU's TLB at the same time. We therefore
apply a threshold here: when a mm_struct is loaded, we mark its ASID as
active; once the number of active ASIDs exceeds the threshold, we evict
the mm_struct that has not been used for the longest time, flush its TLB
entries, mark its ASID inactive, and clear the current CPU in its
mm_cpumask.
Signed-off-by: Xu Lu <luxu.kernel at bytedance.com>
---
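Note for reviewers: the per-CPU bookkeeping added below boils down to a
small fixed-size table of (mm, generation) pairs used as an LRU set. The
stand-alone sketch below models only that bookkeeping in user space so the
eviction rule is easy to follow; struct toy_mm, toy_load_mm() and the bare
contexts[] array are made up for illustration and do not appear in the
patch, which additionally takes the per-CPU rwlock, flushes the victim's
ASID and clears this CPU from its mm_cpumask.

#include <stdio.h>
#include <limits.h>

#define MAX_LOADED_MM 6

struct toy_mm {
	int asid;
};

struct toy_context {
	struct toy_mm *mm;
	unsigned int gen;
};

static struct toy_context contexts[MAX_LOADED_MM];
static unsigned int next_gen = 1;

/*
 * Mark @mm as recently used. Returns the evicted mm (which the kernel
 * code would flush and drop from its mm_cpumask), or NULL if no
 * eviction was needed.
 */
static struct toy_mm *toy_load_mm(struct toy_mm *mm)
{
	struct toy_mm *victim = NULL;
	unsigned int i, pos = 0, min = UINT_MAX;

	for (i = 0; i < MAX_LOADED_MM; i++) {
		if (contexts[i].mm == mm) {	/* already tracked: just refresh */
			pos = i;
			break;
		}
		if (contexts[i].gen < min) {	/* remember the stalest slot */
			min = contexts[i].gen;
			pos = i;
		}
	}

	if (contexts[pos].mm != mm) {
		victim = contexts[pos].mm;
		contexts[pos].mm = mm;
	}
	contexts[pos].gen = next_gen++;

	return victim;
}

int main(void)
{
	struct toy_mm mm[8] = { {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8} };
	unsigned int i;

	/* Touching 8 mms with only 6 slots forces two evictions. */
	for (i = 0; i < 8; i++) {
		struct toy_mm *victim = toy_load_mm(&mm[i]);

		if (victim)
			printf("load asid %d -> evict asid %d\n",
			       mm[i].asid, victim->asid);
		else
			printf("load asid %d -> no eviction\n", mm[i].asid);
	}

	return 0;
}

Touching eight mms against six slots prints two eviction lines (asid 1,
then asid 2), which is the behaviour intended for local_load_tlb_mm().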
arch/riscv/include/asm/tlbflush.h | 27 +++++++++++++
arch/riscv/mm/context.c | 1 +
arch/riscv/mm/tlbflush.c | 66 +++++++++++++++++++++++++++++++
3 files changed, 94 insertions(+)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index eed0abc405143..3f83fd5ef36db 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -66,6 +66,33 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
extern unsigned long tlb_flush_all_threshold;
+
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+
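+/* Maximum number of mm_structs whose ASIDs we keep active in a CPU's TLB. */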
+#define MAX_LOADED_MM 6
+
+struct tlb_context {
+ struct mm_struct *mm;
+ unsigned int gen;
+};
+
+struct tlb_info {
+ rwlock_t rwlock;
+ struct mm_struct *active_mm;
+ unsigned int next_gen;
+ struct tlb_context contexts[MAX_LOADED_MM];
+};
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo);
+
+void local_load_tlb_mm(struct mm_struct *mm);
+
+#else /* CONFIG_RISCV_LAZY_TLB_FLUSH */
+
+static inline void local_load_tlb_mm(struct mm_struct *mm) {}
+
+#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
+
#else /* CONFIG_MMU */
#define local_flush_tlb_all() do { } while (0)
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 55c20ad1f7444..a7cf36ad34678 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -217,6 +217,7 @@ static inline void set_mm(struct mm_struct *prev,
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
if (static_branch_unlikely(&use_asid_allocator)) {
+ local_load_tlb_mm(next);
set_mm_asid(next, cpu);
} else {
cpumask_clear_cpu(cpu, mm_cpumask(prev));
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 8404530ec00f9..0b1c21c7aafb8 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -103,6 +103,15 @@ struct flush_tlb_range_data {
unsigned long stride;
};
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_info, tlbinfo) = {
+ .rwlock = __RW_LOCK_UNLOCKED(tlbinfo.rwlock),
+ .active_mm = NULL,
+ .next_gen = 1,
+ .contexts = { { NULL, 0, }, },
+};
+#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
+
static void __ipi_flush_tlb_range_asid(void *info)
{
struct flush_tlb_range_data *d = info;
@@ -240,3 +249,60 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
cpumask_clear(&batch->cpumask);
}
+
+#ifdef CONFIG_RISCV_LAZY_TLB_FLUSH
+
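+/*
+ * Hand out the next generation number for this CPU. When the counter
+ * wraps to zero, collapse all recorded generations to 1 and restart,
+ * so stale entries cannot look newer than freshly loaded ones.
+ */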
+static inline unsigned int new_tlb_gen(struct tlb_info *info)
+{
+ unsigned int gen = info->next_gen++;
+ unsigned int i;
+
+ if (unlikely(!info->next_gen)) {
+ for (i = 0; i < MAX_LOADED_MM; i++) {
+ if (info->contexts[i].gen)
+ info->contexts[i].gen = 1;
+ }
+ info->next_gen = 1;
+ gen = info->next_gen++;
+ }
+
+ return gen;
+}
+
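+/*
+ * Record @mm as the active mm on this CPU. If @mm is not already in the
+ * per-CPU context table, replace the least recently used entry; the
+ * evicted mm gets this CPU cleared from its mm_cpumask and its ASID's
+ * TLB entries flushed locally.
+ */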
+void local_load_tlb_mm(struct mm_struct *mm)
+{
+ struct tlb_info *info = this_cpu_ptr(&tlbinfo);
+ struct tlb_context *contexts = info->contexts;
+ struct mm_struct *victim = NULL;
+ unsigned int i, pos = 0, min = UINT_MAX;
+
+ for (i = 0; i < MAX_LOADED_MM; i++) {
+ if (contexts[i].mm == mm) {
+ pos = i;
+ break;
+ }
+ if (min > contexts[i].gen) {
+ min = contexts[i].gen;
+ pos = i;
+ }
+ }
+
+ write_lock(&info->rwlock);
+
+ info->active_mm = mm;
+
+ if (contexts[pos].mm != mm) {
+ victim = contexts[pos].mm;
+ contexts[pos].mm = mm;
+ }
+ contexts[pos].gen = new_tlb_gen(info);
+
+ write_unlock(&info->rwlock);
+
+ if (victim) {
+ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(victim));
+ local_flush_tlb_all_asid(get_mm_asid(victim));
+ }
+}
+
+#endif /* CONFIG_RISCV_LAZY_TLB_FLUSH */
--
2.20.1