[RFC PATCH v1 4/4] riscv: mm: Perform tlb flush during context_switch
Guo Ren
guoren at kernel.org
Sun Nov 2 19:44:15 PST 2025
On Thu, Oct 30, 2025 at 9:57 PM Xu Lu <luxu.kernel at bytedance.com> wrote:
>
> During context_switch, check the percpu TLB flush queue and lazily
> perform the deferred TLB flushes.
>
> Signed-off-by: Xu Lu <luxu.kernel at bytedance.com>
> ---
>  arch/riscv/include/asm/tlbflush.h |  4 ++++
>  arch/riscv/mm/context.c           |  6 ++++++
>  arch/riscv/mm/tlbflush.c          | 34 +++++++++++++++++++++++++++++++
>  3 files changed, 44 insertions(+)
>
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index eed0abc405143..7735c36f13d9f 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -66,6 +66,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
>
> extern unsigned long tlb_flush_all_threshold;
> +
> +DECLARE_PER_CPU(bool, need_tlb_flush);
> +void local_tlb_flush_queue_drain(void);
> +
> #else /* CONFIG_MMU */
> #define local_flush_tlb_all() do { } while (0)
> #endif /* CONFIG_MMU */
> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> index 4d5792c3a8c19..82b743bc81e4c 100644
> --- a/arch/riscv/mm/context.c
> +++ b/arch/riscv/mm/context.c
> @@ -199,6 +199,12 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
>
>  	if (need_flush_tlb)
>  		local_flush_tlb_all();
> +
> +	/* Paired with RISCV_FENCE in should_ipi_flush() */
> +	RISCV_FENCE(w, r);
> +
> +	if (this_cpu_read(need_tlb_flush))
> +		local_tlb_flush_queue_drain();
>  }
> 
>  static void set_mm_noasid(struct mm_struct *mm)
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index f4333c3a6d251..6592f72354df9 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -115,6 +115,8 @@ DEFINE_PER_CPU(struct tlb_flush_queue, tlb_flush_queue) = {
>  	.len = 0,
>  };
> 
> +DEFINE_PER_CPU(bool, need_tlb_flush) = false;
> +
>  static bool should_ipi_flush(int cpu, void *info)
>  {
>  	struct tlb_flush_queue *queue = per_cpu_ptr(&tlb_flush_queue, cpu);
> @@ -134,6 +136,14 @@ static bool should_ipi_flush(int cpu, void *info)
>  	}
>  	raw_spin_unlock_irqrestore(&queue->lock, flags);
> 
> +	/* Ensure tlb flush info is queued before setting need_tlb_flush flag */
> +	smp_wmb();
> +
> +	per_cpu(need_tlb_flush, cpu) = true;
> +
> +	/* Paired with RISCV_FENCE in set_mm_asid() */
> +	RISCV_FENCE(w, r);
> +
>  	/* Recheck whether loaded_asid changed during enqueueing task */
>  	if (per_cpu(loaded_asid, cpu) == d->asid)
>  		return true;
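
If I read the two RISCV_FENCE(w, r) sites correctly, this is the usual
store -> load (store-buffering) pairing: the flushing CPU publishes
need_tlb_flush and then re-reads loaded_asid, while the switching CPU
updates loaded_asid and then reads need_tlb_flush, so at least one side
must observe the other - either we fall back to the IPI here, or the
switching CPU drains the queue in set_mm_asid(). A rough userspace
sketch of how I understand the pairing (the C11 atomics and the
sender()/switcher() names are mine, not the patch's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool need_tlb_flush;	/* published by the flushing CPU */
static atomic_long loaded_asid;		/* updated at context switch */

/* should_ipi_flush() side: the queue entry is already written. */
static bool sender(long target_asid)
{
	atomic_store(&need_tlb_flush, true);
	atomic_thread_fence(memory_order_seq_cst);	/* RISCV_FENCE(w, r) */
	/* Target hart still runs this ASID -> fall back to the IPI. */
	return atomic_load(&loaded_asid) == target_asid;
}

/* set_mm_asid() side: loaded_asid has already been updated. */
static bool switcher(void)
{
	atomic_thread_fence(memory_order_seq_cst);	/* RISCV_FENCE(w, r) */
	/* Flag observed -> drain the per-cpu queue locally. */
	return atomic_load(&need_tlb_flush);
}

The outcome to rule out is both functions returning false while the
target is actually switching to that ASID; with both fences in place,
that would need each load to miss the other side's earlier store, which
the store-buffering pattern forbids.
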
> @@ -146,6 +156,9 @@ static void __ipi_flush_tlb_range_asid(void *info)
>  	struct flush_tlb_range_data *d = info;
> 
>  	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
> +
> +	if (this_cpu_read(need_tlb_flush))
> +		local_tlb_flush_queue_drain();
>  }
>
> static inline unsigned long get_mm_asid(struct mm_struct *mm)
> @@ -280,3 +293,24 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
>  			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
>  	cpumask_clear(&batch->cpumask);
>  }
> +
> +void local_tlb_flush_queue_drain(void)
> +{
> +	struct tlb_flush_queue *queue = this_cpu_ptr(&tlb_flush_queue);
> +	struct flush_tlb_range_data *d;
> +	unsigned int i;
> +
> +	this_cpu_write(need_tlb_flush, false);
> +
> +	/* Ensure clearing the need_tlb_flush flags before real tlb flush */
> +	smp_wmb();
> +
> +	raw_spin_lock(&queue->lock);
> +	for (i = 0; i < queue->len; i++) {
> +		d = &queue->tasks[i];
> +		local_flush_tlb_range_asid(d->start, d->size, d->stride,
> +					   d->asid);
Here, do we still need accurate range flushes for a flush that is
already delayed, or could the drain fall back to a single full local
flush? (See the sketch after the quoted hunk below.)
> +	}
> +	queue->len = 0;
> +	raw_spin_unlock(&queue->lock);
> +}
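
To make my question above concrete: since the flush is already deferred
until the next context switch (or the next range-flush IPI), the drain
could perhaps fall back to one full local flush once the queue has grown
past some threshold, instead of replaying every range accurately. A
rough sketch only - the threshold name and the cut-over policy are made
up, everything else is taken from this series:

void local_tlb_flush_queue_drain(void)
{
	struct tlb_flush_queue *queue = this_cpu_ptr(&tlb_flush_queue);
	unsigned int i;

	this_cpu_write(need_tlb_flush, false);

	/* As in the patch: clear the flag before doing the flushes */
	smp_wmb();

	raw_spin_lock(&queue->lock);
	if (queue->len > LAZY_FLUSH_ALL_THRESHOLD) {	/* hypothetical */
		/* One global sfence.vma on this hart instead of N ranges */
		local_flush_tlb_all();
	} else {
		for (i = 0; i < queue->len; i++) {
			struct flush_tlb_range_data *d = &queue->tasks[i];

			local_flush_tlb_range_asid(d->start, d->size,
						   d->stride, d->asid);
		}
	}
	queue->len = 0;
	raw_spin_unlock(&queue->lock);
}

Whether that is a win obviously depends on how long the queue gets in
practice, so this is only meant to illustrate the question.
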
> --
> 2.20.1
>
--
Best Regards
Guo Ren