[PATCH] riscv: mm: Verify per-hart ASID bits when using ASID allocator
hu.shengming at zte.com.cn
hu.shengming at zte.com.cn
Thu Mar 12 19:56:16 PDT 2026
From: Shengming Hu <hu.shengming at zte.com.cn>
RISC-V probes the number of implemented ASID bits in SATP on the boot
hart and enables the ASID allocator based on that result. However, the
privileged spec allows harts to implement fewer ASID bits by hardwiring
the upper bits to zero.
If a secondary hart implements fewer ASID bits than the boot hart while
the ASID allocator is enabled, distinct Linux ASIDs can alias to the
same hardware ASID on that hart. Stale TLB entries belonging to one
address space may then survive into another, potentially corrupting
memory.
Verify the number of implemented ASID bits on each CPU as it comes
online, and refuse to online any CPU that implements fewer ASID bits
than the boot CPU.
Signed-off-by: Shengming Hu <hu.shengming at zte.com.cn>
---
arch/riscv/include/asm/mmu_context.h | 2 ++
arch/riscv/kernel/smpboot.c | 3 ++
arch/riscv/mm/context.c | 42 ++++++++++++++++++++++++----
3 files changed, 41 insertions(+), 6 deletions(-)
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index dbf27a78df6c..0cca20b7dbe1 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -16,6 +16,8 @@
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *task);
+int verify_cpu_asid_bits(void);
+
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 8b628580fe11..9774a591c1a5 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -230,6 +230,9 @@ asmlinkage __visible void smp_callin(void)
return;
}
+ if (verify_cpu_asid_bits())
+ return;
+
/* All kernel threads share the same mm context. */
mmgrab(mm);
current->active_mm = mm;
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 55c20ad1f744..2a830f021ce5 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -22,6 +22,7 @@
DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
static unsigned long num_asids;
+static unsigned long asid_bits;
static atomic_long_t current_version;
@@ -224,16 +225,16 @@ static inline void set_mm(struct mm_struct *prev,
}
}
-static int __init asids_init(void)
+static unsigned long get_cpu_asid_bits(void)
{
- unsigned long asid_bits, old;
+ unsigned long asid_bit_count, old;
/* Figure-out number of ASID bits in HW */
old = csr_read(CSR_SATP);
- asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
- csr_write(CSR_SATP, asid_bits);
- asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
- asid_bits = fls_long(asid_bits);
+ asid_bit_count = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
+ csr_write(CSR_SATP, asid_bit_count);
+ asid_bit_count = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
+ asid_bit_count = fls_long(asid_bit_count);
csr_write(CSR_SATP, old);
/*
@@ -242,6 +243,30 @@ static int __init asids_init(void)
* to remove unwanted TLB enteries.
*/
local_flush_tlb_all();
+ return asid_bit_count;
+}
+
+int verify_cpu_asid_bits(void)
+{
+ unsigned long cpu_asid_bits = get_cpu_asid_bits();
+
+ if (cpu_asid_bits < asid_bits) {
+ /*
+ * The ASID allocator is initialized using the boot CPU's ASID width.
+ * We cannot safely shrink the system-wide ASID space at runtime, so
+ * reject secondary CPU bringup if it supports fewer ASID bits than
+ * the boot CPU.
+ */
+ pr_crit("CPU%d: ASID bits (%lu) smaller than boot CPU (%lu), refusing to online\n",
+ smp_processor_id(), cpu_asid_bits, asid_bits);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __init asids_init(void)
+{
+ asid_bits = get_cpu_asid_bits();
/* Pre-compute ASID details */
if (asid_bits) {
@@ -279,6 +304,11 @@ static inline void set_mm(struct mm_struct *prev,
{
/* Nothing to do here when there is no MMU */
}
+
+int verify_cpu_asid_bits(void)
+{
+ return 0;
+}
#endif
/*
--
2.25.1
More information about the linux-riscv
mailing list