[PATCH 1/3] riscv: mm: Rename new_vmalloc to new_valid_map_cpus
Vivian Wang
wangruikang at iscas.ac.cn
Sun Mar 1 18:21:30 PST 2026
In preparation for a future patch that uses this mechanism for
non-vmalloc mappings, rename new_vmalloc to new_valid_map_cpus to avoid
misleading readers.
No functional change intended.
Signed-off-by: Vivian Wang <wangruikang at iscas.ac.cn>
---
arch/riscv/include/asm/cacheflush.h | 6 +++---
arch/riscv/kernel/entry.S | 38 ++++++++++++++++++-------------------
arch/riscv/mm/init.c | 2 +-
3 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 0092513c3376..b6d1a5eb7564 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -41,7 +41,7 @@ do { \
} while (0)
#ifdef CONFIG_64BIT
-extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+extern u64 new_valid_map_cpus[NR_CPUS / sizeof(u64) + 1];
extern char _end[];
#define flush_cache_vmap flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
@@ -54,8 +54,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
* the only place this can happen is in handle_exception() where
* an sfence.vma is emitted.
*/
- for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
- new_vmalloc[i] = -1ULL;
+ for (i = 0; i < ARRAY_SIZE(new_valid_map_cpus); ++i)
+ new_valid_map_cpus[i] = -1ULL;
}
}
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 60eb221296a6..e57a0f550860 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -20,44 +20,44 @@
.section .irqentry.text, "ax"
-.macro new_vmalloc_check
+.macro new_valid_map_cpus_check
REG_S a0, TASK_TI_A0(tp)
csrr a0, CSR_CAUSE
/* Exclude IRQs */
- blt a0, zero, .Lnew_vmalloc_restore_context_a0
+ blt a0, zero, .Lnew_valid_map_cpus_restore_context_a0
REG_S a1, TASK_TI_A1(tp)
- /* Only check new_vmalloc if we are in page/protection fault */
+ /* Only check new_valid_map_cpus if we are in page/protection fault */
li a1, EXC_LOAD_PAGE_FAULT
- beq a0, a1, .Lnew_vmalloc_kernel_address
+ beq a0, a1, .Lnew_valid_map_cpus_kernel_address
li a1, EXC_STORE_PAGE_FAULT
- beq a0, a1, .Lnew_vmalloc_kernel_address
+ beq a0, a1, .Lnew_valid_map_cpus_kernel_address
li a1, EXC_INST_PAGE_FAULT
- bne a0, a1, .Lnew_vmalloc_restore_context_a1
+ bne a0, a1, .Lnew_valid_map_cpus_restore_context_a1
-.Lnew_vmalloc_kernel_address:
+.Lnew_valid_map_cpus_kernel_address:
/* Is it a kernel address? */
csrr a0, CSR_TVAL
- bge a0, zero, .Lnew_vmalloc_restore_context_a1
+ bge a0, zero, .Lnew_valid_map_cpus_restore_context_a1
/* Check if a new vmalloc mapping appeared that could explain the trap */
REG_S a2, TASK_TI_A2(tp)
/*
* Computes:
- * a0 = &new_vmalloc[BIT_WORD(cpu)]
+ * a0 = &new_valid_map_cpus[BIT_WORD(cpu)]
* a1 = BIT_MASK(cpu)
*/
lw a2, TASK_TI_CPU(tp)
/*
- * Compute the new_vmalloc element position:
+ * Compute the new_valid_map_cpus element position:
* (cpu / 64) * 8 = (cpu >> 6) << 3
*/
srli a1, a2, 6
slli a1, a1, 3
- la a0, new_vmalloc
+ la a0, new_valid_map_cpus
add a0, a0, a1
/*
- * Compute the bit position in the new_vmalloc element:
+ * Compute the bit position in the new_valid_map_cpus element:
* bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
* = cpu - ((cpu >> 6) << 3) << 3
*/
@@ -67,12 +67,12 @@
li a2, 1
sll a1, a2, a1
- /* Check the value of new_vmalloc for this cpu */
+ /* Check the value of new_valid_map_cpus for this cpu */
REG_L a2, 0(a0)
and a2, a2, a1
- beq a2, zero, .Lnew_vmalloc_restore_context
+ beq a2, zero, .Lnew_valid_map_cpus_restore_context
- /* Atomically reset the current cpu bit in new_vmalloc */
+ /* Atomically reset the current cpu bit in new_valid_map_cpus */
amoxor.d a0, a1, (a0)
/* Only emit a sfence.vma if the uarch caches invalid entries */
@@ -84,11 +84,11 @@
csrw CSR_SCRATCH, x0
sret
-.Lnew_vmalloc_restore_context:
+.Lnew_valid_map_cpus_restore_context:
REG_L a2, TASK_TI_A2(tp)
-.Lnew_vmalloc_restore_context_a1:
+.Lnew_valid_map_cpus_restore_context_a1:
REG_L a1, TASK_TI_A1(tp)
-.Lnew_vmalloc_restore_context_a0:
+.Lnew_valid_map_cpus_restore_context_a0:
REG_L a0, TASK_TI_A0(tp)
.endm
@@ -144,7 +144,7 @@ SYM_CODE_START(handle_exception)
* could "miss" the new mapping and traps: in that case, we only need
* to retry the access, no sfence.vma is required.
*/
- new_vmalloc_check
+ new_valid_map_cpus_check
#endif
REG_S sp, TASK_TI_KERNEL_SP(tp)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 811e03786c56..9922c22a2a5f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -37,7 +37,7 @@
#include "../kernel/head.h"
-u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+u64 new_valid_map_cpus[NR_CPUS / sizeof(u64) + 1];
struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
--
2.52.0
More information about the linux-riscv
mailing list