[PATCH 02/10] arm64: mm: Introduce a C wrapper for by-range TLB invalidation helpers
Will Deacon
will@kernel.org
Fri Jul 11 09:17:24 PDT 2025
In preparation for reducing our reliance on complex preprocessor macros
for TLB invalidation routines, introduce a new C wrapper for by-range
TLB invalidation helpers which can be used instead of the __tlbi() macro
and can additionally be called from C code.
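For illustration (taking vae1is, one of the supported operations), a
by-range invalidation can then be issued directly from C:

	u64 addr = __TLBI_VADDR_RANGE(start >> shift, asid, scale,
				      num, tlb_level);
	__tlbi_range(vae1is, addr);

whereas __tlbi(rvae1is, addr) only works where the operation is
spelled out as a literal token.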
Signed-off-by: Will Deacon <will@kernel.org>
---
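A note on the pattern, for reviewers (not for the commit message):
__tlbi() stringizes its operation argument straight into the "tlbi"
asm, so the operation must be a literal token and cannot be a runtime
value. The __always_inline switch added below lets callers pass an
enum tlbi_op instead: each __GEN_TLBI_OP_CASE() expansion hands
__tlbi() a literal operation name, a compile-time constant op folds
the switch down to the single TLBI, and the BUILD_BUG() in the default
case rejects unsupported operations at build time. A minimal userspace
sketch of the same shape, with hypothetical names and printf()
standing in for the inline asm:

	#include <stdio.h>

	enum tlbi_op { vae1is, vale1is };

	/* Each case hands the stringizer a literal token. */
	#define GEN_CASE(op)						\
	case op:							\
		printf("tlbi r" #op ", %#llx\n", arg);		\
		break

	static inline void tlbi_range(enum tlbi_op op,
				      unsigned long long arg)
	{
		switch (op) {
		GEN_CASE(vae1is);
		GEN_CASE(vale1is);
		}
	}

	int main(void)
	{
		tlbi_range(vae1is, 0x1234); /* "tlbi rvae1is, 0x1234" */
		return 0;
	}
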
arch/arm64/include/asm/tlbflush.h | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 1c7548ec6cb7..4408aeebf4d5 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -418,6 +418,24 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * operations can only span an even number of pages. We save this for last to
  * ensure 64KB start alignment is maintained for the LPA2 case.
  */
+#define __GEN_TLBI_OP_CASE(op)						\
+	case op:							\
+		__tlbi(r ## op, arg);					\
+		break
+
+static __always_inline void __tlbi_range(const enum tlbi_op op, u64 arg)
+{
+	switch (op) {
+	__GEN_TLBI_OP_CASE(vae1is);
+	__GEN_TLBI_OP_CASE(vale1is);
+	__GEN_TLBI_OP_CASE(vaale1is);
+	__GEN_TLBI_OP_CASE(ipas2e1is);
+	default:
+		BUILD_BUG();
+	}
+}
+#undef __GEN_TLBI_OP_CASE
+
 #define __flush_tlb_range_op(op, start, pages, stride,			\
 				asid, tlb_level, tlbi_user, lpa2)	\
 do {									\
@@ -445,7 +463,7 @@ do {									\
 		if (num >= 0) {						\
 			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
 						  scale, num, tlb_level); \
-			__tlbi(r##op, addr);				\
+			__tlbi_range(op, addr);				\
 			if (tlbi_user)					\
 				__tlbi_user(r##op, addr);		\
 			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
--
2.50.0.727.gbf7dc18ff4-goog