[RFC V1 15/16] arm64/mm: Add macros __tlb_asid_level and __tlb_range

Anshuman Khandual anshuman.khandual at arm.com
Mon Feb 23 21:11:52 PST 2026


From: Linu Cherian <linu.cherian at arm.com>

The existing __tlbi_level macro uses pre-encoded arguments for TLBI
instructions, which is not compatible with the TLBIP instructions
required by FEAT_D128, for both level-hinted and range-based operations.

Add two new macros, __tlb_asid_level and __tlb_range, that work with
both the existing TLBI and the upcoming TLBIP instructions.
__tlb_asid_level is used for non-range operations with level hints,
whereas __tlb_range is used for range operations with level hints.
Update the macro __flush_tlb_range_op accordingly.
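For illustration only (not part of the patch), the intended call pattern
for the two new macros looks roughly like this; vale1is is just an
example operation, and the addr/asid/scale/num/level values are
placeholders:

	/* Non-range invalidation with a level hint: */
	__tlb_asid_level(vale1is, addr, asid, tlb_level, true);

	/*
	 * Range invalidation: encode the address-independent fields
	 * (ASID, scale, num, TTL) once, then let __tlb_range OR in the
	 * shifted base address and issue the ranged op (r##op, i.e.
	 * rvale1is here):
	 */
	u64 args = __TLB_RANGE_ARGS(asid, scale, num, tlb_level);
	__tlb_range(vale1is, addr, lpa2, args, true);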

Cc: Catalin Marinas <catalin.marinas at arm.com>
Cc: Will Deacon <will at kernel.org>
Cc: Ryan Roberts <ryan.roberts at arm.com>
Cc: Mark Rutland <mark.rutland at arm.com>
Cc: linux-arm-kernel at lists.infradead.org
Cc: linux-kernel at vger.kernel.org
Signed-off-by: Linu Cherian <linu.cherian at arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual at arm.com>
---
 arch/arm64/include/asm/tlbflush.h | 47 ++++++++++++++++++++++---------
 1 file changed, 34 insertions(+), 13 deletions(-)
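A worked example of the range arithmetic used below (illustrative, not
part of the patch): flushing 512 pages fails at scale 3 and 2, where
__TLBI_RANGE_NUM returns -1 (512 >> 16 and 512 >> 11 are both 0), and
succeeds at scale 1:

	num   = __TLBI_RANGE_NUM(512, 1)  = (512 >> 6) - 1 = 7
	pages = __TLBI_RANGE_PAGES(7, 1)  = (7 + 1) << 6   = 512

so a single ranged TLBI covers the whole span and the loop terminates.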

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a2d65d7d6aae..9c93ffbcc1e0 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -54,6 +54,8 @@
 		__tlbi(op, (arg) | USER_ASID_FLAG);				\
 } while (0)
 
+#define TLBI_ASID_MASK		GENMASK_ULL(63, 48)
+
 /* This macro creates a properly formatted VA operand for the TLBI */
 #define __TLBI_VADDR(addr, asid)				\
 	({							\
@@ -102,6 +104,8 @@ static inline unsigned long get_trans_granule(void)
  * in asm/stage2_pgtable.h.
  */
 #define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
+#define TLBI_TG_MASK		GENMASK_ULL(47, 46)
+#define TLBI_LVL_MASK		GENMASK_ULL(45, 44)
 
 #define TLBI_TTL_UNKNOWN	INT_MAX
 
@@ -124,6 +128,15 @@ static inline unsigned long get_trans_granule(void)
 		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
 } while (0)
 
+#define __tlb_asid_level(op, addr, asid, level, tlb_user) do {		\
+	u64 arg1;							\
+									\
+	arg1 = __TLBI_VADDR(addr, asid);				\
+	__tlbi_level(op, arg1, level);					\
+	if (tlb_user)							\
+		__tlbi_user_level(op, arg1, level);			\
+} while (0)
+
 /*
  * This macro creates a properly formatted VA operand for the TLB RANGE. The
  * value bit assignments are:
@@ -149,11 +162,10 @@ static inline unsigned long get_trans_granule(void)
 #define TLBIR_TTL_MASK		GENMASK_ULL(38, 37)
 #define TLBIR_BADDR_MASK	GENMASK_ULL(36,  0)
 
-#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl)		\
+#define __TLB_RANGE_ARGS(asid, scale, num, ttl)			\
 	({								\
 		unsigned long __ta = 0;					\
 		unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0;	\
-		__ta |= FIELD_PREP(TLBIR_BADDR_MASK, baddr);		\
 		__ta |= FIELD_PREP(TLBIR_TTL_MASK, __ttl);		\
 		__ta |= FIELD_PREP(TLBIR_NUM_MASK, num);		\
 		__ta |= FIELD_PREP(TLBIR_SCALE_MASK, scale);		\
@@ -162,6 +174,13 @@ static inline unsigned long get_trans_granule(void)
 		__ta;							\
 	})
 
+#define __TLBI_VADDR_RANGE(baddr, args)					\
+	({								\
+		unsigned long __ta = args;				\
+		__ta |= FIELD_PREP(TLBIR_BADDR_MASK, baddr);		\
+		__ta;							\
+	})
+
 /* These macros are used by the TLBI RANGE feature. */
 #define __TLBI_RANGE_PAGES(num, scale)	\
 	((unsigned long)((num) + 1) << (5 * (scale) + 1))
@@ -181,6 +200,16 @@ static inline unsigned long get_trans_granule(void)
 		(__pages >> (5 * (scale) + 1)) - 1;			\
 	})
 
+#define __tlb_range(op, addr, lpa2, range_args, tlb_user) do {		\
+	u64 arg1;							\
+	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
+									\
+	arg1 = __TLBI_VADDR_RANGE((addr) >> shift,  range_args);	\
+	__tlbi(r##op, arg1);						\
+	if (tlb_user)							\
+		__tlbi_user(r##op, arg1);				\
+} while (0)
+
 /*
  *	TLB Invalidation
  *	================
@@ -423,17 +452,12 @@ do {									\
 	typeof(pages) __flush_pages = pages;				\
 	int num = 0;							\
 	int scale = 3;							\
-	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
-	unsigned long addr;						\
 									\
 	while (__flush_pages > 0) {					\
 		if (!system_supports_tlb_range() ||			\
 		    __flush_pages == 1 ||				\
 		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
-			addr = __TLBI_VADDR(__flush_start, asid);	\
-			__tlbi_level(op, addr, tlb_level);		\
-			if (tlbi_user)					\
-				__tlbi_user_level(op, addr, tlb_level);	\
+			__tlb_asid_level(op, __flush_start, asid, tlb_level, tlbi_user);	\
 			__flush_start += stride;			\
 			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
@@ -441,11 +465,8 @@ do {									\
 									\
 		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
 		if (num >= 0) {						\
-			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
-						scale, num, tlb_level);	\
-			__tlbi(r##op, addr);				\
-			if (tlbi_user)					\
-				__tlbi_user(r##op, addr);		\
+			u64 args = __TLB_RANGE_ARGS(asid, scale, num, tlb_level);	\
+			__tlb_range(op, __flush_start, lpa2, args, tlbi_user); \
 			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
 			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
 		}							\
-- 
2.43.0



