[PATCH 2/2] arm64/mm: avoid max_pinned_asids underflow

Reda CHERKAOUI redacherkaoui67 at gmail.com
Thu Feb 19 04:30:07 PST 2026


asids_update_limit() computes

	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;

with unsigned arithmetic, so if num_available_asids is not larger than
num_possible_cpus() + 2 the subtraction wraps around to a huge value
instead of the intended small limit. Guard the subtraction: warn (the
WARN_ON previously fired on a closely related condition anyway) and
clamp max_pinned_asids to 0 so no ASIDs can be pinned on such
configurations, rather than effectively allowing an unbounded number.

No functional change on sane configurations where num_available_asids
exceeds num_possible_cpus() + 2.

Signed-off-by: Reda CHERKAOUI <redacherkaoui67 at gmail.com>
---
 arch/arm64/mm/context.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 74c1ece7db78..fdcee1127954 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -386,11 +386,7 @@ static int asids_update_limit(void)
 		if (pinned_asid_map)
 			set_kpti_asid_bits(pinned_asid_map);
 	}
-	/*
-	 * Expect allocation after rollover to fail if we don't have at least
-	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
-	 */
-	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+
 	pr_info("ASID allocator initialised with %lu entries\n",
 		num_available_asids);
 
@@ -399,7 +395,11 @@ static int asids_update_limit(void)
 	 * even if all CPUs have a reserved ASID and the maximum number of ASIDs
 	 * are pinned, there still is at least one empty slot in the ASID map.
 	 */
-	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+	if (WARN_ON(num_available_asids <= num_possible_cpus() + 2))
+		max_pinned_asids = 0;
+	else
+		max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
+
 	return 0;
 }
 arch_initcall(asids_update_limit);
-- 
2.43.0




More information about the linux-arm-kernel mailing list