[PATCH v3 2/5] arm64: tlb: Pass the corresponding mm to __tlbi_sync_s1ish()
Catalin Marinas
catalin.marinas@arm.com
Mon Mar 23 09:24:02 PDT 2026
The mm structure will be used by workarounds that need to be limited to
specific tasks.
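
For example (illustration only, not part of this series), a later patch
could key the extra synchronisation off a per-mm flag so that only
affected tasks pay for it; MMF_TLBI_WORKAROUND below is a placeholder
name, not an existing flag:

	/* Hypothetical consumer of the new mm argument. */
	static inline void __tlbi_sync_s1ish(struct mm_struct *mm)
	{
		dsb(ish);
		/* Repeat the TLBI sequence only for marked address spaces. */
		if (mm && test_bit(MMF_TLBI_WORKAROUND, &mm->flags))
			__repeat_tlbi_sync(vale1is, 0);
	}
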
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
---
 arch/arm64/include/asm/tlbflush.h | 8 ++++----
 arch/arm64/kernel/sys_compat.c    | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
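
Note: call sites that have a vma pass vma->vm_mm, flush_tlb_mm() passes
its own mm, and the compat cache op path, which has no vma at hand,
passes current->mm, e.g.:

	__tlbi(aside1is, __TLBI_VADDR(0, 0));	/* reserved ASID */
	__tlbi_sync_s1ish(current->mm);
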
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index f41eebf00990..262791191935 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -185,7 +185,7 @@ do { \
  * Complete broadcast TLB maintenance issued by the host which invalidates
  * stage 1 information in the host's own translation regime.
  */
-static inline void __tlbi_sync_s1ish(void)
+static inline void __tlbi_sync_s1ish(struct mm_struct *mm)
 {
 	dsb(ish);
 	__repeat_tlbi_sync(vale1is, 0);
@@ -323,7 +323,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish(mm);
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
 
@@ -377,7 +377,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long uaddr)
 {
 	flush_tlb_page_nosync(vma, uaddr);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish(vma->vm_mm);
 }
 
 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
@@ -532,7 +532,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 {
 	__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
 				 last_level, tlb_level);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish(vma->vm_mm);
 }
 
 static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index b9d4998c97ef..03fde2677d5b 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -37,7 +37,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
 		 * We pick the reserved-ASID to minimise the impact.
 		 */
 		__tlbi(aside1is, __TLBI_VADDR(0, 0));
-		__tlbi_sync_s1ish();
+		__tlbi_sync_s1ish(current->mm);
 	}
 
 	ret = caches_clean_inval_user_pou(start, start + chunk);