[PATCH v4 0/8] enable PT_RECLAIM on more 64-bit architectures
Andrew Morton
akpm at linux-foundation.org
Tue Jan 27 12:46:42 PST 2026
On Tue, 27 Jan 2026 20:12:53 +0800 Qi Zheng <qi.zheng at linux.dev> wrote:
> This series aims to enable PT_RECLAIM on more 64-bit architectures.
Thanks, I updated mm.git's mm-unstable branch to v4.
> Changes in v4:
> - convert __HAVE_ARCH_TLB_REMOVE_TABLE to a CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE Kconfig option
> - fix a WARN_ON_ONCE() on sparc64 (and on ppc)
> (reported by Andreas Larsson)
> - collect Acked-by (Hi David, I've kept your Acked-by, feel free to drop it)
> - rebase onto v6.19-rc7
>
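To spell out the conversion mentioned in the changelog: an architecture
that supplies its own __tlb_remove_table() now selects a Kconfig symbol
instead of defining a preprocessor macro before including the generic
header.  A rough sketch for a hypothetical architecture "foo" (the
function body is a placeholder, not code from this series):

/* arch/foo/Kconfig:  select HAVE_ARCH_TLB_REMOVE_TABLE */

/* arch/foo/include/asm/tlb.h */
static inline void __tlb_remove_table(void *table)
{
	/* arch-specific freeing of the page table page goes here */
}

/*
 * The old "#define __HAVE_ARCH_TLB_REMOVE_TABLE" is gone; the Kconfig
 * symbol now suppresses the generic fallback in asm-generic/tlb.h.
 */
#include <asm-generic/tlb.h>
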
Below is how v4 altered mm.git.
I'm not seeing the WARN_ON_ONCE() fix as a separate change.  I assume it
was handled by the Kconfig alterations?
--- a/arch/powerpc/include/asm/tlb.h~b
+++ a/arch/powerpc/include/asm/tlb.h
@@ -37,7 +37,6 @@ extern void tlb_flush(struct mmu_gather
*/
#define tlb_needs_table_invalidate() radix_enabled()
-#define __HAVE_ARCH_TLB_REMOVE_TABLE
/* Get the generic bits... */
#include <asm-generic/tlb.h>
--- a/arch/powerpc/Kconfig~b
+++ a/arch/powerpc/Kconfig
@@ -305,6 +305,7 @@ config PPC
select LOCK_MM_AND_FIND_VMA
select MMU_GATHER_PAGE_SIZE
select MMU_GATHER_RCU_TABLE_FREE
+ select HAVE_ARCH_TLB_REMOVE_TABLE
select MMU_GATHER_MERGE_VMAS
select MMU_LAZY_TLB_SHOOTDOWN if PPC_BOOK3S_64
select MODULES_USE_ELF_RELA
--- a/arch/sparc/include/asm/tlb_64.h~b
+++ a/arch/sparc/include/asm/tlb_64.h
@@ -33,7 +33,6 @@ void flush_tlb_pending(void);
#define tlb_needs_table_invalidate() (false)
#endif
-#define __HAVE_ARCH_TLB_REMOVE_TABLE
#include <asm-generic/tlb.h>
#endif /* _SPARC64_TLB_H */
--- a/arch/sparc/Kconfig~b
+++ a/arch/sparc/Kconfig
@@ -74,6 +74,7 @@ config SPARC64
select HAVE_KRETPROBES
select HAVE_KPROBES
select MMU_GATHER_RCU_TABLE_FREE if SMP
+ select HAVE_ARCH_TLB_REMOVE_TABLE if SMP
select MMU_GATHER_MERGE_VMAS
select MMU_GATHER_NO_FLUSH_CACHE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
--- a/include/asm-generic/tlb.h~b
+++ a/include/asm-generic/tlb.h
@@ -213,7 +213,7 @@ struct mmu_table_batch {
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE
+#ifndef CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE
static inline void __tlb_remove_table(void *table)
{
struct ptdesc *ptdesc = (struct ptdesc *)table;
--- a/mm/Kconfig~b
+++ a/mm/Kconfig
@@ -1448,9 +1448,12 @@ config ARCH_HAS_USER_SHADOW_STACK
The architecture has hardware support for userspace shadow call
stacks (eg, x86 CET, arm64 GCS or RISC-V Zicfiss).
+config HAVE_ARCH_TLB_REMOVE_TABLE
+ def_bool n
+
config PT_RECLAIM
def_bool y
- depends on MMU_GATHER_RCU_TABLE_FREE
+ depends on MMU_GATHER_RCU_TABLE_FREE && !HAVE_ARCH_TLB_REMOVE_TABLE
help
Try to reclaim empty user page table pages in paths other than munmap
and exit_mmap path.
_
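
A note on the PT_RECLAIM dependency at the end: as I read it, PT_RECLAIM
keeps relying on the generic __tlb_remove_table(), which expects the
batched pointer to be a struct ptdesc, so the option is now forced off on
any architecture that selects HAVE_ARCH_TLB_REMOVE_TABLE and frees its
tables some other way.  Sketch of the generic side after this series (the
freeing step is elided; the real body is in asm-generic/tlb.h):

#ifndef CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE
/*
 * Generic fallback, built only when the architecture does not select
 * HAVE_ARCH_TLB_REMOVE_TABLE.  PT_RECLAIM is only available here.
 */
static inline void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = (struct ptdesc *)table;

	(void)ptdesc;	/* freeing elided in this sketch */
}
#endif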