[PATCH] arm64: move early allocation of kfence pool after acpi table initialization.

Imran Khan imran.f.khan at oracle.com
Wed Feb 25 18:07:48 PST 2026


Currently early allocation of kfence pool (arm64_kfence_alloc_pool) happens
before ACPI table parsing (acpi_boot_table_init) and hence the kfence pool
can overlap with area containing ACPI data.
For example, on my setup I see that the kfence pool, of size 32MB, is getting
allocated at physical address 0xc3c570000, while the BGRT table is present at
0xc3e512018.
This overlap causes KFENCE to generate false positive reports.
For example, trying to access the BGRT binary attributes produces errors like:

[  101.153638] ==================================================================
[  101.153639] BUG: KFENCE: use-after-free read in __pi_memcpy_generic+0x14c/0x230
[  101.153639]
[  101.153642] Use-after-free read at 0x000000002b4fde1e (in kfence-#252):
[  101.153643]  __pi_memcpy_generic+0x14c/0x230
[  101.153645]  sysfs_kf_bin_read+0x70/0x140
[  101.153647]  kernfs_file_read_iter+0xac/0x220
[  101.153649]  kernfs_fop_read_iter+0x30/0x80
[  101.153651]  copy_splice_read+0x1f0/0x400
[  101.153653]  do_splice_read+0x84/0x1a0
[  101.153655]  splice_direct_to_actor+0xb4/0x2a0
[  101.153657]  do_splice_direct+0x70/0x100
[  101.153659]  do_sendfile+0x360/0x400
[  101.153661]  __arm64_sys_sendfile64+0x70/0x1c0
[  101.153663]  invoke_syscall+0x70/0x160
[  101.153664]  el0_svc_common.constprop.0+0x108/0x140
[  101.153666]  do_el0_svc+0x24/0x60
[  101.153667]  el0_svc+0x38/0x160
[  101.153669]  el0t_64_sync_handler+0xb8/0x100
[  101.153670]  el0t_64_sync+0x19c/0x1a0
[  101.153671]
[  101.153672] kfence-#252: 0x00000000e0140f78-0x00000000451bb320, size=256, cache=maple_node
[  101.153672]
[  101.153674] allocated by task 8328 on cpu 0 at 99.989222s (1.164452s ago):
[  101.153679]  mas_alloc_nodes+0x138/0x180
[  101.153682]  mas_store_gfp+0x198/0x3e0
[  101.153684]  do_vmi_align_munmap+0x168/0x320
[  101.153687]  do_vmi_munmap+0xb8/0x1c0
[  101.153689]  __vm_munmap+0xdc/0x1e0
[  101.153691]  __arm64_sys_munmap+0x28/0x60
[  101.153693]  invoke_syscall+0x70/0x160
[  101.153695]  el0_svc_common.constprop.0+0x108/0x140
[  101.153696]  do_el0_svc+0x24/0x60
[  101.153697]  el0_svc+0x38/0x160
[  101.153699]  el0t_64_sync_handler+0xb8/0x100
[  101.153701]  el0t_64_sync+0x19c/0x1a0
[  101.153702]
[  101.153702] freed by task 0 on cpu 0 at 100.057612s (1.096089s ago):
[  101.153722]  __rcu_free_sheaf_prepare+0x11c/0x260
[  101.153723]  rcu_free_sheaf+0x2c/0x140
[  101.153725]  rcu_do_batch+0x158/0x560
[  101.153727]  rcu_core+0x110/0x220
[  101.153728]  rcu_core_si+0x18/0x40
[  101.153729]  handle_softirqs+0x128/0x340
[  101.153731]  __do_softirq+0x1c/0x34
[  101.153732]  ____do_softirq+0x18/0x38

The location of the warning remains the same, but the freeing and allocating
stack traces can differ.

Moving the early allocation of the kfence pool to after ACPI table
initialization avoids the above-mentioned overlap and prevents false positive
reports such as the one above.

Signed-off-by: Imran Khan <imran.f.khan at oracle.com>
---
 arch/arm64/include/asm/kfence.h |  9 +++++++++
 arch/arm64/kernel/setup.c       |  7 +++++++
 arch/arm64/mm/mmu.c             | 13 ++-----------
 3 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index 21dbc9dda7478..25c66f8059d6d 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -19,6 +19,11 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 
 #ifdef CONFIG_KFENCE
 extern bool kfence_early_init;
+
+extern phys_addr_t arm64_kfence_alloc_pool(void);
+
+extern void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp);
+
 static inline bool arm64_kfence_can_set_direct_map(void)
 {
 	return !kfence_early_init;
@@ -26,6 +31,10 @@ static inline bool arm64_kfence_can_set_direct_map(void)
 bool arch_kfence_init_pool(void);
 #else /* CONFIG_KFENCE */
 static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+
+static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+
+static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
 #endif /* CONFIG_KFENCE */
 
 #endif /* __ASM_KFENCE_H */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 23c05dc7a8f2a..2e9ec94cd4d5b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -32,6 +32,7 @@
 #include <linux/sched/task.h>
 #include <linux/scs.h>
 #include <linux/mm.h>
+#include <linux/kfence.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -54,6 +55,7 @@
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
+#include <asm/kfence.h>
 
 static int num_standard_resources;
 static struct resource *standard_resources;
@@ -280,6 +282,8 @@ u64 cpu_logical_map(unsigned int cpu)
 
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
+	phys_addr_t early_kfence_pool;
+
 	setup_initial_init_mm(_text, _etext, _edata, _end);
 
 	*cmdline_p = boot_command_line;
@@ -341,6 +345,9 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	if (acpi_disabled)
 		unflatten_device_tree();
 
+	early_kfence_pool = arm64_kfence_alloc_pool();
+	arm64_kfence_map_pool(early_kfence_pool, swapper_pg_dir);
+
 	bootmem_init();
 
 	kasan_init();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a6a00accf4f93..5a7215daa9ce5 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1048,7 +1048,7 @@ static int __init parse_kfence_early_init(char *arg)
 }
 early_param("kfence.sample_interval", parse_kfence_early_init);
 
-static phys_addr_t __init arm64_kfence_alloc_pool(void)
+phys_addr_t __init arm64_kfence_alloc_pool(void)
 {
 	phys_addr_t kfence_pool;
 
@@ -1068,7 +1068,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void)
 	return kfence_pool;
 }
 
-static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
+void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
 {
 	if (!kfence_pool)
 		return;
@@ -1107,11 +1107,6 @@ bool arch_kfence_init_pool(void)
 
 	return !ret;
 }
-#else /* CONFIG_KFENCE */
-
-static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
-static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
-
 #endif /* CONFIG_KFENCE */
 
 static void __init map_mem(pgd_t *pgdp)
@@ -1120,7 +1115,6 @@ static void __init map_mem(pgd_t *pgdp)
 	phys_addr_t kernel_start = __pa_symbol(_text);
 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
 	phys_addr_t start, end;
-	phys_addr_t early_kfence_pool;
 	int flags = NO_EXEC_MAPPINGS;
 	u64 i;
 
@@ -1137,8 +1131,6 @@ static void __init map_mem(pgd_t *pgdp)
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end) &&
 		     pgd_index(_PAGE_OFFSET(VA_BITS_MIN)) != PTRS_PER_PGD - 1);
 
-	early_kfence_pool = arm64_kfence_alloc_pool();
-
 	linear_map_requires_bbml2 = !force_pte_mapping() && can_set_direct_map();
 
 	if (force_pte_mapping())
@@ -1178,7 +1170,6 @@ static void __init map_mem(pgd_t *pgdp)
 	__map_memblock(pgdp, kernel_start, kernel_end,
 		       PAGE_KERNEL, NO_CONT_MAPPINGS);
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
-	arm64_kfence_map_pool(early_kfence_pool, pgdp);
 }
 
 void mark_rodata_ro(void)

base-commit: 6de23f81a5e08be8fbf5e8d7e9febc72a5b5f27f
-- 
2.34.1




More information about the linux-arm-kernel mailing list