[PATCH 4/5] ARM: mmu: catch stack overflowing into TTB with stack guard page

Ahmad Fatoum a.fatoum at pengutronix.de
Mon Sep 11 08:08:59 PDT 2023


While the barebox stack is often quite generous, due to its default size
of 32K, bugs can make it overflow and on ARM, this clobbers the page tables,
leading to even harder to debug problems than usual.

Let's add a 4K buffer zone between the page tables and the stack and
configure the MMU to trap all accesses into it.

Note that hitting the stack guard page can be silent if the exception
handler places its frame there. Still, a hanging barebox may be better
than an erratically behaving one.

Signed-off-by: Ahmad Fatoum <a.fatoum at pengutronix.de>
---
 arch/arm/cpu/interrupts_32.c       | 21 +++++++++++++++--
 arch/arm/cpu/interrupts_64.c       | 38 +++++++++++++++++++++++-------
 arch/arm/cpu/mmu_32.c              | 16 +++++++++++++
 arch/arm/cpu/mmu_64.c              | 15 ++++++++++++
 arch/arm/include/asm/barebox-arm.h | 18 +++++++++++++-
 lib/Kconfig.hardening              | 10 ++++++++
 6 files changed, 107 insertions(+), 11 deletions(-)

diff --git a/arch/arm/cpu/interrupts_32.c b/arch/arm/cpu/interrupts_32.c
index 5bc790a796fb..468dcdd30e93 100644
--- a/arch/arm/cpu/interrupts_32.c
+++ b/arch/arm/cpu/interrupts_32.c
@@ -8,7 +8,9 @@
 
 #include <common.h>
 #include <abort.h>
+#include <linux/sizes.h>
 #include <asm/ptrace.h>
+#include <asm/barebox-arm.h>
 #include <asm/unwind.h>
 #include <init.h>
 
@@ -106,6 +108,22 @@ void do_prefetch_abort (struct pt_regs *pt_regs)
 	do_exception(pt_regs);
 }
 
+static const char *data_abort_reason(ulong far)
+{
+	ulong guard_page;
+
+	if (far < PAGE_SIZE)
+		return "NULL pointer dereference";
+
+	if (IS_ENABLED(CONFIG_STACK_GUARD_PAGE)) {
+		guard_page = arm_mem_guard_page_get();
+		if (guard_page <= far && far < guard_page + PAGE_SIZE)
+			return "stack overflow";
+	}
+
+	return "paging request";
+}
+
 /**
  * The CPU catches a data abort. That really should not happen!
  * @param[in] pt_regs Register set content when the accident happens
@@ -119,8 +137,7 @@ void do_data_abort (struct pt_regs *pt_regs)
 	asm volatile ("mrc     p15, 0, %0, c6, c0, 0" : "=r" (far) : : "cc");
 
 	printf("unable to handle %s at address 0x%08x\n",
-			far < PAGE_SIZE ? "NULL pointer dereference" :
-			"paging request", far);
+	       data_abort_reason(far), far);
 
 	do_exception(pt_regs);
 }
diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index d844915fee24..b3e7da179756 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -6,6 +6,7 @@
 #include <common.h>
 #include <abort.h>
 #include <asm/ptrace.h>
+#include <asm/barebox-arm.h>
 #include <asm/unwind.h>
 #include <init.h>
 #include <asm/system.h>
@@ -142,17 +143,38 @@ void do_bad_error(struct pt_regs *pt_regs)
 extern volatile int arm_ignore_data_abort;
 extern volatile int arm_data_abort_occurred;
 
-void do_sync(struct pt_regs *pt_regs, unsigned int esr, unsigned long far)
+static const char *data_abort_reason(ulong far)
 {
-	if ((esr >> ESR_ELx_EC_SHIFT) == ESR_ELx_EC_DABT_CUR &&
-			arm_ignore_data_abort) {
-		arm_data_abort_occurred = 1;
-		pt_regs->elr += 4;
-		return;
+	ulong guard_page;
+
+	if (far < PAGE_SIZE)
+		return "NULL pointer dereference: ";
+
+	if (IS_ENABLED(CONFIG_STACK_GUARD_PAGE)) {
+		guard_page = arm_mem_guard_page_get();
+		if (guard_page <= far && far < guard_page + PAGE_SIZE)
+			return "Stack overflow: ";
 	}
 
-	printf("%s exception (ESR 0x%08x) at 0x%016lx\n", esr_get_class_string(esr),
-	       esr, far);
+	return NULL;
+}
+
+void do_sync(struct pt_regs *pt_regs, unsigned int esr, unsigned long far)
+{
+	const char *extra = NULL;
+
+	if ((esr >> ESR_ELx_EC_SHIFT) == ESR_ELx_EC_DABT_CUR) {
+		if (arm_ignore_data_abort) {
+			arm_data_abort_occurred = 1;
+			pt_regs->elr += 4;
+			return;
+		}
+
+		extra = data_abort_reason(far);
+	}
+
+	printf("%s%s exception (ESR 0x%08x) at 0x%016lx\n", extra ?: "",
+	       esr_get_class_string(esr), esr, far);
 	do_exception(pt_regs);
 }
 
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index c5d64aa88bac..07b225067796 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -475,11 +475,27 @@ static void create_zero_page(void)
 	pr_debug("Created zero page\n");
 }
 
+static void create_guard_page(void)
+{
+	ulong guard_page;
+
+	if (!IS_ENABLED(CONFIG_STACK_GUARD_PAGE))
+		return;
+
+	guard_page = arm_mem_guard_page_get();
+	request_sdram_region("guard page", guard_page, PAGE_SIZE);
+	remap_range((void *)guard_page, PAGE_SIZE, MAP_FAULT);
+
+	pr_debug("Created guard page\n");
+}
+
 /*
  * Map vectors and zero page
  */
 static void vectors_init(void)
 {
+	create_guard_page();
+
 	/*
 	 * First try to use the vectors where they actually are, works
 	 * on ARMv7 and later.
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 3124f8f3a987..fb57260c90ae 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -204,6 +204,20 @@ static void mmu_enable(void)
 	set_cr(get_cr() | CR_M | CR_C | CR_I);
 }
 
+static void create_guard_page(void)
+{
+	ulong guard_page;
+
+	if (!IS_ENABLED(CONFIG_STACK_GUARD_PAGE))
+		return;
+
+	guard_page = arm_mem_guard_page_get();
+	request_sdram_region("guard page", guard_page, PAGE_SIZE);
+	remap_range((void *)guard_page, PAGE_SIZE, MAP_FAULT);
+
+	pr_debug("Created guard page\n");
+}
+
 /*
  * Prepare MMU for usage enable it.
  */
@@ -241,6 +255,7 @@ void __mmu_init(bool mmu_on)
 
 	/* Make zero page faulting to catch NULL pointer derefs */
 	zero_page_faulting();
+	create_guard_page();
 }
 
 void mmu_disable(void)
diff --git a/arch/arm/include/asm/barebox-arm.h b/arch/arm/include/asm/barebox-arm.h
index aceb7fdf74f8..382fa8505a66 100644
--- a/arch/arm/include/asm/barebox-arm.h
+++ b/arch/arm/include/asm/barebox-arm.h
@@ -15,6 +15,7 @@
 #include <linux/sizes.h>
 #include <asm-generic/memory_layout.h>
 #include <linux/kernel.h>
+#include <linux/pagemap.h>
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/barebox-arm-head.h>
@@ -82,9 +83,19 @@ static inline unsigned long arm_mem_stack(unsigned long endmem)
 	return arm_mem_scratch(endmem) - STACK_SIZE;
 }
 
-static inline unsigned long arm_mem_ttb(unsigned long endmem)
+static inline unsigned long arm_mem_guard_page(unsigned long endmem)
 {
 	endmem = arm_mem_stack(endmem);
+
+	if (!IS_ENABLED(CONFIG_STACK_GUARD_PAGE))
+		return endmem;
+
+	return ALIGN_DOWN(endmem, PAGE_SIZE) - PAGE_SIZE;
+}
+
+static inline unsigned long arm_mem_ttb(unsigned long endmem)
+{
+	endmem = arm_mem_guard_page(endmem);
 	endmem = ALIGN_DOWN(endmem, ARM_EARLY_PAGETABLE_SIZE) - ARM_EARLY_PAGETABLE_SIZE;
 
 	return endmem;
@@ -121,6 +132,11 @@ static inline const void *arm_mem_scratch_get(void)
 	return (const void *)arm_mem_scratch(arm_mem_endmem_get());
 }
 
+static inline unsigned long arm_mem_guard_page_get(void)
+{
+	return arm_mem_guard_page(arm_mem_endmem_get());
+}
+
 static inline unsigned long arm_mem_barebox_image(unsigned long membase,
 						  unsigned long endmem,
 						  unsigned long size)
diff --git a/lib/Kconfig.hardening b/lib/Kconfig.hardening
index 503fdf7c0cc5..aad0d8b97024 100644
--- a/lib/Kconfig.hardening
+++ b/lib/Kconfig.hardening
@@ -1,5 +1,15 @@
 menu "Hardening options"
 
+config STACK_GUARD_PAGE
+	bool "Place guard page to catch stack overflows"
+	depends on ARM && MMU
+	help
+	  When enabled, barebox places a faulting guard page to catch total
+	  stack usage exceeding CONFIG_STACK_SIZE. On overflows that hit
+	  the reserved 4KiB guard page, barebox will panic and report a stack overflow.
+	  The report may not always succeed if the stack overflow impacts
+	  operation of the exception handler.
+
 config STACKPROTECTOR
 	bool
 
-- 
2.39.2




More information about the barebox mailing list