[PATCH 1/4] mmu: add MAP_CACHED_RWX mapping type

Sascha Hauer s.hauer at pengutronix.de
Mon Feb 23 00:34:06 PST 2026


From: Sascha Hauer <sascha at saschahauer.de>

ARCH_MAP_CACHED_RWX seems to be the typical mapping for PBL and is used
by ARM32, ARM64 and RISC-V. Drop the ARCH_ prefix and move it to the
generic mapping types.

Signed-off-by: Claude Sonnet 4.5 <noreply at anthropic.com>
---
 arch/arm/cpu/mmu-common.c    |  2 +-
 arch/arm/cpu/mmu-common.h    |  4 +---
 arch/arm/cpu/mmu_32.c        | 10 +++++-----
 arch/arm/cpu/mmu_64.c        |  8 ++++----
 arch/riscv/cpu/mmu.c         |  6 +++---
 arch/riscv/include/asm/mmu.h |  1 -
 include/mmu.h                |  1 +
 7 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 44c39dc048..0300bb9bc6 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -21,7 +21,7 @@ const char *map_type_tostr(maptype_t map_type)
 	map_type &= ~ARCH_MAP_FLAG_PAGEWISE;
 
 	switch (map_type) {
-	case ARCH_MAP_CACHED_RWX:	return "RWX";
+	case MAP_CACHED_RWX:		return "RWX";
 	case MAP_CACHED_RO:		return "RO";
 	case MAP_CACHED:		return "CACHED";
 	case MAP_UNCACHED:		return "UNCACHED";
diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index b42c421ffd..3a3590ebb5 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -11,8 +11,6 @@
 #include <linux/sizes.h>
 #include <linux/bits.h>
 
-#define ARCH_MAP_CACHED_RWX	MAP_ARCH(2)
-
 #define ARCH_MAP_FLAG_PAGEWISE	BIT(31)
 
 struct device;
@@ -32,7 +30,7 @@ static inline maptype_t arm_mmu_maybe_skip_permissions(maptype_t map_type)
 	case MAP_CODE:
 	case MAP_CACHED:
 	case MAP_CACHED_RO:
-		return ARCH_MAP_CACHED_RWX;
+		return MAP_CACHED_RWX;
 	default:
 		return map_type;
 	}
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 4e569677e1..074fd1b0ed 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -302,7 +302,7 @@ static uint32_t get_pte_flags(maptype_t map_type)
 {
 	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
 		switch (map_type & MAP_TYPE_MASK) {
-		case ARCH_MAP_CACHED_RWX:
+		case MAP_CACHED_RWX:
 			return PTE_FLAGS_CACHED_V7_RWX;
 		case MAP_CACHED_RO:
 			return PTE_FLAGS_CACHED_RO_V7;
@@ -323,7 +323,7 @@ static uint32_t get_pte_flags(maptype_t map_type)
 		case MAP_CACHED_RO:
 		case MAP_CODE:
 			return PTE_FLAGS_CACHED_RO_V4;
-		case ARCH_MAP_CACHED_RWX:
+		case MAP_CACHED_RWX:
 		case MAP_CACHED:
 			return PTE_FLAGS_CACHED_V4;
 		case MAP_UNCACHED:
@@ -635,7 +635,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * map the bulk of the memory as sections to avoid allocating too many page tables
 	 * at this early stage
 	 */
-	early_remap_range(membase, barebox_start - membase, ARCH_MAP_CACHED_RWX);
+	early_remap_range(membase, barebox_start - membase, MAP_CACHED_RWX);
 	/*
 	 * Map the remainder of the memory explicitly with two level page tables. This is
 	 * the place where barebox proper ends at. In barebox proper we'll remap the code
@@ -646,10 +646,10 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * at the location being remapped.
 	 */
 	early_remap_range(barebox_start, barebox_size,
-			  ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+			  MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
 	early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED);
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
-			  ARCH_MAP_CACHED_RWX);
+			  MAP_CACHED_RWX);
 
 	__mmu_cache_on();
 }
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 20e185cf5e..2ed39abeb5 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -161,7 +161,7 @@ static unsigned long get_pte_attrs(maptype_t map_type)
 		return CACHED_MEM | PTE_BLOCK_RO;
 	case MAP_CACHED_RO:
 		return attrs_xn() | CACHED_MEM | PTE_BLOCK_RO;
-	case ARCH_MAP_CACHED_RWX:
+	case MAP_CACHED_RWX:
 		return CACHED_MEM;
 	default:
 		return ~0UL;
@@ -404,7 +404,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 */
 	early_init_range(2);
 
-	early_remap_range(membase, memsize, ARCH_MAP_CACHED_RWX);
+	early_remap_range(membase, memsize, MAP_CACHED_RWX);
 
 	/* Default location for OP-TEE: end of DRAM, leave OPTEE_SIZE space for it */
 	optee_membase = membase + memsize - OPTEE_SIZE;
@@ -417,7 +417,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * executing code from it
 	 */
 	early_remap_range(barebox_start, barebox_size,
-		     ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+		     MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
 
 	/* OP-TEE might be at location specified in OP-TEE header */
 	optee_get_membase(&optee_membase);
@@ -425,7 +425,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT);
 
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
-			  ARCH_MAP_CACHED_RWX);
+			  MAP_CACHED_RWX);
 
 	mmu_enable();
 }
diff --git a/arch/riscv/cpu/mmu.c b/arch/riscv/cpu/mmu.c
index bafd597b69..38120782ab 100644
--- a/arch/riscv/cpu/mmu.c
+++ b/arch/riscv/cpu/mmu.c
@@ -125,14 +125,14 @@ static unsigned long flags_to_pte(maptype_t flags)
 
 	/*
 	 * Map barebox memory types to RISC-V PTE flags:
-	 * - ARCH_MAP_CACHED_RWX: read + write + execute (early boot, full RAM access)
+	 * - MAP_CACHED_RWX: read + write + execute (early boot, full RAM access)
 	 * - MAP_CODE: read + execute (text sections)
 	 * - MAP_CACHED_RO: read only (rodata sections)
 	 * - MAP_CACHED: read + write (data/bss sections)
 	 * - MAP_UNCACHED: read + write, uncached (device memory)
 	 */
 	switch (flags & MAP_TYPE_MASK) {
-	case ARCH_MAP_CACHED_RWX:
+	case MAP_CACHED_RWX:
 		/* Full access for early boot: R + W + X */
 		pte |= PTE_R | PTE_W | PTE_X;
 		break;
@@ -287,7 +287,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize,
 	 */
 	pr_debug("Remapping RAM 0x%08lx-0x%08lx as cached RWX...\n", membase, end);
 	for (addr = membase; addr < end; addr += RISCV_L1_SIZE)
-		create_megapage(addr, addr, ARCH_MAP_CACHED_RWX);
+		create_megapage(addr, addr, MAP_CACHED_RWX);
 
 	pr_debug("Page table setup complete, used %lu KB\n",
 		 (early_pt_idx * RISCV_PGSIZE) / 1024);
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index ba7068c493..98af92cc17 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -16,7 +16,6 @@
 #define MAP_ARCH_DEFAULT MAP_CACHED
 
 /* Architecture-specific memory type flags */
-#define ARCH_MAP_CACHED_RWX		MAP_ARCH(2)	/* Cached, RWX (early boot) */
 #define ARCH_MAP_FLAG_PAGEWISE		(1 << 16)	/* Force page-wise mapping */
 
 /*
diff --git a/include/mmu.h b/include/mmu.h
index 9f582f25e1..d0143f360a 100644
--- a/include/mmu.h
+++ b/include/mmu.h
@@ -10,6 +10,7 @@
 #define MAP_FAULT		2
 #define MAP_CODE		3
 #define MAP_CACHED_RO		4
+#define MAP_CACHED_RWX		6
 
 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
 #define MAP_WRITECOMBINE	5

-- 
2.47.3




More information about the barebox mailing list