[PATCH 11/22] ARM: mmu: make force_pages a maptype_t flag

Ahmad Fatoum a.fatoum at pengutronix.de
Wed Aug 6 05:37:03 PDT 2025


The case with force_pages == false is the default, and having to write an
extra parameter at every call site is needless visual clutter. Especially
if we are going to add new parameters or OR in further flags later, it's
more readable to pass a single flags argument instead of several.
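
For example, the page-wise remapping of barebox proper in mmu_32.c
changes from:

  early_remap_range(barebox_start, barebox_size, ARCH_MAP_CACHED_RWX, true);

to:

  early_remap_range(barebox_start, barebox_size,
                    ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);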

Signed-off-by: Ahmad Fatoum <a.fatoum at pengutronix.de>
---
 arch/arm/cpu/mmu-common.h |  3 +++
 arch/arm/cpu/mmu_32.c     | 18 ++++++++++--------
 arch/arm/cpu/mmu_64.c     | 21 +++++++++++----------
 3 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index 01d081db426e..a111e15a21b4 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -9,10 +9,13 @@
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/sizes.h>
+#include <linux/bits.h>
 
 #define ARCH_MAP_CACHED_RWX	MAP_ARCH(2)
 #define ARCH_MAP_CACHED_RO	MAP_ARCH(3)
 
+#define ARCH_MAP_FLAG_PAGEWISE	BIT(31)
+
 struct device;
 
 void dma_inv_range(void *ptr, size_t size);
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 4b7f370edaea..e43d9d0d4606 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -266,8 +266,9 @@ static uint32_t get_pmd_flags(maptype_t map_type)
 }
 
 static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size,
-			       maptype_t map_type, bool force_pages)
+			       maptype_t map_type)
 {
+	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
 	u32 virt_addr = (u32)_virt_addr;
 	u32 pte_flags, pmd_flags;
 	uint32_t *ttb = get_ttb();
@@ -363,16 +364,16 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 	tlb_invalidate();
 }
 
-static void early_remap_range(u32 addr, size_t size, maptype_t map_type, bool force_pages)
+static void early_remap_range(u32 addr, size_t size, maptype_t map_type)
 {
-	__arch_remap_range((void *)addr, addr, size, map_type, force_pages);
+	__arch_remap_range((void *)addr, addr, size, map_type);
 }
 
 int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptype_t map_type)
 {
 	map_type = arm_mmu_maybe_skip_permissions(map_type);
 
-	__arch_remap_range(virt_addr, phys_addr, size, map_type, false);
+	__arch_remap_range(virt_addr, phys_addr, size, map_type);
 
 	if (maptype_is_compatible(map_type, MAP_UNCACHED))
 		dma_inv_range(virt_addr, size);
@@ -643,7 +644,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * map the bulk of the memory as sections to avoid allocating too many page tables
 	 * at this early stage
 	 */
-	early_remap_range(membase, barebox_start - membase, ARCH_MAP_CACHED_RWX, false);
+	early_remap_range(membase, barebox_start - membase, ARCH_MAP_CACHED_RWX);
 	/*
 	 * Map the remainder of the memory explicitly with two level page tables. This is
 	 * the place where barebox proper ends at. In barebox proper we'll remap the code
@@ -653,10 +654,11 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * a break-before-make sequence which we can't do when barebox proper is running
 	 * at the location being remapped.
 	 */
-	early_remap_range(barebox_start, barebox_size, ARCH_MAP_CACHED_RWX, true);
-	early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED, false);
+	early_remap_range(barebox_start, barebox_size,
+			  ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+	early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED);
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
-			  ARCH_MAP_CACHED_RWX, false);
+			  ARCH_MAP_CACHED_RWX);
 
 	__mmu_cache_on();
 }
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 0bd5e4dc98c4..6e617a15a6d7 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -146,8 +146,9 @@ static void split_block(uint64_t *pte, int level)
 }
 
 static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
-			      maptype_t map_type, bool force_pages)
+			      maptype_t map_type)
 {
+	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
 	unsigned long attr = get_pte_attrs(map_type);
 	uint64_t *ttb = get_ttb();
 	uint64_t block_size;
@@ -312,9 +313,9 @@ static void flush_cacheable_pages(void *start, size_t size)
 		v8_flush_dcache_range(flush_start, flush_end);
 }
 
-static void early_remap_range(uint64_t addr, size_t size, maptype_t map_type, bool force_pages)
+static void early_remap_range(uint64_t addr, size_t size, maptype_t map_type)
 {
-	__arch_remap_range(addr, addr, size, map_type, force_pages);
+	__arch_remap_range(addr, addr, size, map_type);
 }
 
 int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptype_t map_type)
@@ -324,7 +325,7 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptyp
 	if (!maptype_is_compatible(map_type, MAP_CACHED))
 		flush_cacheable_pages(virt_addr, size);
 
-	return __arch_remap_range((uint64_t)virt_addr, phys_addr, (uint64_t)size, map_type, false);
+	return __arch_remap_range((uint64_t)virt_addr, phys_addr, (uint64_t)size, map_type);
 }
 
 static void mmu_enable(void)
@@ -419,7 +420,7 @@ static void early_init_range(size_t total_level0_tables)
 	uint64_t addr = 0;
 
 	while (total_level0_tables--) {
-		early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED, false);
+		early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED);
 		split_block(ttb, 0);
 		addr += L0_XLAT_SIZE;
 		ttb++;
@@ -451,7 +452,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 */
 	early_init_range(2);
 
-	early_remap_range(membase, memsize, ARCH_MAP_CACHED_RWX, false);
+	early_remap_range(membase, memsize, ARCH_MAP_CACHED_RWX);
 
 	if (optee_get_membase(&optee_membase)) {
                 optee_membase = membase + memsize - OPTEE_SIZE;
@@ -459,18 +460,18 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 		barebox_size = optee_membase - barebox_start;
 
 		early_remap_range(optee_membase - barebox_size, barebox_size,
-			     ARCH_MAP_CACHED_RWX, true);
+			     ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
 	} else {
 		barebox_size = membase + memsize - barebox_start;
 
 		early_remap_range(membase + memsize - barebox_size, barebox_size,
-			     ARCH_MAP_CACHED_RWX, true);
+			     ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
 	}
 
-	early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT, false);
+	early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT);
 
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
-			  ARCH_MAP_CACHED_RWX, false);
+			  ARCH_MAP_CACHED_RWX);
 
 	mmu_enable();
 }
-- 
2.39.5



