[PATCH 02/22] ARM: mmu: compare only lowest 16 bits for map type

Ahmad Fatoum a.fatoum at pengutronix.de
Wed Aug 6 05:36:54 PDT 2025


Regions remapped as MAP_CODE are still cacheable, even though they aren't
MAP_CACHED. To handle that, and to support a future use of flag bits in
maptype_t, let's limit the existing memory type enumeration to the lower
16 bits and use a MAP_CODE/MAP_CACHED-aware comparison helper.

Signed-off-by: Ahmad Fatoum <a.fatoum at pengutronix.de>
---
 arch/arm/cpu/mmu-common.h   |  6 +++---
 arch/arm/cpu/mmu_32.c       | 10 +++++-----
 arch/arm/cpu/mmu_64.c       |  4 ++--
 arch/powerpc/cpu-85xx/mmu.c |  2 +-
 include/mmu.h               | 19 ++++++++++++++++++-
 5 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index a545958b5cc2..e9005dfae766 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -10,8 +10,8 @@
 #include <linux/kernel.h>
 #include <linux/sizes.h>
 
-#define ARCH_MAP_CACHED_RWX	((unsigned)-2)
-#define ARCH_MAP_CACHED_RO	((unsigned)-3)
+#define ARCH_MAP_CACHED_RWX	MAP_ARCH(2)
+#define ARCH_MAP_CACHED_RO	MAP_ARCH(3)
 
 struct device;
 
@@ -26,7 +26,7 @@ static inline maptype_t arm_mmu_maybe_skip_permissions(maptype_t map_type)
 	if (IS_ENABLED(CONFIG_ARM_MMU_PERMISSIONS))
 		return map_type;
 
-	switch (map_type) {
+	switch (map_type & MAP_TYPE_MASK) {
 	case MAP_CODE:
 	case MAP_CACHED:
 	case ARCH_MAP_CACHED_RO:
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 8d1343b5d7d7..ae86c27e7e27 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -226,7 +226,7 @@ static u32 pte_flags_to_pmd(u32 pte)
 static uint32_t get_pte_flags(maptype_t map_type)
 {
 	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
-		switch (map_type) {
+		switch (map_type & MAP_TYPE_MASK) {
 		case ARCH_MAP_CACHED_RWX:
 			return PTE_FLAGS_CACHED_V7_RWX;
 		case ARCH_MAP_CACHED_RO:
@@ -244,7 +244,7 @@ static uint32_t get_pte_flags(maptype_t map_type)
 			return 0x0;
 		}
 	} else {
-		switch (map_type) {
+		switch (map_type & MAP_TYPE_MASK) {
 		case ARCH_MAP_CACHED_RO:
 		case MAP_CODE:
 			return PTE_FLAGS_CACHED_RO_V4;
@@ -300,7 +300,7 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 			 */
 			chunk = PGDIR_SIZE;
 			val = phys_addr | pmd_flags;
-			if (map_type != MAP_FAULT)
+			if (!maptype_is_compatible(map_type, MAP_FAULT))
 				val |= PMD_TYPE_SECT;
 			// TODO break-before-make missing
 			set_pte(pgd, val);
@@ -346,7 +346,7 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 
 				val = phys_addr + i * PAGE_SIZE;
 				val |= pte_flags;
-				if (map_type != MAP_FAULT)
+				if (!maptype_is_compatible(map_type, MAP_FAULT))
 					val |= PTE_TYPE_SMALL;
 
 				// TODO break-before-make missing
@@ -375,7 +375,7 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptyp
 
 	__arch_remap_range(virt_addr, phys_addr, size, map_type, false);
 
-	if (map_type == MAP_UNCACHED)
+	if (maptype_is_compatible(map_type, MAP_UNCACHED))
 		dma_inv_range(virt_addr, size);
 
 	return 0;
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index ad96bda702b8..9e8d36d94944 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -289,7 +289,7 @@ static void flush_cacheable_pages(void *start, size_t size)
 
 static unsigned long get_pte_attrs(maptype_t map_type)
 {
-	switch (map_type) {
+	switch (map_type & MAP_TYPE_MASK) {
 	case MAP_CACHED:
 		return attrs_xn() | CACHED_MEM;
 	case MAP_UNCACHED:
@@ -330,7 +330,7 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptyp
 	if (attrs == ~0UL)
 		return -EINVAL;
 
-	if (map_type != MAP_CACHED)
+	if (!maptype_is_compatible(map_type, MAP_CACHED))
 		flush_cacheable_pages(virt_addr, size);
 
 	create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs, false);
diff --git a/arch/powerpc/cpu-85xx/mmu.c b/arch/powerpc/cpu-85xx/mmu.c
index 5fe9ba9db6d8..eec4d3e05b56 100644
--- a/arch/powerpc/cpu-85xx/mmu.c
+++ b/arch/powerpc/cpu-85xx/mmu.c
@@ -27,7 +27,7 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptyp
 	if (phys_addr != virt_to_phys(virt_addr))
 		return -ENOSYS;
 
-	switch (flags) {
+	switch (flags & MAP_TYPE_MASK) {
 	case MAP_UNCACHED:
 		pte_flags = MAS2_I;
 		break;
diff --git a/include/mmu.h b/include/mmu.h
index db8453f58521..29992ae1d6c6 100644
--- a/include/mmu.h
+++ b/include/mmu.h
@@ -16,6 +16,9 @@
 #define MAP_WRITECOMBINE	MAP_UNCACHED
 #endif
 
+#define MAP_TYPE_MASK	0xFFFF
+#define MAP_ARCH(x)	((u16)~(x))
+
 /*
  * Depending on the architecture the default mapping can be
  * cached or uncached. Without ARCH_HAS_REMAP being set this
@@ -25,11 +28,25 @@
 
 #include <asm/mmu.h>
 
+static inline bool maptype_is_compatible(maptype_t active, maptype_t check)
+{
+	active &= MAP_TYPE_MASK;
+	check &= MAP_TYPE_MASK;
+
+	if (active == check)
+		return true;
+	if (active == MAP_CODE && check == MAP_CACHED)
+		return true;
+
+	return false;
+}
+
 #ifndef ARCH_HAS_REMAP
 static inline int arch_remap_range(void *virt_addr, phys_addr_t phys_addr,
 				   size_t size, maptype_t map_type)
 {
-	if (map_type == MAP_ARCH_DEFAULT && phys_addr == virt_to_phys(virt_addr))
+	if (maptype_is_compatible(map_type, MAP_ARCH_DEFAULT) &&
+	    phys_addr == virt_to_phys(virt_addr))
 		return 0;
 
 	return -EINVAL;
-- 
2.39.5




More information about the barebox mailing list