[PATCH 17/27] ARM: mmu: drop ttb argument
Sascha Hauer
s.hauer at pengutronix.de
Fri May 12 04:09:58 PDT 2023
No need to pass ttb to the MMU code; the MMU code can call
arm_mem_ttb() itself to get the desired base. While at it, move the
OPTEE_SIZE reservation from the callers into mmu_early_enable(), so
the callers only have to pass the full memory bank size.
Signed-off-by: Sascha Hauer <s.hauer at pengutronix.de>
---
arch/arm/cpu/mmu_32.c | 9 +++++----
arch/arm/cpu/mmu_64.c | 8 +++++---
arch/arm/cpu/start.c | 11 +++--------
arch/arm/cpu/uncompress.c | 7 ++-----
arch/arm/include/asm/mmu.h | 3 +--
5 files changed, 16 insertions(+), 22 deletions(-)
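
For reference, a minimal sketch of the caller-side change, distilled from
the hunks below (these are barebox-internal functions; the snippet is
illustrative only, not a standalone program):

  /* Before: each caller computed the TTB and reserved the OP-TEE area itself */
  unsigned long ttb = arm_mem_ttb(membase, endmem);
  mmu_early_enable(membase, memsize - OPTEE_SIZE, ttb);

  /*
   * After: callers only pass the memory bank; mmu_early_enable() derives
   * the TTB via arm_mem_ttb() and applies the OPTEE_SIZE reservation
   * internally when mapping cached memory.
   */
  mmu_early_enable(membase, memsize);
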
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index a82382ad1e..bef4a01670 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -533,10 +533,11 @@ static inline void map_region(unsigned long start, unsigned long size,
create_sections(ttb, start, start + size - 1, flags);
}
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long _ttb)
+void mmu_early_enable(unsigned long membase, unsigned long memsize)
{
- ttb = (uint32_t *)_ttb;
+ ttb = (uint32_t *)arm_mem_ttb(membase, membase + memsize);
+
+ pr_debug("enabling MMU, ttb @ 0x%p\n", ttb);
set_ttbr(ttb);
@@ -566,7 +567,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize,
map_region((unsigned long)_stext, _etext - _stext, PMD_SECT_DEF_UNCACHED);
/* maps main memory as cachable */
- map_region(membase, memsize, PMD_SECT_DEF_CACHED);
+ map_region(membase, memsize - OPTEE_SIZE, PMD_SECT_DEF_CACHED);
__mmu_cache_on();
}
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 3cc5b14a46..4b75be621d 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -292,10 +292,12 @@ static void early_create_sections(void *ttb, uint64_t virt, uint64_t phys,
#define EARLY_BITS_PER_VA 39
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long ttb)
+void mmu_early_enable(unsigned long membase, unsigned long memsize)
{
int el;
+ unsigned long ttb = arm_mem_ttb(membase, membase + memsize);
+
+ pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
/*
* For the early code we only create level 1 pagetables which only
@@ -311,7 +313,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize,
set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
early_create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
attrs_uncached_mem());
- early_create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
+ early_create_sections((void *)ttb, membase, membase, memsize - OPTEE_SIZE, CACHED_MEM);
tlb_invalidate();
isb();
set_cr(get_cr() | CR_M);
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index 9d788eba2b..0b08af0176 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -216,14 +216,9 @@ __noreturn __no_sanitize_address void barebox_non_pbl_start(unsigned long membas
mem_malloc_init((void *)malloc_start, (void *)malloc_end - 1);
- if (IS_ENABLED(CONFIG_MMU_EARLY)) {
- unsigned long ttb = arm_mem_ttb(membase, endmem);
-
- if (!IS_ENABLED(CONFIG_PBL_IMAGE)) {
- pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
- arm_early_mmu_cache_invalidate();
- mmu_early_enable(membase, memsize - OPTEE_SIZE, ttb);
- }
+ if (IS_ENABLED(CONFIG_MMU_EARLY) && !IS_ENABLED(CONFIG_PBL_IMAGE)) {
+ arm_early_mmu_cache_invalidate();
+ mmu_early_enable(membase, memsize);
}
if (IS_ENABLED(CONFIG_BOOTM_OPTEE))
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index 65de87f109..7c85f5a1fe 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -81,11 +81,8 @@ void __noreturn barebox_pbl_start(unsigned long membase, unsigned long memsize,
pr_debug("memory at 0x%08lx, size 0x%08lx\n", membase, memsize);
- if (IS_ENABLED(CONFIG_MMU_EARLY)) {
- unsigned long ttb = arm_mem_ttb(membase, endmem);
- pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
- mmu_early_enable(membase, memsize - OPTEE_SIZE, ttb);
- }
+ if (IS_ENABLED(CONFIG_MMU_EARLY))
+ mmu_early_enable(membase, memsize);
free_mem_ptr = arm_mem_early_malloc(membase, endmem);
free_mem_end_ptr = arm_mem_early_malloc_end(membase, endmem);
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index fd8e93f7a3..9d2fdcf365 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -56,8 +56,7 @@ void __dma_clean_range(unsigned long, unsigned long);
void __dma_flush_range(unsigned long, unsigned long);
void __dma_inv_range(unsigned long, unsigned long);
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long ttb);
+void mmu_early_enable(unsigned long membase, unsigned long memsize);
void mmu_early_disable(void);
#endif /* __ASM_MMU_H */
--
2.39.2