[PATCH v2 20/34] ARM: mmu: merge mmu-early_xx.c into mmu_xx.c
Sascha Hauer
s.hauer at pengutronix.de
Wed May 17 02:03:26 PDT 2023
The code will be consolidated further in subsequent patches, so move
the early MMU code into the regular MMU files to make code sharing
easier.
Signed-off-by: Sascha Hauer <s.hauer at pengutronix.de>
---
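Note: with this change mmu_$(S64_32).o moves from obj-y to obj-pbl-y,
i.e. it is now built twice: as mmu_$(S64_32).o for barebox proper and
as mmu_$(S64_32).pbl.o for the PBL (compare the hyp.pbl.o flags in the
Makefile hunk below). That is what lets the early MMU setup live in the
same file as the regular MMU code.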
arch/arm/cpu/Makefile | 4 +-
arch/arm/cpu/mmu-early_32.c | 62 -------------------------
arch/arm/cpu/mmu-early_64.c | 93 -------------------------------------
arch/arm/cpu/mmu_32.c | 50 ++++++++++++++++++++
arch/arm/cpu/mmu_64.c | 76 ++++++++++++++++++++++++++++++
5 files changed, 128 insertions(+), 157 deletions(-)
delete mode 100644 arch/arm/cpu/mmu-early_32.c
delete mode 100644 arch/arm/cpu/mmu-early_64.c
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index cd5f36eb49..0e4fa69229 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -3,10 +3,10 @@
obj-y += cpu.o
obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions_$(S64_32).o interrupts_$(S64_32).o
-obj-$(CONFIG_MMU) += mmu_$(S64_32).o mmu-common.o
+obj-$(CONFIG_MMU) += mmu-common.o
+obj-pbl-$(CONFIG_MMU) += mmu_$(S64_32).o
obj-$(CONFIG_MMU) += dma_$(S64_32).o
obj-pbl-y += lowlevel_$(S64_32).o
-obj-pbl-$(CONFIG_MMU) += mmu-early_$(S64_32).o
obj-pbl-$(CONFIG_CPU_32v7) += hyp.o
AFLAGS_hyp.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
AFLAGS_hyp.pbl.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
diff --git a/arch/arm/cpu/mmu-early_32.c b/arch/arm/cpu/mmu-early_32.c
deleted file mode 100644
index 94bde44c9b..0000000000
--- a/arch/arm/cpu/mmu-early_32.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <common.h>
-#include <asm/mmu.h>
-#include <errno.h>
-#include <linux/sizes.h>
-#include <asm/memory.h>
-#include <asm/system.h>
-#include <asm/cache.h>
-#include <asm-generic/sections.h>
-
-#include "mmu_32.h"
-
-static uint32_t *ttb;
-
-static inline void map_region(unsigned long start, unsigned long size,
- uint64_t flags)
-
-{
- start = ALIGN_DOWN(start, SZ_1M);
- size = ALIGN(size, SZ_1M);
-
- create_sections(ttb, start, start + size - 1, flags);
-}
-
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long _ttb)
-{
- ttb = (uint32_t *)_ttb;
-
- set_ttbr(ttb);
-
- /* For the XN bit to take effect, we can't be using DOMAIN_MANAGER. */
- if (cpu_architecture() >= CPU_ARCH_ARMv7)
- set_domain(DOMAIN_CLIENT);
- else
- set_domain(DOMAIN_MANAGER);
-
- /*
- * This marks the whole address space as uncachable as well as
- * unexecutable if possible
- */
- create_flat_mapping(ttb);
-
- /*
- * There can be SoCs that have a section shared between device memory
- * and the on-chip RAM hosting the PBL. Thus mark this section
- * uncachable, but executable.
- * On such SoCs, executing from OCRAM could cause the instruction
- * prefetcher to speculatively access that device memory, triggering
- * potential errant behavior.
- *
- * If your SoC has such a memory layout, you should rewrite the code
- * here to map the OCRAM page-wise.
- */
- map_region((unsigned long)_stext, _etext - _stext, PMD_SECT_DEF_UNCACHED);
-
- /* maps main memory as cachable */
- map_region(membase, memsize, PMD_SECT_DEF_CACHED);
-
- __mmu_cache_on();
-}
diff --git a/arch/arm/cpu/mmu-early_64.c b/arch/arm/cpu/mmu-early_64.c
deleted file mode 100644
index d1f4a046bb..0000000000
--- a/arch/arm/cpu/mmu-early_64.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <common.h>
-#include <dma-dir.h>
-#include <init.h>
-#include <mmu.h>
-#include <errno.h>
-#include <linux/sizes.h>
-#include <asm/memory.h>
-#include <asm/pgtable64.h>
-#include <asm/barebox-arm.h>
-#include <asm/system.h>
-#include <asm/cache.h>
-#include <memory.h>
-#include <asm/system_info.h>
-
-#include "mmu_64.h"
-
-static void create_sections(void *ttb, uint64_t virt, uint64_t phys,
- uint64_t size, uint64_t attr)
-{
- uint64_t block_size;
- uint64_t block_shift;
- uint64_t *pte;
- uint64_t idx;
- uint64_t addr;
- uint64_t *table;
-
- addr = virt;
-
- attr &= ~PTE_TYPE_MASK;
-
- table = ttb;
-
- while (1) {
- block_shift = level2shift(1);
- idx = (addr & level2mask(1)) >> block_shift;
- block_size = (1ULL << block_shift);
-
- pte = table + idx;
-
- *pte = phys | attr | PTE_TYPE_BLOCK;
-
- if (size < block_size)
- break;
-
- addr += block_size;
- phys += block_size;
- size -= block_size;
- }
-}
-
-#define EARLY_BITS_PER_VA 39
-
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long ttb)
-{
- int el;
-
- /*
- * For the early code we only create level 1 pagetables which only
- * allow for a 1GiB granularity. If our membase is not aligned to that
- * bail out without enabling the MMU.
- */
- if (membase & ((1ULL << level2shift(1)) - 1))
- return;
-
- memset((void *)ttb, 0, GRANULE_SIZE);
-
- el = current_el();
- set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
- create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
- attrs_uncached_mem());
- create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
- tlb_invalidate();
- isb();
- set_cr(get_cr() | CR_M);
-}
-
-void mmu_early_disable(void)
-{
- unsigned int cr;
-
- cr = get_cr();
- cr &= ~(CR_M | CR_C);
-
- set_cr(cr);
- v8_flush_dcache_all();
- tlb_invalidate();
-
- dsb();
- isb();
-}
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 10f447874c..12fe892400 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -494,3 +494,53 @@ void *dma_alloc_writecombine(size_t size, dma_addr_t *dma_handle)
{
return dma_alloc_map(size, dma_handle, ARCH_MAP_WRITECOMBINE);
}
+
+static uint32_t *ttb;
+
+static inline void map_region(unsigned long start, unsigned long size,
+ uint64_t flags)
+
+{
+ start = ALIGN_DOWN(start, SZ_1M);
+ size = ALIGN(size, SZ_1M);
+
+ create_sections(ttb, start, start + size - 1, flags);
+}
+
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long _ttb)
+{
+ ttb = (uint32_t *)_ttb;
+
+ set_ttbr(ttb);
+
+ /* For the XN bit to take effect, we can't be using DOMAIN_MANAGER. */
+ if (cpu_architecture() >= CPU_ARCH_ARMv7)
+ set_domain(DOMAIN_CLIENT);
+ else
+ set_domain(DOMAIN_MANAGER);
+
+ /*
+ * This marks the whole address space as uncachable as well as
+ * unexecutable if possible
+ */
+ create_flat_mapping(ttb);
+
+ /*
+ * There can be SoCs that have a section shared between device memory
+ * and the on-chip RAM hosting the PBL. Thus mark this section
+ * uncachable, but executable.
+ * On such SoCs, executing from OCRAM could cause the instruction
+ * prefetcher to speculatively access that device memory, triggering
+ * potential errant behavior.
+ *
+ * If your SoC has such a memory layout, you should rewrite the code
+ * here to map the OCRAM page-wise.
+ */
+ map_region((unsigned long)_stext, _etext - _stext, PMD_SECT_DEF_UNCACHED);
+
+ /* maps main memory as cachable */
+ map_region(membase, memsize, PMD_SECT_DEF_CACHED);
+
+ __mmu_cache_on();
+}
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 9150de1676..55ada960c5 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -241,3 +241,79 @@ void dma_flush_range(void *ptr, size_t size)
v8_flush_dcache_range(start, end);
}
+
+static void early_create_sections(void *ttb, uint64_t virt, uint64_t phys,
+ uint64_t size, uint64_t attr)
+{
+ uint64_t block_size;
+ uint64_t block_shift;
+ uint64_t *pte;
+ uint64_t idx;
+ uint64_t addr;
+ uint64_t *table;
+
+ addr = virt;
+
+ attr &= ~PTE_TYPE_MASK;
+
+ table = ttb;
+
+ while (1) {
+ block_shift = level2shift(1);
+ idx = (addr & level2mask(1)) >> block_shift;
+ block_size = (1ULL << block_shift);
+
+ pte = table + idx;
+
+ *pte = phys | attr | PTE_TYPE_BLOCK;
+
+ if (size < block_size)
+ break;
+
+ addr += block_size;
+ phys += block_size;
+ size -= block_size;
+ }
+}
+
+#define EARLY_BITS_PER_VA 39
+
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long ttb)
+{
+ int el;
+
+ /*
+ * For the early code we only create level 1 pagetables which only
+ * allow for a 1GiB granularity. If our membase is not aligned to that
+ * bail out without enabling the MMU.
+ */
+ if (membase & ((1ULL << level2shift(1)) - 1))
+ return;
+
+ memset((void *)ttb, 0, GRANULE_SIZE);
+
+ el = current_el();
+ set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
+ early_create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
+ attrs_uncached_mem());
+ early_create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
+ tlb_invalidate();
+ isb();
+ set_cr(get_cr() | CR_M);
+}
+
+void mmu_early_disable(void)
+{
+ unsigned int cr;
+
+ cr = get_cr();
+ cr &= ~(CR_M | CR_C);
+
+ set_cr(cr);
+ v8_flush_dcache_all();
+ tlb_invalidate();
+
+ dsb();
+ isb();
+}
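
As the comments in the hunks above note, the early code maps memory at
coarse granularity: 1 MiB sections on ARM32, and 1 GiB level-1 blocks
on ARM64 with a 4 KiB granule (level2shift(1) == 30), which is why the
AArch64 mmu_early_enable() bails out on a membase that is not 1 GiB
aligned. A stand-alone sketch of that alignment check (not part of the
patch; the membase value and the aligned_to() helper are made up for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTION_SHIFT   20  /* ARM32: 1 MiB sections */
    #define L1_BLOCK_SHIFT  30  /* ARM64, 4K granule: 1 GiB level-1 blocks */

    /* Mirrors the bail-out check in the AArch64 mmu_early_enable(). */
    static int aligned_to(uint64_t base, unsigned int shift)
    {
            return (base & ((1ULL << shift) - 1)) == 0;
    }

    int main(void)
    {
            uint64_t membase = 0x48000000;  /* made-up DRAM base */

            /* Fine for the ARM32 path, which works on 1 MiB sections. */
            printf("1 MiB aligned: %d\n", aligned_to(membase, SECTION_SHIFT));

            /* Not 1 GiB aligned: the AArch64 early code would return
             * without enabling the MMU. */
            printf("1 GiB aligned: %d\n", aligned_to(membase, L1_BLOCK_SHIFT));

            return 0;
    }

On such a SoC the AArch64 early MMU simply stays off and execution
continues uncached until the full pagetable setup in barebox proper.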
--
2.39.2