[RFC 8/8] arm64/head: convert idmap_pg_dir and init_pg_dir to __create_pgd_mapping()

Pingfan Liu kernelfans at gmail.com
Sat Apr 10 10:56:54 BST 2021


Signed-off-by: Pingfan Liu <kernelfans at gmail.com>
Cc: Catalin Marinas <catalin.marinas at arm.com>
Cc: Will Deacon <will at kernel.org>
Cc: Marc Zyngier <maz at kernel.org>
Cc: Kristina Martsenko <kristina.martsenko at arm.com>
Cc: James Morse <james.morse at arm.com>
Cc: Steven Price <steven.price at arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron at huawei.com>
Cc: Pavel Tatashin <pasha.tatashin at soleen.com>
Cc: Anshuman Khandual <anshuman.khandual at arm.com>
Cc: Atish Patra <atish.patra at wdc.com>
Cc: Mike Rapoport <rppt at kernel.org>
Cc: Logan Gunthorpe <logang at deltatee.com>
Cc: Mark Brown <broonie at kernel.org>
To: linux-arm-kernel at lists.infradead.org
---
 arch/arm64/include/asm/pgalloc.h |   5 +
 arch/arm64/kernel/head.S         | 187 +++++++------------------------
 arch/arm64/mm/mmu.c              |   9 ++
 3 files changed, 55 insertions(+), 146 deletions(-)

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index f848a0300228..128d784d78d4 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -8,6 +8,9 @@
 #ifndef __ASM_PGALLOC_H
 #define __ASM_PGALLOC_H
 
+#include <vdso/bits.h>
+
+#ifndef __ASSEMBLY__
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
@@ -102,6 +105,8 @@ extern void create_idmap(pgd_t *pgdir, phys_addr_t phys,
 		void *info,
 		int flags);
 
+#endif
+
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
 #define NO_FIXMAP	BIT(2)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e19649dbbafb..3b0c9359ab70 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -28,6 +28,8 @@
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/page.h>
+#include <asm/pgtable-prot.h>
+#include <asm/pgalloc.h>
 #include <asm/scs.h>
 #include <asm/smp.h>
 #include <asm/sysreg.h>
@@ -93,6 +95,8 @@ SYM_CODE_START(primary_entry)
 	adrp	x23, __PHYS_OFFSET
 	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	set_cpu_boot_mode_flag
+	adrp	x4, init_thread_union
+	add	sp, x4, #THREAD_SIZE
 	bl	__create_page_tables
 	/*
 	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -121,135 +125,6 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
 	b	__inval_dcache_area		// tail call
 SYM_CODE_END(preserve_boot_args)
 
-/*
- * Macro to create a table entry to the next page.
- *
- *	tbl:	page table address
- *	virt:	virtual address
- *	shift:	#imm page table shift
- *	ptrs:	#imm pointers per table page
- *
- * Preserves:	virt
- * Corrupts:	ptrs, tmp1, tmp2
- * Returns:	tbl -> next level table page address
- */
-	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
-	add	\tmp1, \tbl, #PAGE_SIZE
-	phys_to_pte \tmp2, \tmp1
-	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
-	lsr	\tmp1, \virt, #\shift
-	sub	\ptrs, \ptrs, #1
-	and	\tmp1, \tmp1, \ptrs		// table index
-	str	\tmp2, [\tbl, \tmp1, lsl #3]
-	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
-	.endm
-
-/*
- * Macro to populate page table entries, these entries can be pointers to the next level
- * or last level entries pointing to physical memory.
- *
- *	tbl:	page table address
- *	rtbl:	pointer to page table or physical memory
- *	index:	start index to write
- *	eindex:	end index to write - [index, eindex] written to
- *	flags:	flags for pagetable entry to or in
- *	inc:	increment to rtbl between each entry
- *	tmp1:	temporary variable
- *
- * Preserves:	tbl, eindex, flags, inc
- * Corrupts:	index, tmp1
- * Returns:	rtbl
- */
-	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
-.Lpe\@:	phys_to_pte \tmp1, \rtbl
-	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
-	str	\tmp1, [\tbl, \index, lsl #3]
-	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
-	add	\index, \index, #1
-	cmp	\index, \eindex
-	b.ls	.Lpe\@
-	.endm
-
-/*
- * Compute indices of table entries from virtual address range. If multiple entries
- * were needed in the previous page table level then the next page table level is assumed
- * to be composed of multiple pages. (This effectively scales the end index).
- *
- *	vstart:	virtual address of start of range
- *	vend:	virtual address of end of range
- *	shift:	shift used to transform virtual address into index
- *	ptrs:	number of entries in page table
- *	istart:	index in table corresponding to vstart
- *	iend:	index in table corresponding to vend
- *	count:	On entry: how many extra entries were required in previous level, scales
- *			  our end index.
- *		On exit: returns how many extra entries required for next page table level
- *
- * Preserves:	vstart, vend, shift, ptrs
- * Returns:	istart, iend, count
- */
-	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
-	lsr	\iend, \vend, \shift
-	mov	\istart, \ptrs
-	sub	\istart, \istart, #1
-	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
-	mov	\istart, \ptrs
-	mul	\istart, \istart, \count
-	add	\iend, \iend, \istart	// iend += (count - 1) * ptrs
-					// our entries span multiple tables
-
-	lsr	\istart, \vstart, \shift
-	mov	\count, \ptrs
-	sub	\count, \count, #1
-	and	\istart, \istart, \count
-
-	sub	\count, \iend, \istart
-	.endm
-
-/*
- * Map memory for specified virtual address range. Each level of page table needed supports
- * multiple entries. If a level requires n entries the next page table level is assumed to be
- * formed from n pages.
- *
- *	tbl:	location of page table
- *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
- *	vstart:	start address to map
- *	vend:	end address to map - we map [vstart, vend]
- *	flags:	flags to use to map last level entries
- *	phys:	physical address corresponding to vstart - physical memory is contiguous
- *	pgds:	the number of pgd entries
- *
- * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
- * Preserves:	vstart, vend, flags
- * Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
- */
-	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
-	add \rtbl, \tbl, #PAGE_SIZE
-	mov \sv, \rtbl
-	mov \count, #0
-	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
-	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
-	mov \tbl, \sv
-	mov \sv, \rtbl
-
-#if SWAPPER_PGTABLE_LEVELS > 3
-	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
-	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
-	mov \tbl, \sv
-	mov \sv, \rtbl
-#endif
-
-#if SWAPPER_PGTABLE_LEVELS > 2
-	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
-	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
-	mov \tbl, \sv
-#endif
-
-	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
-	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
-	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
-	.endm
-
 /*
  * Setup the initial page tables. We only setup the barest amount which is
  * required to get the kernel running. The following sections are required:
@@ -344,9 +219,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	adr_l	x4, idmap_extend_pgtable
 	mov	x5, #1
 	str	x5, [x4]                //require expanded pagetable
-
-	mov	x4, EXTRA_PTRS
-	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
 #else
 	/*
 	 * If VA_BITS == 48, we don't have to configure an additional
@@ -356,25 +228,50 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	str_l	x4, idmap_ptrs_per_pgd, x5
 #endif
 1:
-	ldr_l	x4, idmap_ptrs_per_pgd
-	mov	x5, x3				// __pa(__idmap_text_start)
-	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
+	stp	x0, x1, [sp, #-64]!
+	stp	x2, x3, [sp, #48]
+	stp	x4, x5, [sp, #32]
+	stp	x6, x7, [sp, #16]
 
-	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
+	adrp	x0, idmap_pg_dir
+	adrp	x1, idmap_pg_end
+	sub	x1, x1, x0
+	bl	set_cur_mempool
+
+	adrp	x1, __idmap_text_start
+	adr_l	x2, __idmap_text_end
+	sub	x2, x2, x1
+	ldr	x3, =PAGE_KERNEL_EXEC
+	adr_l	x4, head_pgtable_alloc
+	mov	x5, #0
+	mov	x6, #NO_FIXMAP
+	bl	create_idmap
 
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
 	adrp	x0, init_pg_dir
-	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
-	add	x5, x5, x23			// add KASLR displacement
-	mov	x4, PTRS_PER_PGD
-	adrp	x6, _end			// runtime __pa(_end)
-	adrp	x3, _text			// runtime __pa(_text)
-	sub	x6, x6, x3			// _end - _text
-	add	x6, x6, x5			// runtime __va(_end)
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+	bl	set_cur_mempool
 
-	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
+	mov	x1, PTRS_PER_PGD		// entries_cnt
+	adrp	x2, _text			// phys: runtime __pa(_text)
+	mov_q	x3, KIMAGE_VADDR		// compile time __va(_text)
+	add	x3, x3, x23			// virt: add KASLR displacement
+	adrp	x4, _end			// runtime __pa(_end)
+	sub	x4, x4, x2			// size: _end - _text
+	ldr	x5, =PAGE_KERNEL_EXEC		// prot
+	adr_l	x6, head_pgtable_alloc		// allocator
+	mov	x7, #0				// info
+	mov	x8, #NO_FIXMAP			// flags: 9th arg, passed on stack per AAPCS64
+	str	x8, [sp, #-16]!
+	bl	create_init_pgd_mapping
+	add	sp, sp, #16
+	ldp	x6, x7, [sp, #16]
+	ldp	x4, x5, [sp, #32]
+	ldp	x2, x3, [sp, #48]
+	ldp	x0, x1, [sp], #64
 
 	/*
 	 * Since the page tables have been populated with non-cacheable
@@ -402,8 +299,6 @@ SYM_FUNC_END(__create_page_tables)
  *   x0 = __PHYS_OFFSET
  */
 SYM_FUNC_START_LOCAL(__primary_switched)
-	adrp	x4, init_thread_union
-	add	sp, x4, #THREAD_SIZE
 	adr_l	x5, init_task
 	msr	sp_el0, x5			// Save thread_info
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b546e47543e2..b886332a7c3f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -167,6 +167,15 @@ static unsigned long pgd_pgtable_alloc(unsigned long shift, void *unused)
 
 #include "./mmu_include.c"
 
+void create_init_pgd_mapping(pgd_t * pgdir, unsigned int entries_cnt,
+		phys_addr_t phys, unsigned long virt, phys_addr_t size,
+	       	pgprot_t prot, pgtable_alloc allocator,
+		void * info, int flags)
+{
+	__create_pgd_mapping(pgdir, entries_cnt, phys, virt, size,
+		prot, allocator, info, flags);
+}
+
 int idmap_extend_pgtable;
 
 /* 
-- 
2.29.2




More information about the linux-arm-kernel mailing list