[PATCHv2 02/10] arm64/mm: change __create_pgd_mapping() to accept nr_entries param and introduce create_idmap()
Pingfan Liu
kernelfans at gmail.com
Sun Apr 25 15:12:56 BST 2021
Since idmap_ptrs_per_pgd may be greater than PTRS_PER_PGD, the prototype
of __create_pgd_mapping() needs to take the number of pgd entries as a
parameter so that it can also be used to create the idmap.
With that adaptation in place, a create_idmap() API can be introduced to
build the idmap conveniently for every CONFIG_PGTABLE_LEVELS
configuration.
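For reference, a caller that needs an identity mapping of some physical
region (for example the kexec/hibernate rework later in this series)
would use the new helper roughly as below. This is only an illustrative
sketch, not part of this patch: example_build_idmap(), trans_alloc()
and the region arguments are made-up names, and the usual arm64 mm
headers are assumed to be in scope.

/* Illustrative sketch only; not part of this patch. */
static phys_addr_t trans_alloc(int shift)
{
	/* hand one zeroed page to __create_pgd_mapping() per table level */
	void *page = (void *)get_zeroed_page(GFP_ATOMIC);

	return page ? virt_to_phys(page) : 0;
}

static void example_build_idmap(pgd_t *pgdir, phys_addr_t start,
				phys_addr_t size)
{
	/*
	 * Map [start, start + size) with virt == phys; create_idmap()
	 * picks the extended pgd layout automatically when head.S has
	 * set idmap_extend_pgtable.
	 */
	create_idmap(pgdir, start, size, PAGE_KERNEL_EXEC,
		     trans_alloc, NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
}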
Signed-off-by: Pingfan Liu <kernelfans at gmail.com>
Cc: Catalin Marinas <catalin.marinas at arm.com>
Cc: Will Deacon <will at kernel.org>
Cc: Marc Zyngier <maz at kernel.org>
Cc: Kristina Martsenko <kristina.martsenko at arm.com>
Cc: James Morse <james.morse at arm.com>
Cc: Steven Price <steven.price at arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron at huawei.com>
Cc: Pavel Tatashin <pasha.tatashin at soleen.com>
Cc: Anshuman Khandual <anshuman.khandual at arm.com>
Cc: Atish Patra <atish.patra at wdc.com>
Cc: Mike Rapoport <rppt at kernel.org>
Cc: Logan Gunthorpe <logang at deltatee.com>
Cc: Mark Brown <broonie at kernel.org>
To: linux-arm-kernel at lists.infradead.org
---
arch/arm64/include/asm/pgalloc.h | 7 ++++++
arch/arm64/kernel/head.S | 3 +++
arch/arm64/mm/idmap_mmu.c | 16 ++++++++-----
arch/arm64/mm/mmu.c | 41 ++++++++++++++++++++++++++------
arch/arm64/mm/mmu_include.c | 9 +++++--
5 files changed, 61 insertions(+), 15 deletions(-)
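The core of the change is how __create_pgd_mapping() selects the pgd
slot once it knows the number of entries: the existing pgd_offset_pgd()
path is kept when entries_cnt == PTRS_PER_PGD, while an extended idmap
pgd is indexed directly by PGDIR_SHIFT. As a standalone sketch of that
calculation (assuming entries_cnt is a power of two, which holds for
both cases; example_pgd_slot() is a made-up name for illustration):

/* Sketch of the slot selection added to __create_pgd_mapping(). */
static pgd_t *example_pgd_slot(pgd_t *pgdir, unsigned int entries_cnt,
			       unsigned long virt)
{
	if (likely(entries_cnt == PTRS_PER_PGD))
		return pgd_offset_pgd(pgdir, virt);	/* unchanged path */

	/* extended idmap pgd: more slots than PTRS_PER_PGD */
	return pgdir + ((virt >> PGDIR_SHIFT) & (entries_cnt - 1));
}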
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 3c6a7f5988b1..555792921af0 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -83,4 +83,11 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
}
#define pmd_pgtable(pmd) pmd_page(pmd)
+extern void __create_pgd_mapping_extend(pgd_t *pgdir,
+ unsigned int entries_cnt, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags);
+
#endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 840bda1869e9..e19649dbbafb 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -341,6 +341,9 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
+ adr_l x4, idmap_extend_pgtable
+ mov x5, #1
+ str x5, [x4] //require expanded pagetable
mov x4, EXTRA_PTRS
create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
diff --git a/arch/arm64/mm/idmap_mmu.c b/arch/arm64/mm/idmap_mmu.c
index 42a27dd5cc9f..bff1bffee321 100644
--- a/arch/arm64/mm/idmap_mmu.c
+++ b/arch/arm64/mm/idmap_mmu.c
@@ -21,13 +21,17 @@
#include "./mmu_include.c"
-void __create_pgd_mapping_extend(pgd_t *pgdir, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
- int flags)
+void __create_pgd_mapping_extend(pgd_t *pgdir,
+ unsigned int entries_cnt,
+ phys_addr_t phys,
+ unsigned long virt,
+ phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags)
{
- __create_pgd_mapping(pgdir, phys, virt, size, prot, pgtable_alloc, flags);
+ __create_pgd_mapping(pgdir, entries_cnt, phys, virt, size, prot,
+ pgtable_alloc, flags);
}
#endif
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 56e4f25e8d6d..70a5a7b032dc 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -145,6 +145,33 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
#include "./mmu_include.c"
+int idmap_extend_pgtable;
+
+/*
+ * Locking: callers must serialize themselves; no lock is taken here.
+ * TODO: tear down the idmap (no requirement at present).
+ */
+void create_idmap(pgd_t *pgdir, phys_addr_t phys,
+ phys_addr_t size,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int),
+ int flags)
+{
+ u64 ptrs_per_pgd = idmap_ptrs_per_pgd;
+
+#ifdef CONFIG_IDMAP_PGTABLE_EXPAND
+ if (idmap_extend_pgtable)
+ __create_pgd_mapping_extend(pgdir, ptrs_per_pgd,
+ phys, phys, size, prot, pgtable_alloc, flags);
+ else
+ __create_pgd_mapping(pgdir, ptrs_per_pgd,
+ phys, phys, size, prot, pgtable_alloc, flags);
+#else
+ __create_pgd_mapping(pgdir, ptrs_per_pgd,
+ phys, phys, size, prot, pgtable_alloc, flags);
+#endif
+}
+
/*
* This function can only be used to modify existing table entries,
* without allocating new levels of table. Note that this permits the
@@ -158,7 +185,7 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
+ __create_pgd_mapping(init_mm.pgd, PTRS_PER_PGD, phys, virt, size, prot, NULL,
NO_CONT_MAPPINGS);
}
@@ -173,7 +200,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
if (page_mappings_only)
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
- __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
+ __create_pgd_mapping(mm->pgd, PTRS_PER_PGD, phys, virt, size, prot,
pgd_pgtable_alloc, flags);
}
@@ -186,7 +213,7 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
+ __create_pgd_mapping(init_mm.pgd, PTRS_PER_PGD, phys, virt, size, prot, NULL,
NO_CONT_MAPPINGS);
/* flush the TLBs after updating live kernel mappings */
@@ -196,7 +223,7 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
phys_addr_t end, pgprot_t prot, int flags)
{
- __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
+ __create_pgd_mapping(pgdp, PTRS_PER_PGD, start, __phys_to_virt(start), end - start,
prot, early_pgtable_alloc, flags);
}
@@ -297,7 +324,7 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(pa_start));
BUG_ON(!PAGE_ALIGNED(size));
- __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
+ __create_pgd_mapping(pgdp, PTRS_PER_PGD, pa_start, (unsigned long)va_start, size, prot,
early_pgtable_alloc, flags);
if (!(vm_flags & VM_NO_GUARD))
@@ -341,7 +368,7 @@ static int __init map_entry_trampoline(void)
/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
- __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+ __create_pgd_mapping(tramp_pg_dir, PTRS_PER_PGD, pa_start, TRAMP_VALIAS, PAGE_SIZE,
prot, __pgd_pgtable_alloc, 0);
/* Map both the text and data into the kernel page table */
@@ -1233,7 +1260,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
IS_ENABLED(CONFIG_KFENCE))
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
- __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
+ __create_pgd_mapping(swapper_pg_dir, PTRS_PER_PGD, start, __phys_to_virt(start),
size, params->pgprot, __pgd_pgtable_alloc,
flags);
diff --git a/arch/arm64/mm/mmu_include.c b/arch/arm64/mm/mmu_include.c
index 95ff35a3c6cb..be51689d1133 100644
--- a/arch/arm64/mm/mmu_include.c
+++ b/arch/arm64/mm/mmu_include.c
@@ -241,14 +241,19 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
pud_clear_fixmap();
}
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+static void __create_pgd_mapping(pgd_t *pgdir, unsigned int entries_cnt, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long addr, end, next;
- pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
+ pgd_t *pgdp;
+
+ if (likely(entries_cnt == PTRS_PER_PGD))
+ pgdp = pgd_offset_pgd(pgdir, virt);
+ else
+ pgdp = pgdir + ((virt >> PGDIR_SHIFT) & (entries_cnt - 1));
/*
* If the virtual and physical address don't have the same offset
--
2.29.2