[RFC 1/8] arm64/mm: split out __create_pgd_mapping() routines

Pingfan Liu <kernelfans@gmail.com>
Sat Apr 10 10:56:47 BST 2021


Split out the routines used by __create_pgd_mapping() into a separate
file so they can be compiled twice, generating two sets of page table
operations: one for CONFIG_PGTABLE_LEVELS and one for
CONFIG_PGTABLE_LEVELS + 1.

The set generated with CONFIG_PGTABLE_LEVELS + 1 can later be used for
the idmap when VA_BITS is too small to cover system RAM, i.e. when RAM
sits sufficiently high in the physical address space.

The idmap can then be created by calling __create_pgd_mapping()
directly.
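
This is the usual trick of compiling one body of code twice under
different macro values. As a rough userspace illustration of the idea
(the names below are made up for the example; the patch itself gets the
second copy by recompiling mmu_include.c from a second translation
unit, idmap_mmu.c):

  #include <stdio.h>

  /* stand-in for CONFIG_PGTABLE_LEVELS */
  #define TABLE_LEVELS 3

  /* stand-in for the shared routines in mmu_include.c */
  static void walk_native(void)
  {
          printf("walker built for %d levels\n", TABLE_LEVELS);
  }

  /* what idmap_mmu.c does: bump the level count, rebuild the body */
  #undef TABLE_LEVELS
  #define TABLE_LEVELS 4

  static void walk_extended(void)
  {
          printf("walker built for %d levels\n", TABLE_LEVELS);
  }

  int main(void)
  {
          walk_native();          /* prints: walker built for 3 levels */
          walk_extended();        /* prints: walker built for 4 levels */
          return 0;
  }

The static helpers do not clash at link time because each including .c
file is its own translation unit.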

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Atish Patra <atish.patra@wdc.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Mark Brown <broonie@kernel.org>
To: linux-arm-kernel@lists.infradead.org
---
 arch/arm64/Kconfig          |   4 +
 arch/arm64/mm/Makefile      |   2 +
 arch/arm64/mm/idmap_mmu.c   |  42 ++++++
 arch/arm64/mm/mmu.c         | 263 +-----------------------------------
 arch/arm64/mm/mmu_include.c | 262 +++++++++++++++++++++++++++++++++++
 5 files changed, 312 insertions(+), 261 deletions(-)
 create mode 100644 arch/arm64/mm/idmap_mmu.c
 create mode 100644 arch/arm64/mm/mmu_include.c
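
For illustration, a later patch in this series is expected to use the
new helper for the idmap roughly as follows (a hypothetical sketch, not
code from this patch; the arguments are placeholders):

  /* identity-map [phys, phys + size) with page tables built for
   * CONFIG_PGTABLE_LEVELS + 1 levels */
  __create_pgd_mapping_extend(idmap_pg_dir, idmap_ptrs_per_pgd,
                              phys, (unsigned long)phys, size,
                              PAGE_KERNEL_ROX, early_pgtable_alloc,
                              NO_BLOCK_MAPPINGS);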

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e4e1b6550115..989fc501a1b4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -327,6 +327,10 @@ config PGTABLE_LEVELS
 	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
 	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
 
+config IDMAP_PGTABLE_EXPAND
+	def_bool y
+	depends on (ARM64_4K_PAGES && ARM64_VA_BITS_39) || (ARM64_64K_PAGES && ARM64_VA_BITS_42)
+
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index f188c9092696..f9283cb9a201 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,6 +3,8 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   cache.o copypage.o flush.o \
 				   ioremap.o mmap.o pgd.o mmu.o \
 				   context.o proc.o pageattr.o
+
+obj-$(CONFIG_IDMAP_PGTABLE_EXPAND)	+= idmap_mmu.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)	+= ptdump.o
 obj-$(CONFIG_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
diff --git a/arch/arm64/mm/idmap_mmu.c b/arch/arm64/mm/idmap_mmu.c
new file mode 100644
index 000000000000..7e9a4f4017d3
--- /dev/null
+++ b/arch/arm64/mm/idmap_mmu.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/barrier.h>
+#include <asm/cputype.h>
+#include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <linux/sizes.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/ptdump.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_IDMAP_PGTABLE_EXPAND
+
+#if CONFIG_PGTABLE_LEVELS == 2
+#define EXTEND_LEVEL 3
+#elif CONFIG_PGTABLE_LEVELS == 3
+#define EXTEND_LEVEL 4
+#endif
+
+#undef CONFIG_PGTABLE_LEVELS
+#define CONFIG_PGTABLE_LEVELS EXTEND_LEVEL
+
+#include "./mmu_include.c"
+
+void __create_pgd_mapping_extend(pgd_t *pgdir, unsigned int entries_cnt, phys_addr_t phys,
+				 unsigned long virt, phys_addr_t size,
+				 pgprot_t prot,
+				 phys_addr_t (*pgtable_alloc)(int),
+				 int flags)
+{
+	__create_pgd_mapping(pgdir, phys, virt, size, prot, pgtable_alloc, flags);
+}
+#endif
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5d9550fdb9cf..56e4f25e8d6d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -37,9 +37,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
-#define NO_BLOCK_MAPPINGS	BIT(0)
-#define NO_CONT_MAPPINGS	BIT(1)
-
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
@@ -116,264 +113,6 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
 	return phys;
 }
 
-static bool pgattr_change_is_safe(u64 old, u64 new)
-{
-	/*
-	 * The following mapping attributes may be updated in live
-	 * kernel mappings without the need for break-before-make.
-	 */
-	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
-
-	/* creating or taking down mappings is always safe */
-	if (old == 0 || new == 0)
-		return true;
-
-	/* live contiguous mappings may not be manipulated at all */
-	if ((old | new) & PTE_CONT)
-		return false;
-
-	/* Transitioning from Non-Global to Global is unsafe */
-	if (old & ~new & PTE_NG)
-		return false;
-
-	/*
-	 * Changing the memory type between Normal and Normal-Tagged is safe
-	 * since Tagged is considered a permission attribute from the
-	 * mismatched attribute aliases perspective.
-	 */
-	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
-	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
-	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
-	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
-		mask |= PTE_ATTRINDX_MASK;
-
-	return ((old ^ new) & ~mask) == 0;
-}
-
-static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
-		     phys_addr_t phys, pgprot_t prot)
-{
-	pte_t *ptep;
-
-	ptep = pte_set_fixmap_offset(pmdp, addr);
-	do {
-		pte_t old_pte = READ_ONCE(*ptep);
-
-		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
-
-		/*
-		 * After the PTE entry has been populated once, we
-		 * only allow updates to the permission attributes.
-		 */
-		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
-					      READ_ONCE(pte_val(*ptep))));
-
-		phys += PAGE_SIZE;
-	} while (ptep++, addr += PAGE_SIZE, addr != end);
-
-	pte_clear_fixmap();
-}
-
-static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
-				unsigned long end, phys_addr_t phys,
-				pgprot_t prot,
-				phys_addr_t (*pgtable_alloc)(int),
-				int flags)
-{
-	unsigned long next;
-	pmd_t pmd = READ_ONCE(*pmdp);
-
-	BUG_ON(pmd_sect(pmd));
-	if (pmd_none(pmd)) {
-		phys_addr_t pte_phys;
-		BUG_ON(!pgtable_alloc);
-		pte_phys = pgtable_alloc(PAGE_SHIFT);
-		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
-		pmd = READ_ONCE(*pmdp);
-	}
-	BUG_ON(pmd_bad(pmd));
-
-	do {
-		pgprot_t __prot = prot;
-
-		next = pte_cont_addr_end(addr, end);
-
-		/* use a contiguous mapping if the range is suitably aligned */
-		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
-		    (flags & NO_CONT_MAPPINGS) == 0)
-			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
-
-		init_pte(pmdp, addr, next, phys, __prot);
-
-		phys += next - addr;
-	} while (addr = next, addr != end);
-}
-
-static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
-		     phys_addr_t phys, pgprot_t prot,
-		     phys_addr_t (*pgtable_alloc)(int), int flags)
-{
-	unsigned long next;
-	pmd_t *pmdp;
-
-	pmdp = pmd_set_fixmap_offset(pudp, addr);
-	do {
-		pmd_t old_pmd = READ_ONCE(*pmdp);
-
-		next = pmd_addr_end(addr, end);
-
-		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-		    (flags & NO_BLOCK_MAPPINGS) == 0) {
-			pmd_set_huge(pmdp, phys, prot);
-
-			/*
-			 * After the PMD entry has been populated once, we
-			 * only allow updates to the permission attributes.
-			 */
-			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
-						      READ_ONCE(pmd_val(*pmdp))));
-		} else {
-			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
-					    pgtable_alloc, flags);
-
-			BUG_ON(pmd_val(old_pmd) != 0 &&
-			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
-		}
-		phys += next - addr;
-	} while (pmdp++, addr = next, addr != end);
-
-	pmd_clear_fixmap();
-}
-
-static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
-				unsigned long end, phys_addr_t phys,
-				pgprot_t prot,
-				phys_addr_t (*pgtable_alloc)(int), int flags)
-{
-	unsigned long next;
-	pud_t pud = READ_ONCE(*pudp);
-
-	/*
-	 * Check for initial section mappings in the pgd/pud.
-	 */
-	BUG_ON(pud_sect(pud));
-	if (pud_none(pud)) {
-		phys_addr_t pmd_phys;
-		BUG_ON(!pgtable_alloc);
-		pmd_phys = pgtable_alloc(PMD_SHIFT);
-		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
-		pud = READ_ONCE(*pudp);
-	}
-	BUG_ON(pud_bad(pud));
-
-	do {
-		pgprot_t __prot = prot;
-
-		next = pmd_cont_addr_end(addr, end);
-
-		/* use a contiguous mapping if the range is suitably aligned */
-		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
-		    (flags & NO_CONT_MAPPINGS) == 0)
-			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
-
-		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
-
-		phys += next - addr;
-	} while (addr = next, addr != end);
-}
-
-static inline bool use_1G_block(unsigned long addr, unsigned long next,
-			unsigned long phys)
-{
-	if (PAGE_SHIFT != 12)
-		return false;
-
-	if (((addr | next | phys) & ~PUD_MASK) != 0)
-		return false;
-
-	return true;
-}
-
-static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
-			   phys_addr_t phys, pgprot_t prot,
-			   phys_addr_t (*pgtable_alloc)(int),
-			   int flags)
-{
-	unsigned long next;
-	pud_t *pudp;
-	p4d_t *p4dp = p4d_offset(pgdp, addr);
-	p4d_t p4d = READ_ONCE(*p4dp);
-
-	if (p4d_none(p4d)) {
-		phys_addr_t pud_phys;
-		BUG_ON(!pgtable_alloc);
-		pud_phys = pgtable_alloc(PUD_SHIFT);
-		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
-		p4d = READ_ONCE(*p4dp);
-	}
-	BUG_ON(p4d_bad(p4d));
-
-	pudp = pud_set_fixmap_offset(p4dp, addr);
-	do {
-		pud_t old_pud = READ_ONCE(*pudp);
-
-		next = pud_addr_end(addr, end);
-
-		/*
-		 * For 4K granule only, attempt to put down a 1GB block
-		 */
-		if (use_1G_block(addr, next, phys) &&
-		    (flags & NO_BLOCK_MAPPINGS) == 0) {
-			pud_set_huge(pudp, phys, prot);
-
-			/*
-			 * After the PUD entry has been populated once, we
-			 * only allow updates to the permission attributes.
-			 */
-			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
-						      READ_ONCE(pud_val(*pudp))));
-		} else {
-			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
-					    pgtable_alloc, flags);
-
-			BUG_ON(pud_val(old_pud) != 0 &&
-			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
-		}
-		phys += next - addr;
-	} while (pudp++, addr = next, addr != end);
-
-	pud_clear_fixmap();
-}
-
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
-				 unsigned long virt, phys_addr_t size,
-				 pgprot_t prot,
-				 phys_addr_t (*pgtable_alloc)(int),
-				 int flags)
-{
-	unsigned long addr, end, next;
-	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
-
-	/*
-	 * If the virtual and physical address don't have the same offset
-	 * within a page, we cannot map the region as the caller expects.
-	 */
-	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
-		return;
-
-	phys &= PAGE_MASK;
-	addr = virt & PAGE_MASK;
-	end = PAGE_ALIGN(virt + size);
-
-	do {
-		next = pgd_addr_end(addr, end);
-		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
-			       flags);
-		phys += next - addr;
-	} while (pgdp++, addr = next, addr != end);
-}
-
 static phys_addr_t __pgd_pgtable_alloc(int shift)
 {
 	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
@@ -404,6 +143,8 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
 	return pa;
 }
 
+#include "./mmu_include.c"
+
 /*
  * This function can only be used to modify existing table entries,
  * without allocating new levels of table. Note that this permits the
diff --git a/arch/arm64/mm/mmu_include.c b/arch/arm64/mm/mmu_include.c
new file mode 100644
index 000000000000..e9ebdffe860b
--- /dev/null
+++ b/arch/arm64/mm/mmu_include.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define NO_BLOCK_MAPPINGS	BIT(0)
+#define NO_CONT_MAPPINGS	BIT(1)
+
+static bool pgattr_change_is_safe(u64 old, u64 new)
+{
+	/*
+	 * The following mapping attributes may be updated in live
+	 * kernel mappings without the need for break-before-make.
+	 */
+	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+
+	/* creating or taking down mappings is always safe */
+	if (old == 0 || new == 0)
+		return true;
+
+	/* live contiguous mappings may not be manipulated at all */
+	if ((old | new) & PTE_CONT)
+		return false;
+
+	/* Transitioning from Non-Global to Global is unsafe */
+	if (old & ~new & PTE_NG)
+		return false;
+
+	/*
+	 * Changing the memory type between Normal and Normal-Tagged is safe
+	 * since Tagged is considered a permission attribute from the
+	 * mismatched attribute aliases perspective.
+	 */
+	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
+	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
+	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
+		mask |= PTE_ATTRINDX_MASK;
+
+	return ((old ^ new) & ~mask) == 0;
+}
+
+static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
+		     phys_addr_t phys, pgprot_t prot)
+{
+	pte_t *ptep;
+
+	ptep = pte_set_fixmap_offset(pmdp, addr);
+	do {
+		pte_t old_pte = READ_ONCE(*ptep);
+
+		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
+
+		/*
+		 * After the PTE entry has been populated once, we
+		 * only allow updates to the permission attributes.
+		 */
+		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
+					      READ_ONCE(pte_val(*ptep))));
+
+		phys += PAGE_SIZE;
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+
+	pte_clear_fixmap();
+}
+
+static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
+				unsigned long end, phys_addr_t phys,
+				pgprot_t prot,
+				phys_addr_t (*pgtable_alloc)(int),
+				int flags)
+{
+	unsigned long next;
+	pmd_t pmd = READ_ONCE(*pmdp);
+
+	BUG_ON(pmd_sect(pmd));
+	if (pmd_none(pmd)) {
+		phys_addr_t pte_phys;
+		BUG_ON(!pgtable_alloc);
+		pte_phys = pgtable_alloc(PAGE_SHIFT);
+		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+		pmd = READ_ONCE(*pmdp);
+	}
+	BUG_ON(pmd_bad(pmd));
+
+	do {
+		pgprot_t __prot = prot;
+
+		next = pte_cont_addr_end(addr, end);
+
+		/* use a contiguous mapping if the range is suitably aligned */
+		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
+			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+		init_pte(pmdp, addr, next, phys, __prot);
+
+		phys += next - addr;
+	} while (addr = next, addr != end);
+}
+
+static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
+		     phys_addr_t phys, pgprot_t prot,
+		     phys_addr_t (*pgtable_alloc)(int), int flags)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_set_fixmap_offset(pudp, addr);
+	do {
+		pmd_t old_pmd = READ_ONCE(*pmdp);
+
+		next = pmd_addr_end(addr, end);
+
+		/* try section mapping first */
+		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0) {
+			pmd_set_huge(pmdp, phys, prot);
+
+			/*
+			 * After the PMD entry has been populated once, we
+			 * only allow updates to the permission attributes.
+			 */
+			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
+						      READ_ONCE(pmd_val(*pmdp))));
+		} else {
+			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
+					    pgtable_alloc, flags);
+
+			BUG_ON(pmd_val(old_pmd) != 0 &&
+			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
+		}
+		phys += next - addr;
+	} while (pmdp++, addr = next, addr != end);
+
+	pmd_clear_fixmap();
+}
+
+static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
+				unsigned long end, phys_addr_t phys,
+				pgprot_t prot,
+				phys_addr_t (*pgtable_alloc)(int), int flags)
+{
+	unsigned long next;
+	pud_t pud = READ_ONCE(*pudp);
+
+	/*
+	 * Check for initial section mappings in the pgd/pud.
+	 */
+	BUG_ON(pud_sect(pud));
+	if (pud_none(pud)) {
+		phys_addr_t pmd_phys;
+		BUG_ON(!pgtable_alloc);
+		pmd_phys = pgtable_alloc(PMD_SHIFT);
+		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+		pud = READ_ONCE(*pudp);
+	}
+	BUG_ON(pud_bad(pud));
+
+	do {
+		pgprot_t __prot = prot;
+
+		next = pmd_cont_addr_end(addr, end);
+
+		/* use a contiguous mapping if the range is suitably aligned */
+		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
+		    (flags & NO_CONT_MAPPINGS) == 0)
+			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
+
+		phys += next - addr;
+	} while (addr = next, addr != end);
+}
+
+static inline bool use_1G_block(unsigned long addr, unsigned long next,
+			unsigned long phys)
+{
+	if (PAGE_SHIFT != 12)
+		return false;
+
+	if (((addr | next | phys) & ~PUD_MASK) != 0)
+		return false;
+
+	return true;
+}
+
+static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
+			   phys_addr_t phys, pgprot_t prot,
+			   phys_addr_t (*pgtable_alloc)(int),
+			   int flags)
+{
+	unsigned long next;
+	pud_t *pudp;
+	p4d_t *p4dp = p4d_offset(pgdp, addr);
+	p4d_t p4d = READ_ONCE(*p4dp);
+
+	if (p4d_none(p4d)) {
+		phys_addr_t pud_phys;
+		BUG_ON(!pgtable_alloc);
+		pud_phys = pgtable_alloc(PUD_SHIFT);
+		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
+		p4d = READ_ONCE(*p4dp);
+	}
+	BUG_ON(p4d_bad(p4d));
+
+	pudp = pud_set_fixmap_offset(p4dp, addr);
+	do {
+		pud_t old_pud = READ_ONCE(*pudp);
+
+		next = pud_addr_end(addr, end);
+
+		/*
+		 * For 4K granule only, attempt to put down a 1GB block
+		 */
+		if (use_1G_block(addr, next, phys) &&
+		    (flags & NO_BLOCK_MAPPINGS) == 0) {
+			pud_set_huge(pudp, phys, prot);
+
+			/*
+			 * After the PUD entry has been populated once, we
+			 * only allow updates to the permission attributes.
+			 */
+			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
+						      READ_ONCE(pud_val(*pudp))));
+		} else {
+			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
+					    pgtable_alloc, flags);
+
+			BUG_ON(pud_val(old_pud) != 0 &&
+			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
+		}
+		phys += next - addr;
+	} while (pudp++, addr = next, addr != end);
+
+	pud_clear_fixmap();
+}
+
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+				 unsigned long virt, phys_addr_t size,
+				 pgprot_t prot,
+				 phys_addr_t (*pgtable_alloc)(int),
+				 int flags)
+{
+	unsigned long addr, end, next;
+	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
+
+	/*
+	 * If the virtual and physical address don't have the same offset
+	 * within a page, we cannot map the region as the caller expects.
+	 */
+	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
+		return;
+
+	phys &= PAGE_MASK;
+	addr = virt & PAGE_MASK;
+	end = PAGE_ALIGN(virt + size);
+
+	do {
+		next = pgd_addr_end(addr, end);
+		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
+			       flags);
+		phys += next - addr;
+	} while (pgdp++, addr = next, addr != end);
+}
-- 
2.29.2