[PATCH v2 3/4] arm64: mm: make vmemmap region a projection of the linear region

Ard Biesheuvel ardb at kernel.org
Thu Oct 8 11:36:01 EDT 2020


Now that we have reverted the introduction of the vmemmap struct page
pointer and the separate physvirt_offset, we can simplify things further
and place the vmemmap region in the VA space in such a way that
virtual-to-page translations, and vice versa, can be implemented using a
single arithmetic shift.
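
For example (a minimal standalone sketch, not part of the patch, assuming
4k pages, VA_BITS == 48 and sizeof(struct page) == 64, i.e.
VMEMMAP_SHIFT == 6), each direction of the conversion is a single shift
of the address itself:

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Illustration only -- not kernel code. Assumes PAGE_SHIFT == 12,
   * VA_BITS == 48 and sizeof(struct page) == 64, so
   * VMEMMAP_SHIFT == PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT == 12 - 6 == 6.
   */
  #define VA_BITS         48
  #define VMEMMAP_SHIFT   6

  #define PAGE_OFFSET     (-(UINT64_C(1) << VA_BITS))                   /* 0xffff000000000000 */
  #define VMEMMAP_START   (-(UINT64_C(1) << (VA_BITS - VMEMMAP_SHIFT))) /* 0xfffffc0000000000 */

  int main(void)
  {
      /* struct page address of the first page of the linear map */
      uint64_t page = VMEMMAP_START;

      /* page_to_virt: one left shift lands on PAGE_OFFSET */
      uint64_t virt = page << VMEMMAP_SHIFT;

      /* virt_to_page: one arithmetic (sign-extending) right shift maps it back */
      uint64_t back = (uint64_t)((int64_t)virt >> VMEMMAP_SHIFT);

      printf("virt == PAGE_OFFSET:   %s\n", virt == PAGE_OFFSET ? "yes" : "no");
      printf("page == VMEMMAP_START: %s\n", back == VMEMMAP_START ? "yes" : "no");
      return 0;
  }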

One happy coincidence resulting from this is that the 48-bit/4k and
52-bit/64k configurations (which are assumed to be the two most
prevalent) end up with the same placement of the vmemmap region, as the
arithmetic below shows. In a subsequent patch, we will take advantage of
this and unify the memory maps even further.
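
Concretely, assuming STRUCT_PAGE_MAX_SHIFT == 6 (sizeof(struct page) == 64),
the start of the region works out the same in both cases:

  48-bit/4k:  VMEMMAP_SHIFT = 12 - 6 = 6   VMEMMAP_START = -(1 << (48 - 6))  = 0xfffffc0000000000
  52-bit/64k: VMEMMAP_SHIFT = 16 - 6 = 10  VMEMMAP_START = -(1 << (52 - 10)) = 0xfffffc0000000000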

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 Documentation/arm64/memory.rst  | 30 ++++++++++----------
 arch/arm64/include/asm/memory.h | 14 ++++-----
 arch/arm64/mm/init.c            |  2 ++
 3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
index ee51eb66a578..476edb6015b2 100644
--- a/Documentation/arm64/memory.rst
+++ b/Documentation/arm64/memory.rst
@@ -35,14 +35,14 @@ AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
 [ ffff600000000000	ffff7fffffffffff ]	  32TB		[ kasan shadow region ]
   ffff800000000000	ffff800007ffffff	 128MB		bpf jit region
   ffff800008000000	ffff80000fffffff	 128MB		modules
-  ffff800010000000	fffffdffbffeffff	 125TB		vmalloc
-  fffffdffbfff0000	fffffdfffe5f8fff	~998MB		[guard region]
-  fffffdfffe5f9000	fffffdfffe9fffff	4124KB		fixed mappings
-  fffffdfffea00000	fffffdfffebfffff	   2MB		[guard region]
-  fffffdfffec00000	fffffdffffbfffff	  16MB		PCI I/O space
-  fffffdffffc00000	fffffdffffdfffff	   2MB		[guard region]
-  fffffdffffe00000	ffffffffffdfffff	   2TB		vmemmap
-  ffffffffffe00000	ffffffffffffffff	   2MB		[guard region]
+  ffff800010000000	fffffbffbffeffff	 123TB		vmalloc
+  fffffbffbfff0000	fffffbfffe7f8fff	~998MB		[guard region]
+  fffffbfffe7f9000	fffffbfffebfffff	4124KB		fixed mappings
+  fffffbfffec00000	fffffbfffedfffff	   2MB		[guard region]
+  fffffbfffee00000	fffffbffffdfffff	  16MB		PCI I/O space
+  fffffbffffe00000	fffffbffffffffff	   2MB		[guard region]
+  fffffc0000000000	fffffdffffffffff	   2TB		vmemmap
+  fffffe0000000000	ffffffffffffffff	   2TB		[guard region]
 
 
 AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support)::
@@ -55,13 +55,13 @@ AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support):
   ffff800000000000	ffff800007ffffff	 128MB		bpf jit region
   ffff800008000000	ffff80000fffffff	 128MB		modules
   ffff800010000000	fffff81ffffeffff	 120TB		vmalloc
-  fffff81fffff0000	fffffc1ffe58ffff	  ~3TB		[guard region]
-  fffffc1ffe590000	fffffc1ffe9fffff	4544KB		fixed mappings
-  fffffc1ffea00000	fffffc1ffebfffff	   2MB		[guard region]
-  fffffc1ffec00000	fffffc1fffbfffff	  16MB		PCI I/O space
-  fffffc1fffc00000	fffffc1fffdfffff	   2MB		[guard region]
-  fffffc1fffe00000	ffffffffffdfffff	3968GB		vmemmap
-  ffffffffffe00000	ffffffffffffffff	   2MB		[guard region]
+  fffff81fffff0000	fffffbfffe38ffff	  ~3TB		[guard region]
+  fffffbfffe390000	fffffbfffebfffff	4544KB		fixed mappings
+  fffffbfffec00000	fffffbfffedfffff	   2MB		[guard region]
+  fffffbfffee00000	fffffbffffdfffff	  16MB		PCI I/O space
+  fffffbffffe00000	fffffbffffffffff	   2MB		[guard region]
+  fffffc0000000000	ffffffdfffffffff	  ~4TB		vmemmap
+  ffffffe000000000	ffffffffffffffff	 128GB		[guard region]
 
 
 Translation table lookup with 4KB pages::
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 58932f434433..39ea35f7e34e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -30,8 +30,8 @@
  * keep a constant PAGE_OFFSET and "fallback" to using the higher end
  * of the VMEMMAP where 52-bit support is not available in hardware.
  */
-#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
-			>> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
+#define VMEMMAP_SHIFT	(PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE	((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)
 
 /*
  * PAGE_OFFSET - the virtual address of the start of the linear map, at the
@@ -50,7 +50,7 @@
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR		(BPF_JIT_REGION_END)
 #define MODULES_VSIZE		(SZ_128M)
-#define VMEMMAP_START		(-VMEMMAP_SIZE - SZ_2M)
+#define VMEMMAP_START		(-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
 #define VMEMMAP_END		(VMEMMAP_START + VMEMMAP_SIZE)
 #define PCI_IO_END		(VMEMMAP_START - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
@@ -303,15 +303,13 @@ static inline void *phys_to_virt(phys_addr_t x)
 #else
 #define page_to_virt(x)	({						\
 	__typeof__(x) __page = x;					\
-	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
-	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
+	u64 __addr = (u64)__page << VMEMMAP_SHIFT;			\
 	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
 })
 
 #define virt_to_page(x)	({						\
-	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
-	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
-	(struct page *)__addr;						\
+	u64 __addr = __tag_reset((u64)(x)) & PAGE_MASK;			\
+	(struct page *)((s64)__addr >> VMEMMAP_SHIFT);			\
 })
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9090779dd3cd..f0599ae73b8d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -504,6 +504,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
+	BUILD_BUG_ON(!is_power_of_2(sizeof(struct page)));
+
 	if (swiotlb_force == SWIOTLB_FORCE ||
 	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
 		swiotlb_init(1);
-- 
2.17.1
