[kvm-unit-tests PATCH 21/33] lib/alloc_page: Add shared page allocation support

Suzuki K Poulose suzuki.poulose at arm.com
Fri Apr 12 03:33:56 PDT 2024


From: Joey Gouly <joey.gouly at arm.com>

Add support for allocating pages that can be shared with the host, in
other words decrypted pages. This is achieved by adding hooks for
marking a memory region as "encrypted" or "decrypted", which can be
overridden by the architecture-specific backends.

Also add a new flag, FLAG_SHARED, for allocating shared pages.

The page allocation/free routines also get "_shared_" variants. These
will later be used for Realm support and tests.
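
For illustration only (not part of this patch), a test could use the
new helpers roughly as follows; the page returned by
alloc_page_shared() stays decrypted (host-visible) until it is handed
back via free_page_shared(), which re-encrypts it before returning it
to the free lists:

  #include <libcflat.h>
  #include <alloc_page.h>

  static void example_shared_page(void)
  {
          /* Allocate one zeroed page and mark it decrypted (shared). */
          void *p = alloc_page_shared();

          assert(p);

          /* Data written here is visible to the (untrusted) host. */
          *(volatile u32 *)p = 0x12345678;

          /* The page is set back to encrypted before it is freed. */
          free_page_shared(p);
  }

Higher orders or non-zeroing behaviour can be obtained by calling
alloc_pages_flags() directly with FLAG_SHARED or'ed in, e.g.
FLAG_SHARED | FLAG_DONTZERO.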

Signed-off-by: Joey Gouly <joey.gouly at arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose at arm.com>
---
 lib/alloc_page.c     | 20 +++++++++++++++++---
 lib/alloc_page.h     | 24 ++++++++++++++++++++++++
 lib/asm-generic/io.h | 12 ++++++++++++
 3 files changed, 53 insertions(+), 3 deletions(-)

diff --git a/lib/alloc_page.c b/lib/alloc_page.c
index 84f01e11..e253cd1d 100644
--- a/lib/alloc_page.c
+++ b/lib/alloc_page.c
@@ -263,7 +263,7 @@ static bool coalesce(struct mem_area *a, u8 order, pfn_t pfn, pfn_t pfn2)
  * - no pages in the memory block were already free
  * - no pages in the memory block are special
  */
-static void _free_pages(void *mem)
+static void _free_pages(void *mem, u32 flags)
 {
 	pfn_t pfn2, pfn = virt_to_pfn(mem);
 	struct mem_area *a = NULL;
@@ -281,6 +281,9 @@ static void _free_pages(void *mem)
 	p = pfn - a->base;
 	order = a->page_states[p] & ORDER_MASK;
 
+	if (flags & FLAG_SHARED)
+		set_memory_encrypted((unsigned long)mem, BIT(order) * PAGE_SIZE);
+
 	/* ensure that the first page is allocated and not special */
 	assert(IS_ALLOCATED(a->page_states[p]));
 	/* ensure that the order has a sane value */
@@ -320,7 +323,14 @@ static void _free_pages(void *mem)
 void free_pages(void *mem)
 {
 	spin_lock(&lock);
-	_free_pages(mem);
+	_free_pages(mem, 0);
+	spin_unlock(&lock);
+}
+
+void free_pages_shared(void *mem)
+{
+	spin_lock(&lock);
+	_free_pages(mem, FLAG_SHARED);
 	spin_unlock(&lock);
 }
 
@@ -353,7 +363,7 @@ static void _unreserve_one_page(pfn_t pfn)
 	i = pfn - a->base;
 	assert(a->page_states[i] == STATUS_SPECIAL);
 	a->page_states[i] = STATUS_ALLOCATED;
-	_free_pages(pfn_to_virt(pfn));
+	_free_pages(pfn_to_virt(pfn), 0);
 }
 
 int reserve_pages(phys_addr_t addr, size_t n)
@@ -401,6 +411,10 @@ static void *page_memalign_order_flags(u8 al, u8 ord, u32 flags)
 		if (area & BIT(i))
 			res = page_memalign_order(areas + i, al, ord, fresh);
 	spin_unlock(&lock);
+
+	if (res && (flags & FLAG_SHARED))
+		set_memory_decrypted((unsigned long)res, BIT(ord) * PAGE_SIZE);
+
 	if (res && !(flags & FLAG_DONTZERO))
 		memset(res, 0, BIT(ord) * PAGE_SIZE);
 	return res;
diff --git a/lib/alloc_page.h b/lib/alloc_page.h
index 060e0418..8c1ea7b5 100644
--- a/lib/alloc_page.h
+++ b/lib/alloc_page.h
@@ -21,6 +21,7 @@
 
 #define FLAG_DONTZERO	0x10000
 #define FLAG_FRESH	0x20000
+#define FLAG_SHARED	0x40000
 
 /* Returns true if the page allocator has been initialized */
 bool page_alloc_initialized(void);
@@ -121,4 +122,27 @@ int reserve_pages(phys_addr_t addr, size_t npages);
  */
 void unreserve_pages(phys_addr_t addr, size_t npages);
 
+/* Shared page operations */
+static inline void *alloc_pages_shared(unsigned int order)
+{
+	return alloc_pages_flags(order, FLAG_SHARED);
+}
+
+static inline void *alloc_page_shared(void)
+{
+	return alloc_pages_shared(0);
+}
+
+void free_pages_shared(void *mem);
+
+static inline void free_page_shared(void *page)
+{
+	free_pages_shared(page);
+}
+
+static inline void free_pages_shared_by_order(void *mem, unsigned int order)
+{
+	free_pages_shared(mem);
+}
+
 #endif
diff --git a/lib/asm-generic/io.h b/lib/asm-generic/io.h
index dc0f46f5..fb65184b 100644
--- a/lib/asm-generic/io.h
+++ b/lib/asm-generic/io.h
@@ -214,4 +214,16 @@ static inline void *phys_to_virt(unsigned long address)
 }
 #endif
 
+#ifndef set_memory_encrypted
+static inline void set_memory_encrypted(unsigned long mem, size_t size)
+{
+}
+#endif
+
+#ifndef set_memory_decrypted
+static inline void set_memory_decrypted(unsigned long mem, size_t size)
+{
+}
+#endif
+
 #endif /* _ASM_GENERIC_IO_H_ */
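
The asm-generic stubs above are empty no-ops guarded by #ifndef, so an
architecture backend can override them from its own asm/io.h before it
includes <asm-generic/io.h>. A minimal sketch of that pattern follows;
arch_share_memory()/arch_protect_memory() are hypothetical helpers
standing in for whatever mechanism the platform provides, and the
Realm-specific implementation is outside the scope of this patch:

  /* lib/arm64/asm/io.h (sketch only) */

  static inline void set_memory_decrypted(unsigned long mem, size_t size)
  {
          /* Make [mem, mem + size) accessible to the host
           * (arch_share_memory() is a placeholder). */
          arch_share_memory(mem, size);
  }
  #define set_memory_decrypted set_memory_decrypted

  static inline void set_memory_encrypted(unsigned long mem, size_t size)
  {
          /* Reclaim [mem, mem + size) as private/protected memory
           * (arch_protect_memory() is a placeholder). */
          arch_protect_memory(mem, size);
  }
  #define set_memory_encrypted set_memory_encrypted

  #include <asm-generic/io.h>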
-- 
2.34.1