[RFC PATCH v2 15/58] KVM: arm64: iommu: Add a memory pool for the IOMMU

Mostafa Saleh smostafa at google.com
Thu Dec 12 10:03:39 PST 2024


This patch defines a new hypervisor allocator, which is an instance of
the hyp buddy allocator.
IOMMU drivers will use this allocator for their page tables; these
pages generally have two properties:
- Can be multi-order
- Can be non-coherent

The interface provides functions and wrappers for these types of
allocations.

The IOMMU hypervisor code hooks into the allocator management
interface, which provides a standardized way for the kernel part of
the IOMMU driver to top up the allocator, and for pKVM to reclaim
pages from it through the shrinker.

Also, when the allocation function fails to allocate memory from the
pool, it automatically creates a request, so it is sufficient for the
hypervisor driver to return an error code; the kernel part of the
driver then checks the pending requests on return and refills the
hypervisor allocator.
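
For illustration only (not part of this patch, and the helper name is
hypothetical), a hyp-side driver allocating a page-table page only
needs something like:

/* Hypothetical hyp-side helper, for illustration only. */
static void *driver_alloc_pgtable_page(bool non_coherent)
{
	void *p;

	/* One order-0 page from the common IOMMU pool. */
	p = non_coherent ? kvm_iommu_donate_page_nc() :
			   kvm_iommu_donate_page();

	/*
	 * NULL means the pool is empty; kvm_iommu_donate_pages() has
	 * already filled a KVM_HYP_REQ_TYPE_MEM request on this CPU,
	 * so the caller just propagates the failure (e.g. -ENOMEM).
	 */
	return p;
}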

Signed-off-by: Mostafa Saleh <smostafa at google.com>
---
 arch/arm64/include/asm/kvm_host.h       |  1 +
 arch/arm64/kvm/hyp/include/nvhe/iommu.h | 13 ++++
 arch/arm64/kvm/hyp/include/nvhe/mm.h    |  1 +
 arch/arm64/kvm/hyp/nvhe/alloc_mgt.c     |  2 +
 arch/arm64/kvm/hyp/nvhe/iommu/iommu.c   | 86 +++++++++++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/mm.c            | 17 +++++
 6 files changed, 120 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 54416cfea573..a3b5d8dd8995 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1625,6 +1625,7 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
 
 /* Allocator interface IDs. */
 #define HYP_ALLOC_MGT_HEAP_ID		0
+#define HYP_ALLOC_MGT_IOMMU_ID		1
 
 unsigned long __pkvm_reclaim_hyp_alloc_mgt(unsigned long nr_pages);
 
diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
index 908863f07b0b..5f91605cd48a 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -4,6 +4,8 @@
 
 #include <asm/kvm_host.h>
 
+#include <nvhe/alloc_mgt.h>
+
 /* Hypercall handlers */
 int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type);
 int kvm_iommu_free_domain(pkvm_handle_t domain_id);
@@ -18,10 +20,21 @@ size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
 			     size_t pgsize, size_t pgcount);
 phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova);
 
+/* Flags for memory allocation for IOMMU drivers */
+#define IOMMU_PAGE_NOCACHE				BIT(0)
+void *kvm_iommu_donate_pages(u8 order, int flags);
+void kvm_iommu_reclaim_pages(void *p, u8 order);
+
+#define kvm_iommu_donate_page()		kvm_iommu_donate_pages(0, 0)
+#define kvm_iommu_donate_page_nc()	kvm_iommu_donate_pages(0, IOMMU_PAGE_NOCACHE)
+#define kvm_iommu_reclaim_page(p)	kvm_iommu_reclaim_pages(p, 0)
+
 struct kvm_iommu_ops {
 	int (*init)(void);
 };
 
 int kvm_iommu_init(void);
 
+extern struct hyp_mgt_allocator_ops kvm_iommu_allocator_ops;
+
 #endif /* __ARM64_KVM_NVHE_IOMMU_H__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 5d33aca7d686..7b425f811efb 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -37,6 +37,7 @@ int __hyp_allocator_map(unsigned long start, phys_addr_t phys);
 int __pkvm_map_module_page(u64 pfn, void *va, enum kvm_pgtable_prot prot, bool is_protected);
 void __pkvm_unmap_module_page(u64 pfn, void *va);
 void *__pkvm_alloc_module_va(u64 nr_pages);
+int pkvm_remap_range(void *va, int nr_pages, bool nc);
 #ifdef CONFIG_NVHE_EL2_DEBUG
 void assert_in_mod_range(unsigned long addr);
 #else
diff --git a/arch/arm64/kvm/hyp/nvhe/alloc_mgt.c b/arch/arm64/kvm/hyp/nvhe/alloc_mgt.c
index 4a0f33b9820a..cfd903b30427 100644
--- a/arch/arm64/kvm/hyp/nvhe/alloc_mgt.c
+++ b/arch/arm64/kvm/hyp/nvhe/alloc_mgt.c
@@ -7,9 +7,11 @@
 
 #include <nvhe/alloc.h>
 #include <nvhe/alloc_mgt.h>
+#include <nvhe/iommu.h>
 
 static struct hyp_mgt_allocator_ops *registered_allocators[] = {
 	[HYP_ALLOC_MGT_HEAP_ID] = &hyp_alloc_ops,
+	[HYP_ALLOC_MGT_IOMMU_ID] = &kvm_iommu_allocator_ops,
 };
 
 #define MAX_ALLOC_ID		(ARRAY_SIZE(registered_allocators))
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
index 9022fd612a49..af6ae9b4dc51 100644
--- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
@@ -5,15 +5,101 @@
  * Copyright (C) 2022 Linaro Ltd.
  */
 #include <nvhe/iommu.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/mm.h>
 
 /* Only one set of ops supported, similary to the kernel */
 struct kvm_iommu_ops *kvm_iommu_ops;
 
+/*
+ * Common pool that can be used by IOMMU drivers to allocate pages.
+ */
+static struct hyp_pool iommu_host_pool;
+
+DECLARE_PER_CPU(struct kvm_hyp_req, host_hyp_reqs);
+
+static int kvm_iommu_refill(struct kvm_hyp_memcache *host_mc)
+{
+	if (!kvm_iommu_ops)
+		return -EINVAL;
+
+	return refill_hyp_pool(&iommu_host_pool, host_mc);
+}
+
+static void kvm_iommu_reclaim(struct kvm_hyp_memcache *host_mc, int target)
+{
+	if (!kvm_iommu_ops)
+		return;
+
+	reclaim_hyp_pool(&iommu_host_pool, host_mc, target);
+}
+
+static int kvm_iommu_reclaimable(void)
+{
+	if (!kvm_iommu_ops)
+		return 0;
+
+	return hyp_pool_free_pages(&iommu_host_pool);
+}
+
+struct hyp_mgt_allocator_ops kvm_iommu_allocator_ops = {
+	.refill = kvm_iommu_refill,
+	.reclaim = kvm_iommu_reclaim,
+	.reclaimable = kvm_iommu_reclaimable,
+};
+
+void *kvm_iommu_donate_pages(u8 order, int flags)
+{
+	void *p;
+	struct kvm_hyp_req *req = this_cpu_ptr(&host_hyp_reqs);
+	int ret;
+
+	p = hyp_alloc_pages(&iommu_host_pool, order);
+	if (p) {
+		/*
+		 * If the page is requested as non-cacheable, remap it as
+		 * such, as all pages in the pool are mapped beforehand and
+		 * assumed to be cacheable.
+		 */
+		if (flags & IOMMU_PAGE_NOCACHE) {
+			ret = pkvm_remap_range(p, 1 << order, true);
+			if (ret) {
+				hyp_put_page(&iommu_host_pool, p);
+				return NULL;
+			}
+		}
+		return p;
+	}
+
+	req->type = KVM_HYP_REQ_TYPE_MEM;
+	req->mem.dest = REQ_MEM_DEST_HYP_IOMMU;
+	req->mem.sz_alloc = (1 << order) * PAGE_SIZE;
+	req->mem.nr_pages = 1;
+	return NULL;
+}
+
+void kvm_iommu_reclaim_pages(void *p, u8 order)
+{
+	/*
+	 * Remap all pages to cacheable, as we don't track their state here;
+	 * maybe use a flag in the vmemmap, or trust the driver to pass the
+	 * same cacheability on free as at allocation?
+	 */
+	pkvm_remap_range(p, 1 << order, false);
+	hyp_put_page(&iommu_host_pool, p);
+}
+
 int kvm_iommu_init(void)
 {
+	int ret;
+
 	if (!kvm_iommu_ops || !kvm_iommu_ops->init)
 		return -ENODEV;
 
+	ret = hyp_pool_init_empty(&iommu_host_pool, 64);
+	if (ret)
+		return ret;
+
 	return kvm_iommu_ops->init();
 }
 
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 76bbb4c9012e..7a18b31538ae 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -564,3 +564,20 @@ int reclaim_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *host_mc,
 
 	return 0;
 }
+
+/* Remap hyp memory with different cacheability */
+int pkvm_remap_range(void *va, int nr_pages, bool nc)
+{
+	size_t size = nr_pages << PAGE_SHIFT;
+	phys_addr_t phys = hyp_virt_to_phys(va);
+	enum kvm_pgtable_prot prot = PKVM_HOST_MEM_PROT;
+	int ret;
+
+	if (nc)
+		prot |= KVM_PGTABLE_PROT_NORMAL_NC;
+	hyp_spin_lock(&pkvm_pgd_lock);
+	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)va, size) != size);
+	ret = kvm_pgtable_hyp_map(&pkvm_pgtable, (u64)va, size, phys, prot);
+	hyp_spin_unlock(&pkvm_pgd_lock);
+	return ret;
+}
-- 
2.47.0.338.g60cca15819-goog



