[PATCH 12/89] KVM: arm64: Add helpers to pin memory shared with hyp
Will Deacon
will at kernel.org
Thu May 19 06:40:47 PDT 2022
From: Quentin Perret <qperret at google.com>
Add helpers allowing the hypervisor to check whether a range of pages
is currently shared by the host and, if so, to 'pin' those pages by
blocking host unshare operations until the memory has been unpinned.
This will allow the hypervisor to take references on host-provided
data structures (struct kvm and such) and be guaranteed that these
pages will remain in a stable state until it decides to release them,
e.g. during guest teardown.
Signed-off-by: Quentin Perret <qperret at google.com>
---
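[ Illustrative sketch, not part of the patch: how a hyp-side caller
  might use the new helpers to keep a host-provided object stable
  while it is in use. The hyp_use_host_struct() wrapper below is
  hypothetical; only hyp_pin_shared_mem() and hyp_unpin_shared_mem()
  are introduced by this patch. ]

static int hyp_use_host_struct(struct kvm *host_kvm)
{
	void *from = host_kvm;
	void *to = from + sizeof(*host_kvm);
	int ret;

	/* Fails unless the whole range is currently shared by the host. */
	ret = hyp_pin_shared_mem(from, to);
	if (ret)
		return ret;

	/*
	 * The pages backing *host_kvm now hold an EL2 refcount, so any
	 * host attempt to unshare them is refused with -EBUSY until we
	 * unpin below.
	 */

	hyp_unpin_shared_mem(from, to);
	return 0;
}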
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  3 ++
 arch/arm64/kvm/hyp/include/nvhe/memory.h      |  7 ++-
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 48 +++++++++++++++++++
 3 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index c87b19b2d468..998bf165af71 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -69,6 +69,9 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
 int kvm_host_prepare_stage2(void *pgt_pool_base);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
+int hyp_pin_shared_mem(void *from, void *to);
+void hyp_unpin_shared_mem(void *from, void *to);
+
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index 418b66a82a50..e8a78b72aabf 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -51,10 +51,15 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
 	p->refcount++;
 }
 
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+static inline void hyp_page_ref_dec(struct hyp_page *p)
 {
 	BUG_ON(!p->refcount);
 	p->refcount--;
+}
+
+static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+{
+	hyp_page_ref_dec(p);
 	return (p->refcount == 0);
 }
 
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index a7156fd13bc8..1262dbae7f06 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -625,6 +625,9 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 {
 	u64 size = tx->nr_pages * PAGE_SIZE;
 
+	if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
+		return -EBUSY;
+
 	if (__hyp_ack_skip_pgtable_check(tx))
 		return 0;
 
@@ -1038,3 +1041,48 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
 
 	return ret;
 }
+
+int hyp_pin_shared_mem(void *from, void *to)
+{
+	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+	u64 end = PAGE_ALIGN((u64)to);
+	u64 size = end - start;
+	int ret;
+
+	host_lock_component();
+	hyp_lock_component();
+
+	ret = __host_check_page_state_range(__hyp_pa(start), size,
+					    PKVM_PAGE_SHARED_OWNED);
+	if (ret)
+		goto unlock;
+
+	ret = __hyp_check_page_state_range(start, size,
+					   PKVM_PAGE_SHARED_BORROWED);
+	if (ret)
+		goto unlock;
+
+	for (cur = start; cur < end; cur += PAGE_SIZE)
+		hyp_page_ref_inc(hyp_virt_to_page(cur));
+
+unlock:
+	hyp_unlock_component();
+	host_unlock_component();
+
+	return ret;
+}
+
+void hyp_unpin_shared_mem(void *from, void *to)
+{
+	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+	u64 end = PAGE_ALIGN((u64)to);
+
+	host_lock_component();
+	hyp_lock_component();
+
+	for (cur = start; cur < end; cur += PAGE_SIZE)
+		hyp_page_ref_dec(hyp_virt_to_page(cur));
+
+	hyp_unlock_component();
+	host_unlock_component();
+}
--
2.36.1.124.g0e6072fb45-goog