[RFC PATCH v2 12/58] KVM: arm64: Add __pkvm_{use, unuse}_dma()
Mostafa Saleh
smostafa at google.com
Thu Dec 12 10:03:36 PST 2024
When a page is mapped in an IOMMU page table for DMA, it must
not be donated to a guest or the hypervisor. We ensure this with:
- The host can only map pages that are OWNED
- Any page that is mapped is refcounted
- Donation/sharing is prevented by the refcount check in
  host_request_owned_transition()
- No MMIO transition is allowed beyond the IOMMU MMIO, which
  happens during de-privilege.
If, in the future, shared pages are allowed to be mapped,
similar checks will be needed in host_request_unshare() and
host_ack_unshare().
Add two functions to be called before each IOMMU map and
after each successful IOMMU unmap.
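
For context, here is a minimal sketch of the intended caller pattern
on the hypervisor IOMMU map/unmap path. pkvm_iommu_map()/pkvm_iommu_unmap()
and do_iommu_map()/do_iommu_unmap() are hypothetical placeholders, not
functions added by this series:

static int pkvm_iommu_map(phys_addr_t phys, size_t size)
{
	int ret;

	/* Pin the range and check it is host-owned before mapping it. */
	ret = __pkvm_host_use_dma(phys, size);
	if (ret)
		return ret;

	ret = do_iommu_map(phys, size);
	if (ret)
		/* Map failed: drop the DMA refcounts taken above. */
		__pkvm_host_unuse_dma(phys, size);

	return ret;
}

static int pkvm_iommu_unmap(phys_addr_t phys, size_t size)
{
	int ret = do_iommu_unmap(phys, size);

	if (ret)
		return ret;

	/* Unpin only after the IOMMU mapping is really gone. */
	return __pkvm_host_unuse_dma(phys, size);
}

The refcount taken by __pkvm_host_use_dma() is thus only dropped once
no IOMMU mapping can target the page, which is what blocks donation or
sharing in the meantime.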
Signed-off-by: Mostafa Saleh <smostafa at google.com>
---
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 2 +
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 97 +++++++++++++++++++
2 files changed, 99 insertions(+)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 67466b4941b4..d75e64e59596 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -92,6 +92,8 @@ int __pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
bool __pkvm_check_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
u64 ipa, u64 *ppa);
+int __pkvm_host_use_dma(u64 phys_addr, size_t size);
+int __pkvm_host_unuse_dma(u64 phys_addr, size_t size);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d14f4d63eb8b..0840af20c366 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -513,6 +513,20 @@ bool addr_is_memory(phys_addr_t phys)
return !!find_mem_range(phys, &range);
}
+static bool is_range_refcounted(phys_addr_t addr, u64 nr_pages)
+{
+ struct hyp_page *p;
+ int i;
+
+ for (i = 0; i < nr_pages; ++i) {
+ p = hyp_phys_to_page(addr + i * PAGE_SIZE);
+ if (hyp_refcount_get(p->refcount))
+ return true;
+ }
+
+ return false;
+}
+
static bool addr_is_allowed_memory(phys_addr_t phys)
{
struct memblock_region *reg;
@@ -927,6 +941,9 @@ static int host_request_owned_transition(u64 *completer_addr,
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
+ if (range_is_memory(addr, addr + size) && is_range_refcounted(addr, tx->nr_pages))
+ return -EINVAL;
+
*completer_addr = tx->initiator.host.completer_addr;
return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
@@ -938,6 +955,7 @@ static int host_request_unshare(u64 *completer_addr,
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
+
return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
@@ -2047,6 +2065,85 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
return ret;
}
+static void __pkvm_host_use_dma_page(phys_addr_t phys_addr)
+{
+ struct hyp_page *p = hyp_phys_to_page(phys_addr);
+
+ hyp_page_ref_inc(p);
+}
+
+static void __pkvm_host_unuse_dma_page(phys_addr_t phys_addr)
+{
+ struct hyp_page *p = hyp_phys_to_page(phys_addr);
+
+ hyp_page_ref_dec(p);
+}
+
+/*
+ * __pkvm_host_use_dma - Mark host memory as used for DMA
+ * @phys_addr: physical address of the DMA region
+ * @size: size of the DMA region
+ * When a page is mapped in an IOMMU page table for DMA, it must
+ * not be donated to a guest or the hypervisor. We ensure this with:
+ * - Host can only map pages that are OWNED
+ * - Any page that is mapped is refcounted
+ * - Donation/Sharing is prevented from refcount check in
+ * host_request_owned_transition()
+ * - No MMIO transition is allowed beyond the IOMMU MMIO, which
+ * happens during de-privilege.
+ * If, in the future, shared pages are allowed to be mapped,
+ * similar checks will be needed in host_request_unshare() and
+ * host_ack_unshare().
+ */
+int __pkvm_host_use_dma(phys_addr_t phys_addr, size_t size)
+{
+ int i;
+ int ret = 0;
+ size_t nr_pages = size >> PAGE_SHIFT;
+
+ if (WARN_ON(!PAGE_ALIGNED(phys_addr | size)))
+ return -EINVAL;
+
+ host_lock_component();
+ ret = __host_check_page_state_range(phys_addr, size, PKVM_PAGE_OWNED);
+ if (ret)
+ goto out_ret;
+
+ if (!range_is_memory(phys_addr, phys_addr + size))
+ goto out_ret;
+
+ for (i = 0; i < nr_pages; i++)
+ __pkvm_host_use_dma_page(phys_addr + i * PAGE_SIZE);
+
+out_ret:
+ host_unlock_component();
+ return ret;
+}
+
+int __pkvm_host_unuse_dma(phys_addr_t phys_addr, size_t size)
+{
+ int i;
+ size_t nr_pages = size >> PAGE_SHIFT;
+
+ if (WARN_ON(!PAGE_ALIGNED(phys_addr | size)))
+ return -EINVAL;
+
+ host_lock_component();
+ if (!range_is_memory(phys_addr, phys_addr + size))
+ goto out_ret;
+ /*
+ * We end up here after the caller successfully unmapped the page from
+ * the IOMMU table, which means a ref is held and the page is shared
+ * in the host s2, so this cannot fail.
+ */
+ for (i = 0; i < nr_pages; i++)
+ __pkvm_host_unuse_dma_page(phys_addr + i * PAGE_SIZE);
+
+out_ret:
+ host_unlock_component();
+ return 0;
+}
+
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
enum kvm_pgtable_prot prot)
{
--
2.47.0.338.g60cca15819-goog