[RFC PATCH 26/45] KVM: arm64: smmu-v3: Support io-pgtable
Jean-Philippe Brucker
jean-philippe at linaro.org
Wed Feb 1 04:53:10 PST 2023
Implement the hypervisor version of io-pgtable allocation functions,
mirroring drivers/iommu/io-pgtable-arm.c. Page allocation uses the IOMMU
memcache filled by the host, except for the PGD, which may be larger than
a page.
Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
---
arch/arm64/kvm/hyp/nvhe/Makefile | 2 +
arch/arm64/kvm/hyp/include/nvhe/iommu.h | 7 ++
include/linux/io-pgtable-arm.h | 6 ++
.../arm64/kvm/hyp/nvhe/iommu/io-pgtable-arm.c | 97 +++++++++++++++++++
4 files changed, 112 insertions(+)
create mode 100644 arch/arm64/kvm/hyp/nvhe/iommu/io-pgtable-arm.c
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 349c874762c8..8359909bd796 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -30,6 +30,8 @@ hyp-obj-y += $(lib-objs)
hyp-obj-$(CONFIG_KVM_IOMMU) += iommu/iommu.o
hyp-obj-$(CONFIG_ARM_SMMU_V3_PKVM) += iommu/arm-smmu-v3.o
+hyp-obj-$(CONFIG_ARM_SMMU_V3_PKVM) += iommu/io-pgtable-arm.o \
+ ../../../../../drivers/iommu/io-pgtable-arm-common.o
##
## Build rules for compiling nVHE hyp code
diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
index 0ba59d20bef3..c7744cca6e13 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -6,7 +6,14 @@
#include <linux/io-pgtable.h>
#if IS_ENABLED(CONFIG_ARM_SMMU_V3_PKVM)
+#include <linux/io-pgtable-arm.h>
+
int kvm_arm_smmu_v3_register(void);
+
+int kvm_arm_io_pgtable_init(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data);
+int kvm_arm_io_pgtable_alloc(struct io_pgtable *iop, unsigned long pgd_hva);
+int kvm_arm_io_pgtable_free(struct io_pgtable *iop);
#else /* CONFIG_ARM_SMMU_V3_PKVM */
static inline int kvm_arm_smmu_v3_register(void)
{
diff --git a/include/linux/io-pgtable-arm.h b/include/linux/io-pgtable-arm.h
index 2b3e69386d08..b89b8ec57721 100644
--- a/include/linux/io-pgtable-arm.h
+++ b/include/linux/io-pgtable-arm.h
@@ -161,8 +161,14 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}
+#ifdef __KVM_NVHE_HYPERVISOR__
+#include <nvhe/memory.h>
+#define __arm_lpae_virt_to_phys hyp_virt_to_phys
+#define __arm_lpae_phys_to_virt hyp_phys_to_virt
+#else
#define __arm_lpae_virt_to_phys __pa
#define __arm_lpae_phys_to_virt __va
+#endif
/* Generic functions */
void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/io-pgtable-arm.c b/arch/arm64/kvm/hyp/nvhe/iommu/io-pgtable-arm.c
new file mode 100644
index 000000000000..a46490acb45c
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/io-pgtable-arm.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Arm Ltd.
+ */
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+#include <kvm/arm_smmu_v3.h>
+#include <linux/types.h>
+#include <linux/gfp_types.h>
+#include <linux/io-pgtable-arm.h>
+
+#include <nvhe/iommu.h>
+#include <nvhe/mem_protect.h>
+
+bool __ro_after_init selftest_running;
+
+void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, struct io_pgtable_cfg *cfg)
+{
+ void *addr = kvm_iommu_donate_page();
+
+ BUG_ON(size != PAGE_SIZE);
+
+ if (addr && !cfg->coherent_walk)
+ kvm_flush_dcache_to_poc(addr, size);
+
+ return addr;
+}
+
+void __arm_lpae_free_pages(void *addr, size_t size, struct io_pgtable_cfg *cfg)
+{
+ BUG_ON(size != PAGE_SIZE);
+
+ if (!cfg->coherent_walk)
+ kvm_flush_dcache_to_poc(addr, size);
+
+ kvm_iommu_reclaim_page(addr);
+}
+
+void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
+ struct io_pgtable_cfg *cfg)
+{
+ if (!cfg->coherent_walk)
+ kvm_flush_dcache_to_poc(ptep, sizeof(*ptep) * num_entries);
+}
+
+int kvm_arm_io_pgtable_init(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+{
+ int ret = arm_lpae_init_pgtable_s2(cfg, data);
+
+ if (ret)
+ return ret;
+
+ data->iop.cfg = *cfg;
+ return 0;
+}
+
+int kvm_arm_io_pgtable_alloc(struct io_pgtable *iopt, unsigned long pgd_hva)
+{
+ size_t pgd_size, alignment;
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(iopt->ops);
+
+ pgd_size = ARM_LPAE_PGD_SIZE(data);
+ /*
+ * A PGD with eight or more entries must be aligned to its size;
+ * a smaller one must be 64-byte aligned.
+ */
+ alignment = max(pgd_size, 8 * sizeof(arm_lpae_iopte));
+ if (!IS_ALIGNED(pgd_hva, alignment))
+ return -EINVAL;
+
+ iopt->pgd = pkvm_map_donated_memory(pgd_hva, pgd_size);
+ if (!iopt->pgd)
+ return -ENOMEM;
+
+ if (!data->iop.cfg.coherent_walk)
+ kvm_flush_dcache_to_poc(iopt->pgd, pgd_size);
+
+ /* Ensure the empty pgd is visible before any actual TTBR write */
+ wmb();
+
+ return 0;
+}
+
+int kvm_arm_io_pgtable_free(struct io_pgtable *iopt)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(iopt->ops);
+ size_t pgd_size = ARM_LPAE_PGD_SIZE(data);
+
+ if (!data->iop.cfg.coherent_walk)
+ kvm_flush_dcache_to_poc(iopt->pgd, pgd_size);
+
+ /* Free all tables but the pgd */
+ __arm_lpae_free_pgtable(data, data->start_level, iopt->pgd, true);
+ pkvm_unmap_donated_memory(iopt->pgd, pgd_size);
+ return 0;
+}
--
2.39.0
More information about the linux-arm-kernel
mailing list