[PATCH v3 23/29] iommu/arm-smmu-v3-kvm: Support io-pgtable
From: Mostafa Saleh <smostafa@google.com>
Date: Mon Jul 28 10:53:10 PDT 2025
Implement the hypervisor version of the io-pgtable allocation
functions, mirroring drivers/iommu/io-pgtable-arm.c. Pages are
allocated from the IOMMU pool filled by the host.
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
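A caller-side sketch for reference (not part of the patch): the wrapper
below and its cfg values are invented for illustration, the real
configuration comes from the SMMU probing code. It only shows how
kvm_arm_io_pgtable_alloc() and the pool-backed page allocator added
here are meant to be used from hyp:

/*
 * Illustration only: hypothetical hyp-side caller. The pgtable data,
 * the pgd and any table pages allocated later are all drawn from the
 * IOMMU pool that the host fills (kvm_iommu_donate_pages()).
 */
#include <linux/sizes.h>
#include <nvhe/iommu.h>

#include "../../../io-pgtable-arm.h"

static struct io_pgtable *example_alloc_s2_pgtable(void *cookie, int *ret)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,	/* example input address size */
		.oas		= 48,	/* example output address size */
		.coherent_walk	= true,	/* walker is cache-coherent, no CMOs */
	};

	return kvm_arm_io_pgtable_alloc(&cfg, cookie, ARM_64_LPAE_S2, ret);
}

Mappings then go through the ops embedded in the returned structure,
e.g. pgt->ops.map_pages(&pgt->ops, iova, paddr, SZ_4K, 1,
IOMMU_READ | IOMMU_WRITE, 0, &mapped).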
arch/arm64/kvm/hyp/nvhe/Makefile | 4 +-
.../arm/arm-smmu-v3/pkvm/io-pgtable-arm.c | 115 ++++++++++++++++++
drivers/iommu/io-pgtable-arm.h | 15 ++-
3 files changed, 131 insertions(+), 3 deletions(-)
create mode 100644 drivers/iommu/arm/arm-smmu-v3/pkvm/io-pgtable-arm.c
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index c71c96262378..d641a9987152 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -34,7 +34,9 @@ hyp-obj-y += $(lib-objs)
 HYP_SMMU_V3_DRV_PATH = ../../../../../drivers/iommu/arm/arm-smmu-v3
 
 hyp-obj-$(CONFIG_ARM_SMMU_V3_PKVM) += $(HYP_SMMU_V3_DRV_PATH)/pkvm/arm-smmu-v3.o \
-				      $(HYP_SMMU_V3_DRV_PATH)/arm-smmu-v3-common-hyp.o
+				      $(HYP_SMMU_V3_DRV_PATH)/arm-smmu-v3-common-hyp.o \
+				      $(HYP_SMMU_V3_DRV_PATH)/pkvm/io-pgtable-arm.o \
+				      $(HYP_SMMU_V3_DRV_PATH)/../../io-pgtable-arm-common.o
 
 ##
 ## Build rules for compiling nVHE hyp code
diff --git a/drivers/iommu/arm/arm-smmu-v3/pkvm/io-pgtable-arm.c b/drivers/iommu/arm/arm-smmu-v3/pkvm/io-pgtable-arm.c
new file mode 100644
index 000000000000..ce17f21238c8
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/pkvm/io-pgtable-arm.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Arm Ltd.
+ */
+#include <nvhe/iommu.h>
+
+#include "../../../io-pgtable-arm.h"
+
+void arm_lpae_split_blk(void)
+{
+	WARN_ON(1);
+}
+
+void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
+			     struct io_pgtable_cfg *cfg, void *cookie)
+{
+	void *addr;
+
+	if (!PAGE_ALIGNED(size))
+		return NULL;
+
+	addr = kvm_iommu_donate_pages(get_order(size));
+
+	if (addr && !cfg->coherent_walk)
+		kvm_flush_dcache_to_poc(addr, size);
+
+	return addr;
+}
+
+void __arm_lpae_free_pages(void *addr, size_t size, struct io_pgtable_cfg *cfg,
+			   void *cookie)
+{
+	if (!cfg->coherent_walk)
+		kvm_flush_dcache_to_poc(addr, size);
+
+	kvm_iommu_reclaim_pages(addr);
+}
+
+void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
+			 struct io_pgtable_cfg *cfg)
+{
+	if (!cfg->coherent_walk)
+		kvm_flush_dcache_to_poc(ptep, sizeof(*ptep) * num_entries);
+}
+
+static int kvm_arm_io_pgtable_init(struct io_pgtable_cfg *cfg,
+				   enum io_pgtable_fmt fmt,
+				   struct arm_lpae_io_pgtable *data,
+				   void *cookie)
+{
+	int ret = -EINVAL;
+
+	if (fmt == ARM_64_LPAE_S2)
+		ret = arm_lpae_init_pgtable_s2(cfg, data, cookie);
+	else if (fmt == ARM_64_LPAE_S1)
+		ret = arm_lpae_init_pgtable_s1(cfg, data, cookie);
+
+	if (ret)
+		return ret;
+
+	data->iop.cfg = *cfg;
+	data->iop.fmt = fmt;
+	return 0;
+}
+
+struct io_pgtable *kvm_arm_io_pgtable_alloc(struct io_pgtable_cfg *cfg,
+					    void *cookie,
+					    enum io_pgtable_fmt fmt,
+					    int *out_ret)
+{
+	size_t pgd_size;
+	struct arm_lpae_io_pgtable *data;
+	int ret;
+
+	data = kvm_iommu_donate_pages(get_order(sizeof(*data)));
+	if (!data) {
+		*out_ret = -ENOMEM;
+		return NULL;
+	}
+
+	data->iop.ops = (struct io_pgtable_ops) {
+		.map_pages = arm_lpae_map_pages,
+		.unmap_pages = arm_lpae_unmap_pages,
+	};
+
+	ret = kvm_arm_io_pgtable_init(cfg, fmt, data, cookie);
+	if (ret) {
+		*out_ret = ret;
+		goto out_free;
+	}
+	pgd_size = PAGE_ALIGN(ARM_LPAE_PGD_SIZE(data));
+	data->pgd = __arm_lpae_alloc_pages(pgd_size, 0, &data->iop.cfg, cookie);
+	if (!data->pgd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	if (fmt == ARM_64_LPAE_S2)
+		data->iop.cfg.arm_lpae_s2_cfg.vttbr = __arm_lpae_virt_to_phys(data->pgd);
+	else if (fmt == ARM_64_LPAE_S1)
+		data->iop.cfg.arm_lpae_s1_cfg.ttbr = __arm_lpae_virt_to_phys(data->pgd);
+
+	if (!data->iop.cfg.coherent_walk)
+		kvm_flush_dcache_to_poc(data->pgd, pgd_size);
+
+	/* Ensure the empty pgd is visible before any actual TTBR write */
+	wmb();
+
+	*out_ret = 0;
+	return &data->iop;
+out_free:
+	kvm_iommu_reclaim_pages(data);
+	*out_ret = ret;
+	return NULL;
+}
diff --git a/drivers/iommu/io-pgtable-arm.h b/drivers/iommu/io-pgtable-arm.h
index 2807cf563f11..c1450eca934f 100644
--- a/drivers/iommu/io-pgtable-arm.h
+++ b/drivers/iommu/io-pgtable-arm.h
@@ -188,8 +188,19 @@ static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
 	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
 }
 
-#define __arm_lpae_virt_to_phys	__pa
-#define __arm_lpae_phys_to_virt	__va
+#ifdef __KVM_NVHE_HYPERVISOR__
+#include <nvhe/memory.h>
+#define __arm_lpae_virt_to_phys	hyp_virt_to_phys
+#define __arm_lpae_phys_to_virt	hyp_phys_to_virt
+
+struct io_pgtable *kvm_arm_io_pgtable_alloc(struct io_pgtable_cfg *cfg,
+					    void *cookie,
+					    enum io_pgtable_fmt fmt,
+					    int *out_ret);
+#else
+#define __arm_lpae_virt_to_phys	__pa
+#define __arm_lpae_phys_to_virt	__va
+#endif
 
 static inline phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 					  struct arm_lpae_io_pgtable *data)
--
2.50.1.552.g942d659e1b-goog