[RFC PATCH 40/45] iommu/arm-smmu-v3-kvm: Add IOMMU ops
Mostafa Saleh
smostafa at google.com
Tue Feb 7 05:22:11 PST 2023
Hi Jean,
On Wed, Feb 01, 2023 at 12:53:24PM +0000, Jean-Philippe Brucker wrote:
> Forward alloc_domain(), attach_dev(), map_pages(), etc to the
> hypervisor.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
> ---
> .../iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c | 330 +++++++++++++++++-
> 1 file changed, 328 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c
> index 55489d56fb5b..930d78f6e29f 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-kvm.c
> @@ -22,10 +22,28 @@ struct host_arm_smmu_device {
> #define smmu_to_host(_smmu) \
> container_of(_smmu, struct host_arm_smmu_device, smmu);
>
> +struct kvm_arm_smmu_master {
> + struct arm_smmu_device *smmu;
> + struct device *dev;
> + struct kvm_arm_smmu_domain *domain;
> +};
> +
> +struct kvm_arm_smmu_domain {
> + struct iommu_domain domain;
> + struct arm_smmu_device *smmu;
> + struct mutex init_mutex;
> + unsigned long pgd;
> + pkvm_handle_t id;
> +};
> +
> +#define to_kvm_smmu_domain(_domain) \
> + container_of(_domain, struct kvm_arm_smmu_domain, domain)
> +
> static size_t kvm_arm_smmu_cur;
> static size_t kvm_arm_smmu_count;
> static struct hyp_arm_smmu_v3_device *kvm_arm_smmu_array;
> static struct kvm_hyp_iommu_memcache *kvm_arm_smmu_memcache;
> +static DEFINE_IDA(kvm_arm_smmu_domain_ida);
>
> static DEFINE_PER_CPU(local_lock_t, memcache_lock) =
> INIT_LOCAL_LOCK(memcache_lock);
> @@ -57,7 +75,6 @@ static void *kvm_arm_smmu_host_va(phys_addr_t pa)
> return __va(pa);
> }
>
> -__maybe_unused
> static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu)
> {
> struct kvm_hyp_memcache *mc;
> @@ -74,7 +91,6 @@ static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu)
> kvm_arm_smmu_host_pa, smmu);
> }
>
> -__maybe_unused
> static void kvm_arm_smmu_reclaim_memcache(void)
> {
> struct kvm_hyp_memcache *mc;
> @@ -101,6 +117,299 @@ static void kvm_arm_smmu_reclaim_memcache(void)
> __ret; \
> })
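
For context, since the hunk above only shows the tail of
kvm_call_hyp_nvhe_mc(): my reading is that it is a topup-and-retry
wrapper around the HVC, roughly equivalent to the untested sketch
below (using the alloc_domain call from later in this patch as the
example):

	int ret;

	do {
		ret = kvm_call_hyp_nvhe(__pkvm_host_iommu_alloc_domain,
					host_smmu->id, kvm_smmu_domain->id,
					pgd);
		/*
		 * -ENOMEM from the hypervisor means it ran out of
		 * pages for the page tables: refill its memcache from
		 * the host and retry the call.
		 */
	} while (ret == -ENOMEM && !kvm_arm_smmu_topup_memcache(smmu));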
>
> +static struct platform_driver kvm_arm_smmu_driver;
> +
> +static struct arm_smmu_device *
> +kvm_arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
> +{
> + struct device *dev;
> +
> + dev = driver_find_device_by_fwnode(&kvm_arm_smmu_driver.driver, fwnode);
> + put_device(dev);
> + return dev ? dev_get_drvdata(dev) : NULL;
> +}
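
(Not an issue, just noting that this mirrors arm_smmu_get_by_fwnode()
in the regular arm-smmu-v3 driver: the reference taken by
driver_find_device_by_fwnode() can be dropped straight away because
the drvdata stays valid for as long as the driver remains bound.)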
> +
> +static struct iommu_ops kvm_arm_smmu_ops;
> +
> +static struct iommu_device *kvm_arm_smmu_probe_device(struct device *dev)
> +{
> + struct arm_smmu_device *smmu;
> + struct kvm_arm_smmu_master *master;
> + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
> +
> + if (!fwspec || fwspec->ops != &kvm_arm_smmu_ops)
> + return ERR_PTR(-ENODEV);
> +
> + if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
> + return ERR_PTR(-EBUSY);
> +
> + smmu = kvm_arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
> + if (!smmu)
> + return ERR_PTR(-ENODEV);
> +
> + master = kzalloc(sizeof(*master), GFP_KERNEL);
> + if (!master)
> + return ERR_PTR(-ENOMEM);
> +
> + master->dev = dev;
> + master->smmu = smmu;
> + dev_iommu_priv_set(dev, master);
> +
> + return &smmu->iommu;
> +}
> +
> +static void kvm_arm_smmu_release_device(struct device *dev)
> +{
> + struct kvm_arm_smmu_master *master = dev_iommu_priv_get(dev);
> +
> + kfree(master);
> + iommu_fwspec_free(dev);
> +}
> +
> +static struct iommu_domain *kvm_arm_smmu_domain_alloc(unsigned type)
> +{
> + struct kvm_arm_smmu_domain *kvm_smmu_domain;
> +
> + /*
> + * We don't support
> + * - IOMMU_DOMAIN_IDENTITY because we rely on the host telling the
> + * hypervisor which pages are used for DMA.
> + * - IOMMU_DOMAIN_DMA_FQ because lazy unmap would clash with memory
> + * donation to guests.
> + */
> + if (type != IOMMU_DOMAIN_DMA &&
> + type != IOMMU_DOMAIN_UNMANAGED)
> + return NULL;
> +
> + kvm_smmu_domain = kzalloc(sizeof(*kvm_smmu_domain), GFP_KERNEL);
> + if (!kvm_smmu_domain)
> + return NULL;
> +
> + mutex_init(&kvm_smmu_domain->init_mutex);
> +
> + return &kvm_smmu_domain->domain;
> +}
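
To spell out the IOMMU_DOMAIN_DMA_FQ clash mentioned in the comment,
for other readers (my reading, so take it as a hedged sketch): with a
flush queue the SMMU invalidation is deferred past the point where the
host considers the page free, so you could get an ordering like:

	/* Hypothetical DMA_FQ ordering that would break donation: */
	dma_unmap_page(dev, iova, size, DMA_FROM_DEVICE);
	/* The IOVA sits in the flush queue; the SMMU still maps the page */
	__free_page(page);
	/*
	 * The host may now donate the page to a guest while the stale
	 * mapping still lets the device DMA into it.
	 */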
> +
> +static int kvm_arm_smmu_domain_finalize(struct kvm_arm_smmu_domain *kvm_smmu_domain,
> + struct kvm_arm_smmu_master *master)
> +{
> + int ret = 0;
> + struct page *p;
> + unsigned long pgd;
> + struct arm_smmu_device *smmu = master->smmu;
> + struct host_arm_smmu_device *host_smmu = smmu_to_host(smmu);
> +
> + if (kvm_smmu_domain->smmu) {
> + if (kvm_smmu_domain->smmu != smmu)
> + return -EINVAL;
> + return 0;
> + }
> +
> + ret = ida_alloc_range(&kvm_arm_smmu_domain_ida, 0, 1 << smmu->vmid_bits,
> + GFP_KERNEL);
> + if (ret < 0)
> + return ret;
> + kvm_smmu_domain->id = ret;
> +
> + /*
> + * PGD allocation does not use the memcache because it may be of higher
> + * order when concatenated.
> + */
> + p = alloc_pages_node(dev_to_node(smmu->dev), GFP_KERNEL | __GFP_ZERO,
> + host_smmu->pgd_order);
> + if (!p)
> + return -ENOMEM;
> +
> + pgd = (unsigned long)page_to_virt(p);
> +
> + local_lock_irq(&memcache_lock);
> + ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_alloc_domain,
> + host_smmu->id, kvm_smmu_domain->id, pgd);
What is the idea behind postponing this HVC to attach time, rather
than issuing it from the alloc_domain() callback when the domain is
allocated?
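
i.e. I would have expected something along these lines in
kvm_arm_smmu_domain_alloc() (hand-wavy and untested, and I realize the
SMMU instance, and hence host_smmu->id, is not known at that point,
which may well be the reason):

	static struct iommu_domain *kvm_arm_smmu_domain_alloc(unsigned type)
	{
		...
		/* hypothetical: reserve the domain at the hyp up front */
		ret = kvm_call_hyp_nvhe(__pkvm_host_iommu_alloc_domain, ...);
		...
	}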
Thanks,
Mostafa