[RFC PATCH 20/45] KVM: arm64: iommu: Add map() and unmap() operations
Mostafa Saleh
smostafa at google.com
Fri Feb 16 03:59:26 PST 2024
Hi Jean,
On Wed, Feb 01, 2023 at 12:53:04PM +0000, Jean-Philippe Brucker wrote:
> Handle map() and unmap() hypercalls by calling the io-pgtable library.
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe at linaro.org>
> ---
> arch/arm64/kvm/hyp/nvhe/iommu/iommu.c | 144 ++++++++++++++++++++++++++
> 1 file changed, 144 insertions(+)
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
> index 7404ea77ed9f..0550e7bdf179 100644
> --- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
> +++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
> @@ -183,6 +183,150 @@ int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
>         return ret;
> }
>
> +static int __kvm_iommu_unmap_pages(struct io_pgtable *iopt, unsigned long iova,
> +                                   size_t pgsize, size_t pgcount)
> +{
> +        int ret;
> +        size_t unmapped;
> +        phys_addr_t paddr;
> +        size_t total_unmapped = 0;
> +        size_t size = pgsize * pgcount;
> +
> +        while (total_unmapped < size) {
> +                paddr = iopt_iova_to_phys(iopt, iova);
> +                if (paddr == 0)
> +                        return -EINVAL;
> +
> +                /*
> +                 * One page/block at a time, because the range provided may not
> +                 * be physically contiguous, and we need to unshare all physical
> +                 * pages.
> +                 */
> +                unmapped = iopt_unmap_pages(iopt, iova, pgsize, 1, NULL);
> +                if (!unmapped)
> +                        return -EINVAL;
> +
> +                ret = __pkvm_host_unshare_dma(paddr, pgsize);
> +                if (ret)
> +                        return ret;
> +
> +                iova += unmapped;
> +                pgcount -= unmapped / pgsize;
> +                total_unmapped += unmapped;
> +        }
> +
> +        return 0;
> +}
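Just to check my understanding of the comment here: a single unmap request may
cover mappings that are not physically contiguous, e.g.

        IOVA 0x80000000 -> PA 0x81234000
        IOVA 0x80001000 -> PA 0x9abcd000

(addresses made up), so the PA has to be looked up and passed to
__pkvm_host_unshare_dma() one pgsize at a time, right?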
> +
> +#define IOMMU_PROT_MASK (IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE |\
> +                         IOMMU_NOEXEC | IOMMU_MMIO)
Is there a reason IOMMU_PRIV is not allowed?
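If it can be supported, I guess the mask would just grow, something like the
below (assuming the io-pgtable backend already handles IOMMU_PRIV, as
io-pgtable-arm does):

#define IOMMU_PROT_MASK (IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE |\
                         IOMMU_NOEXEC | IOMMU_MMIO | IOMMU_PRIV)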
> +
> +int kvm_iommu_map_pages(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
> +                        unsigned long iova, phys_addr_t paddr, size_t pgsize,
> +                        size_t pgcount, int prot)
> +{
> +        size_t size;
> +        size_t granule;
> +        int ret = -EINVAL;
> +        size_t mapped = 0;
> +        struct io_pgtable iopt;
> +        struct kvm_hyp_iommu *iommu;
> +        size_t pgcount_orig = pgcount;
> +        unsigned long iova_orig = iova;
> +        struct kvm_hyp_iommu_domain *domain;
> +
> +        if (prot & ~IOMMU_PROT_MASK)
> +                return -EINVAL;
> +
> +        if (__builtin_mul_overflow(pgsize, pgcount, &size) ||
> +            iova + size < iova || paddr + size < paddr)
> +                return -EOVERFLOW;
> +
> +        hyp_spin_lock(&iommu_lock);
> +
> +        domain = handle_to_domain(iommu_id, domain_id, &iommu);
> +        if (!domain)
> +                goto err_unlock;
> +
> +        granule = 1 << __ffs(iommu->pgtable->cfg.pgsize_bitmap);
> +        if (!IS_ALIGNED(iova | paddr | pgsize, granule))
> +                goto err_unlock;
> +
> +        ret = __pkvm_host_share_dma(paddr, size, !(prot & IOMMU_MMIO));
> +        if (ret)
> +                goto err_unlock;
> +
> +        iopt = domain_to_iopt(iommu, domain, domain_id);
> +        while (pgcount) {
> +                ret = iopt_map_pages(&iopt, iova, paddr, pgsize, pgcount, prot,
> +                                     0, &mapped);
> +                WARN_ON(!IS_ALIGNED(mapped, pgsize));
> +                pgcount -= mapped / pgsize;
> +                if (ret)
> +                        goto err_unmap;
> +                iova += mapped;
> +                paddr += mapped;
> +        }
> +
> +        hyp_spin_unlock(&iommu_lock);
> +        return 0;
> +
> +err_unmap:
> +        __kvm_iommu_unmap_pages(&iopt, iova_orig, pgsize, pgcount_orig - pgcount);
> +err_unlock:
> +        hyp_spin_unlock(&iommu_lock);
> +        return ret;
> +}
> +
> +int kvm_iommu_unmap_pages(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
> +                          unsigned long iova, size_t pgsize, size_t pgcount)
> +{
> +        size_t size;
> +        size_t granule;
> +        int ret = -EINVAL;
> +        struct io_pgtable iopt;
> +        struct kvm_hyp_iommu *iommu;
> +        struct kvm_hyp_iommu_domain *domain;
> +
> +        if (__builtin_mul_overflow(pgsize, pgcount, &size) ||
> +            iova + size < iova)
> +                return -EOVERFLOW;
> +
> +        hyp_spin_lock(&iommu_lock);
> +        domain = handle_to_domain(iommu_id, domain_id, &iommu);
> +        if (!domain)
> +                goto out_unlock;
> +
> +        granule = 1 << __ffs(iommu->pgtable->cfg.pgsize_bitmap);
> +        if (!IS_ALIGNED(iova | pgsize, granule))
> +                goto out_unlock;
> +
> +        iopt = domain_to_iopt(iommu, domain, domain_id);
> +        ret = __kvm_iommu_unmap_pages(&iopt, iova, pgsize, pgcount);
> +out_unlock:
> +        hyp_spin_unlock(&iommu_lock);
> +        return ret;
> +}
> +
> +phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t iommu_id,
> +                                   pkvm_handle_t domain_id, unsigned long iova)
> +{
> +        phys_addr_t phys = 0;
> +        struct io_pgtable iopt;
> +        struct kvm_hyp_iommu *iommu;
> +        struct kvm_hyp_iommu_domain *domain;
> +
> +        hyp_spin_lock(&iommu_lock);
> +        domain = handle_to_domain(iommu_id, domain_id, &iommu);
> +        if (domain) {
> +                iopt = domain_to_iopt(iommu, domain, domain_id);
> +
> +                phys = iopt_iova_to_phys(&iopt, iova);
> +        }
> +        hyp_spin_unlock(&iommu_lock);
> +        return phys;
> +}
> +
> int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
> {
>         void *domains;
> --
> 2.39.0
>
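One more thing, mostly to confirm I'm reading the interface right: callers pass
a single pgsize per request plus a page count, and unmap reports only
success/failure rather than the number of bytes unmapped. So a hypercall
handler would end up doing something like this (illustration only, not code
from this series):

        /* Map 16 pages of 4K for DMA, then tear the mapping down again. */
        ret = kvm_iommu_map_pages(iommu_id, domain_id, iova, paddr,
                                  SZ_4K, 16, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;
        /* ... DMA ... */
        ret = kvm_iommu_unmap_pages(iommu_id, domain_id, iova, SZ_4K, 16);

Did I get that right?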
Thanks,
Mostafa