[PATCH v7 22/22] iommu/dma: Avoid PCI host bridge windows
Marek Szyprowski
m.szyprowski at samsung.com
Wed Sep 14 03:55:45 PDT 2016
Hi Robin,
On 2016-09-12 18:14, Robin Murphy wrote:
> With our DMA ops enabled for PCI devices, we should avoid allocating
> IOVAs which a host bridge might misinterpret as peer-to-peer DMA and
> lead to faults, corruption or other badness. To be safe, punch out holes
> for all of the relevant host bridge's windows when initialising a DMA
> domain for a PCI device.
>
> CC: Marek Szyprowski <m.szyprowski at samsung.com>
> CC: Inki Dae <inki.dae at samsung.com>
> Reported-by: Lorenzo Pieralisi <lorenzo.pieralisi at arm.com>
> Signed-off-by: Robin Murphy <robin.murphy at arm.com>
I don't know much about PCI and its IOMMU integration, but couldn't we use
the direct mapping region feature of the iommu core for this? There are
already iommu_get_dm_regions(), iommu_put_dm_regions() and
iommu_request_dm_for_dev() functions for handling such reserved regions...
> ---
>
> - Squash in the previous drm/exynos fixup
> - If need be, this one can probably wait
> ---
> arch/arm64/mm/dma-mapping.c | 2 +-
> drivers/gpu/drm/exynos/exynos_drm_iommu.h | 2 +-
> drivers/iommu/dma-iommu.c | 25 ++++++++++++++++++++++++-
> include/linux/dma-iommu.h | 3 ++-
> 4 files changed, 28 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
> index c4284c432ae8..610d8e53011e 100644
> --- a/arch/arm64/mm/dma-mapping.c
> +++ b/arch/arm64/mm/dma-mapping.c
> @@ -827,7 +827,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
> * then the IOMMU core will have already configured a group for this
> * device, and allocated the default domain for that group.
> */
> - if (!domain || iommu_dma_init_domain(domain, dma_base, size)) {
> + if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
> pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
> dev_name(dev));
> return false;
> diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
> index c8de4913fdbe..87f6b5672e11 100644
> --- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
> +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
> @@ -66,7 +66,7 @@ static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
> if (ret)
> goto free_domain;
>
> - ret = iommu_dma_init_domain(domain, start, size);
> + ret = iommu_dma_init_domain(domain, start, size, NULL);
> if (ret)
> goto put_cookie;
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 4329d18080cf..c5ab8667e6f2 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -27,6 +27,7 @@
> #include <linux/iova.h>
> #include <linux/irq.h>
> #include <linux/mm.h>
> +#include <linux/pci.h>
> #include <linux/scatterlist.h>
> #include <linux/vmalloc.h>
>
> @@ -103,18 +104,38 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
> }
> EXPORT_SYMBOL(iommu_put_dma_cookie);
>
> +static void iova_reserve_pci_windows(struct pci_dev *dev,
> + struct iova_domain *iovad)
> +{
> + struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
> + struct resource_entry *window;
> + unsigned long lo, hi;
> +
> + resource_list_for_each_entry(window, &bridge->windows) {
> + if (resource_type(window->res) != IORESOURCE_MEM &&
> + resource_type(window->res) != IORESOURCE_IO)
> + continue;
> +
> + lo = iova_pfn(iovad, window->res->start - window->offset);
> + hi = iova_pfn(iovad, window->res->end - window->offset);
> + reserve_iova(iovad, lo, hi);
> + }
> +}
> +
> /**
> * iommu_dma_init_domain - Initialise a DMA mapping domain
> * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
> * @base: IOVA at which the mappable address space starts
> * @size: Size of IOVA space
> + * @dev: Device the domain is being initialised for
> *
> * @base and @size should be exact multiples of IOMMU page granularity to
> * avoid rounding surprises. If necessary, we reserve the page at address 0
> * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
> * any change which could make prior IOVAs invalid will fail.
> */
> -int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
> +int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
> + u64 size, struct device *dev)
> {
> struct iova_domain *iovad = cookie_iovad(domain);
> unsigned long order, base_pfn, end_pfn;
> @@ -152,6 +173,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
> iovad->dma_32bit_pfn = end_pfn;
> } else {
> init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
> + if (dev && dev_is_pci(dev))
> + iova_reserve_pci_windows(to_pci_dev(dev), iovad);
> }
> return 0;
> }
> diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
> index 5ee806e41b5c..32c589062bd9 100644
> --- a/include/linux/dma-iommu.h
> +++ b/include/linux/dma-iommu.h
> @@ -30,7 +30,8 @@ int iommu_get_dma_cookie(struct iommu_domain *domain);
> void iommu_put_dma_cookie(struct iommu_domain *domain);
>
> /* Setup call for arch DMA mapping code */
> -int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
> +int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
> + u64 size, struct device *dev);
>
> /* General helpers for DMA-API <-> IOMMU-API interaction */
> int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
Best regards
--
Marek Szyprowski, PhD
Samsung R&D Institute Poland
More information about the linux-arm-kernel
mailing list