Commit fade1ec0 authored by Robin Murphy, committed by Will Deacon

iommu/dma: Avoid PCI host bridge windows

With our DMA ops enabled for PCI devices, we should avoid allocating
IOVAs which a host bridge might misinterpret as peer-to-peer DMA,
leading to faults, corruption or other badness. To be safe, punch out
holes for all of the relevant host bridge's windows when initialising a
DMA domain for a PCI device.

CC: Marek Szyprowski <m.szyprowski@samsung.com>
CC: Inki Dae <inki.dae@samsung.com>
Reported-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 44bb7e24
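For context (an editorial illustration, not part of the commit): the standalone sketch below models the arithmetic the new iova_reserve_pci_windows() helper performs for a single bridge window. The window addresses, the CPU-to-bus offset and the 4KiB granule are assumed example values; in the kernel, the resulting PFN range is handed to reserve_iova().

/*
 * Userspace sketch, not kernel code: a host bridge window is described by
 * its CPU-visible resource range plus an offset such that
 * bus address = CPU address - offset. It is the bus-address range that
 * must be punched out of the IOVA space, since that is what the bridge
 * could decode as peer-to-peer traffic. All values below are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define GRANULE_SHIFT	12	/* assume a 4KiB IOMMU page granule */

static unsigned long iova_pfn(uint64_t addr)
{
	return addr >> GRANULE_SHIFT;	/* mirrors iova_pfn(iovad, addr) */
}

int main(void)
{
	/* hypothetical 256MiB memory window: CPU 0x50000000.., bus 0x40000000.. */
	uint64_t res_start = 0x50000000ULL;
	uint64_t res_end   = 0x5fffffffULL;
	uint64_t offset    = 0x10000000ULL;	/* CPU address - bus address */

	unsigned long lo = iova_pfn(res_start - offset);
	unsigned long hi = iova_pfn(res_end - offset);

	/* in the kernel this range would go to reserve_iova(iovad, lo, hi) */
	printf("reserve IOVA PFNs 0x%lx-0x%lx (bus 0x%llx-0x%llx)\n",
	       lo, hi,
	       (unsigned long long)(res_start - offset),
	       (unsigned long long)(res_end - offset));
	return 0;
}

The offset subtraction is the key step: IOVAs are bus addresses from the device's point of view, so it is the window's bus-address range, not its CPU resource range, that must never be handed out as an IOVA.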
arch/arm64/mm/dma-mapping.c
@@ -827,7 +827,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
 	 * then the IOMMU core will have already configured a group for this
 	 * device, and allocated the default domain for that group.
 	 */
-	if (!domain || iommu_dma_init_domain(domain, dma_base, size)) {
+	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
 		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
 			dev_name(dev));
 		return false;

drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -66,7 +66,7 @@ static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
 	if (ret)
 		goto free_domain;

-	ret = iommu_dma_init_domain(domain, start, size);
+	ret = iommu_dma_init_domain(domain, start, size, NULL);
 	if (ret)
 		goto put_cookie;

drivers/iommu/dma-iommu.c
@@ -27,6 +27,7 @@
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/mm.h>
+#include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>

@@ -103,18 +104,38 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 }
 EXPORT_SYMBOL(iommu_put_dma_cookie);

+static void iova_reserve_pci_windows(struct pci_dev *dev,
+		struct iova_domain *iovad)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+	struct resource_entry *window;
+	unsigned long lo, hi;
+
+	resource_list_for_each_entry(window, &bridge->windows) {
+		if (resource_type(window->res) != IORESOURCE_MEM &&
+		    resource_type(window->res) != IORESOURCE_IO)
+			continue;
+
+		lo = iova_pfn(iovad, window->res->start - window->offset);
+		hi = iova_pfn(iovad, window->res->end - window->offset);
+		reserve_iova(iovad, lo, hi);
+	}
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  * @base: IOVA at which the mappable address space starts
  * @size: Size of IOVA space
+ * @dev: Device the domain is being initialised for
  *
  * @base and @size should be exact multiples of IOMMU page granularity to
  * avoid rounding surprises. If necessary, we reserve the page at address 0
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+		u64 size, struct device *dev)
 {
 	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long order, base_pfn, end_pfn;

@@ -152,6 +173,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
 		iovad->dma_32bit_pfn = end_pfn;
 	} else {
 		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+		if (dev && dev_is_pci(dev))
+			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
 	}
 	return 0;
 }

include/linux/dma-iommu.h
@@ -30,7 +30,8 @@ int iommu_get_dma_cookie(struct iommu_domain *domain);
 void iommu_put_dma_cookie(struct iommu_domain *domain);

 /* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+		u64 size, struct device *dev);

 /* General helpers for DMA-API <-> IOMMU-API interaction */
 int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);