Commit 7c1b058c authored by Robin Murphy, committed by Joerg Roedel

iommu/dma: Handle IOMMU API reserved regions

Now that it's simple to discover the necessary reservations for a given
device/IOMMU combination, let's wire up the appropriate handling. Basic
reserved regions and direct-mapped regions we simply have to carve out
of IOVA space (the IOMMU core having already mapped the latter before
attaching the device). For hardware MSI regions, we also pre-populate
the cookie with matching msi_pages. That way, irqchip drivers which
normally assume MSIs to require mapping at the IOMMU can keep working
without having to special-case their iommu_dma_map_msi_msg() hook, or
indeed be aware at all of quirks preventing the IOMMU from translating
certain addresses.
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 938f1bbe
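
For context: once cookie_init_hw_msi_region() has pre-populated cookie->msi_page_list, the regular MSI doorbell lookup finds a ready-made entry for any address inside a hardware MSI window, so no iommu_map() is ever attempted there. Below is a minimal sketch of that lookup in the style of iommu_dma_get_msi_page(); the helper name find_msi_page() and the simplified standalone context are assumptions for illustration, not code from this patch.

#include <linux/list.h>
#include <linux/types.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

/*
 * Illustrative lookup: round the doorbell address down to the IOVA
 * granule and search the cookie's pre-populated list. Entries created
 * by cookie_init_hw_msi_region() have phys == iova, so a hit means the
 * "mapping" already exists and the IOMMU is never asked to translate.
 */
static struct iommu_dma_msi_page *
find_msi_page(struct list_head *msi_page_list, phys_addr_t msi_addr,
	      unsigned long granule)
{
	struct iommu_dma_msi_page *msi_page;

	msi_addr &= ~(phys_addr_t)(granule - 1);
	list_for_each_entry(msi_page, msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	return NULL;	/* caller falls back to mapping a fresh page */
}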
@@ -184,6 +184,66 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
 	}
 }
 
+static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
+		phys_addr_t start, phys_addr_t end)
+{
+	struct iova_domain *iovad = &cookie->iovad;
+	struct iommu_dma_msi_page *msi_page;
+	int i, num_pages;
+
+	start -= iova_offset(iovad, start);
+	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
+
+	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
+	if (!msi_page)
+		return -ENOMEM;
+
+	for (i = 0; i < num_pages; i++) {
+		msi_page[i].phys = start;
+		msi_page[i].iova = start;
+		INIT_LIST_HEAD(&msi_page[i].list);
+		list_add(&msi_page[i].list, &cookie->msi_page_list);
+		start += iovad->granule;
+	}
+
+	return 0;
+}
+
+static int iova_reserve_iommu_regions(struct device *dev,
+		struct iommu_domain *domain)
+{
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	struct iommu_resv_region *region;
+	LIST_HEAD(resv_regions);
+	int ret = 0;
+
+	if (dev_is_pci(dev))
+		iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+
+	iommu_get_resv_regions(dev, &resv_regions);
+	list_for_each_entry(region, &resv_regions, list) {
+		unsigned long lo, hi;
+
+		/* We ARE the software that manages these! */
+		if (region->type == IOMMU_RESV_SW_MSI)
+			continue;
+
+		lo = iova_pfn(iovad, region->start);
+		hi = iova_pfn(iovad, region->start + region->length - 1);
+		reserve_iova(iovad, lo, hi);
+
+		if (region->type == IOMMU_RESV_MSI)
+			ret = cookie_init_hw_msi_region(cookie, region->start,
+					region->start + region->length);
+		if (ret)
+			break;
+	}
+	iommu_put_resv_regions(dev, &resv_regions);
+
+	return ret;
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -202,7 +262,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
-	bool pci = dev && dev_is_pci(dev);
 
 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 		return -EINVAL;
@@ -232,7 +291,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	 * leave the cache limit at the top of their range to save an rb_last()
 	 * traversal on every allocation.
 	 */
-	if (pci)
+	if (dev && dev_is_pci(dev))
 		end_pfn &= DMA_BIT_MASK(32) >> order;
 
 	/* start_pfn is always nonzero for an already-initialised domain */
@@ -247,12 +306,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		 * area cache limit down for the benefit of the smaller one.
 		 */
 		iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
+
+		return 0;
-	} else {
-		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
-		if (pci)
-			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
 	}
-	return 0;
+
+	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+	if (!dev)
+		return 0;
+
+	return iova_reserve_iommu_regions(dev, domain);
 }
 EXPORT_SYMBOL(iommu_dma_init_domain);
...
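
To make the rounding in cookie_init_hw_msi_region() concrete, here is a small standalone example (plain userspace C; the region values and the 4KiB IOVA granule, i.e. iova_shift() == 12, are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t shift = 12;			/* assumed 4KiB granule */
	const uint64_t granule = 1ULL << shift;
	uint64_t start = 0x8020100, end = 0x8021100;	/* hypothetical region */

	/* start -= iova_offset(iovad, start): round start down to a granule */
	start -= start & (granule - 1);			/* 0x8020000 */

	/* num_pages = iova_align(iovad, end - start) >> iova_shift(iovad) */
	uint64_t num_pages = ((end - start) + granule - 1) >> shift;

	/* prints start=0x8020000 num_pages=2 */
	printf("start=%#llx num_pages=%llu\n",
	       (unsigned long long)start, (unsigned long long)num_pages);
	return 0;
}

A 0x1000-byte region that begins 0x100 into a page straddles two granules, so two msi_page entries (one per granule, each with phys == iova) are pre-populated rather than one.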