Commit 576175c2 authored by Joerg Roedel's avatar Joerg Roedel

x86/amd-iommu: Make alloc_new_range aware of multiple IOMMUs

Since the assumption that a dma_ops domain is only bound to
one IOMMU was given up, we need to make alloc_new_range aware
of this.
Signed-off-by: default avatarJoerg Roedel <joerg.roedel@amd.com>
parent 680525e0
...@@ -788,11 +788,11 @@ static u64 *fetch_pte(struct protection_domain *domain, ...@@ -788,11 +788,11 @@ static u64 *fetch_pte(struct protection_domain *domain,
* aperture in case of dma_ops domain allocation or address allocation * aperture in case of dma_ops domain allocation or address allocation
* failure. * failure.
*/ */
static int alloc_new_range(struct amd_iommu *iommu, static int alloc_new_range(struct dma_ops_domain *dma_dom,
struct dma_ops_domain *dma_dom,
bool populate, gfp_t gfp) bool populate, gfp_t gfp)
{ {
int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
struct amd_iommu *iommu;
int i; int i;
#ifdef CONFIG_IOMMU_STRESS #ifdef CONFIG_IOMMU_STRESS
...@@ -832,15 +832,18 @@ static int alloc_new_range(struct amd_iommu *iommu, ...@@ -832,15 +832,18 @@ static int alloc_new_range(struct amd_iommu *iommu,
dma_dom->aperture_size += APERTURE_RANGE_SIZE; dma_dom->aperture_size += APERTURE_RANGE_SIZE;
/* Intialize the exclusion range if necessary */ /* Intialize the exclusion range if necessary */
for_each_iommu(iommu) {
if (iommu->exclusion_start && if (iommu->exclusion_start &&
iommu->exclusion_start >= dma_dom->aperture[index]->offset && iommu->exclusion_start >= dma_dom->aperture[index]->offset
iommu->exclusion_start < dma_dom->aperture_size) { && iommu->exclusion_start < dma_dom->aperture_size) {
unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; unsigned long startpage;
int pages = iommu_num_pages(iommu->exclusion_start, int pages = iommu_num_pages(iommu->exclusion_start,
iommu->exclusion_length, iommu->exclusion_length,
PAGE_SIZE); PAGE_SIZE);
startpage = iommu->exclusion_start >> PAGE_SHIFT;
dma_ops_reserve_addresses(dma_dom, startpage, pages); dma_ops_reserve_addresses(dma_dom, startpage, pages);
} }
}
/* /*
* Check for areas already mapped as present in the new aperture * Check for areas already mapped as present in the new aperture
...@@ -1143,7 +1146,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) ...@@ -1143,7 +1146,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
add_domain_to_list(&dma_dom->domain); add_domain_to_list(&dma_dom->domain);
if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL)) if (alloc_new_range(dma_dom, true, GFP_KERNEL))
goto free_dma_dom; goto free_dma_dom;
/* /*
...@@ -1686,7 +1689,7 @@ static dma_addr_t __map_single(struct device *dev, ...@@ -1686,7 +1689,7 @@ static dma_addr_t __map_single(struct device *dev,
*/ */
dma_dom->next_address = dma_dom->aperture_size; dma_dom->next_address = dma_dom->aperture_size;
if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC)) if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
goto out; goto out;
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment