Commit ccae4592 authored by Lucas Stach

drm/etnaviv: remove cycling through MMU address space

Cycling was useful on MMUv1 GPUs, which don't generate proper faults,
back when the GPU write caches weren't fully understood and were not
handled correctly by the kernel driver. As this has been fixed for
quite some time, cycling through the MMU address space now only
spreads out the MMU mappings needlessly.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
parent d066b246
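For context, the strategy being removed worked roughly as follows (a condensed sketch of the old allocation path, not the literal driver code): the search start was carried in mmu->last_iova so successive buffers landed at increasing addresses, and on -ENOSPC the driver wrapped around to address 0 and retried before falling back to evicting entries:

	/* Condensed sketch of the old strategy (not the literal driver code). */
	ret = drm_mm_insert_node_in_range(&mmu->mm, node, size, 0, 0,
					  mmu->last_iova, U64_MAX, mode);
	if (ret == -ENOSPC && mmu->last_iova) {
		/* Wrap around and retry once from the bottom of the range. */
		mmu->last_iova = 0;
		mmu->need_flush = true;	/* reused addresses need a TLB flush */
		/* ...retry the insert before attempting eviction... */
	}

With the wrap-around gone, the allocator always searches from address 0, so mappings stay densely packed at the bottom of the GPU address space instead of being spread across the whole range.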
@@ -162,22 +162,10 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		bool found;
 
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-						  size, 0, 0,
-						  mmu->last_iova, U64_MAX,
-						  mode);
+						  size, 0, 0, 0, U64_MAX, mode);
 		if (ret != -ENOSPC)
 			break;
 
-		/*
-		 * If we did not search from the start of the MMU region,
-		 * try again in case there are free slots.
-		 */
-		if (mmu->last_iova) {
-			mmu->last_iova = 0;
-			mmu->need_flush = true;
-			continue;
-		}
-
 		/* Try to retire some entries */
 		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
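For reference, drm_mm_insert_node_in_range() takes the search range explicitly, so the hunk above only changes the range start from mmu->last_iova to 0. The signature (as declared in include/drm/drm_mm.h in this era; the annotations are editorial, not from the header):

	int drm_mm_insert_node_in_range(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,		/* bytes to allocate */
					u64 alignment,		/* 0 = no constraint */
					unsigned long color,	/* 0 = no color */
					u64 start,		/* was mmu->last_iova, now 0 */
					u64 end,		/* U64_MAX = top of space */
					enum drm_mm_insert_mode mode);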
@@ -274,7 +262,6 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	if (ret < 0)
 		goto unlock;
 
-	mmu->last_iova = node->start + etnaviv_obj->base.size;
 	mapping->iova = node->start;
 	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
 				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
@@ -381,7 +368,6 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
 			mutex_unlock(&mmu->lock);
 			return ret;
 		}
 
-		mmu->last_iova = vram_node->start + size;
 		gpu->mmu->need_flush = true;
 		mutex_unlock(&mmu->lock);
@@ -59,7 +59,6 @@ struct etnaviv_iommu {
 	struct mutex lock;
 	struct list_head mappings;
 	struct drm_mm mm;
-	u32 last_iova;
 	bool need_flush;
 };