Commit e6364d70 authored by Lucas Stach's avatar Lucas Stach

drm/etnaviv: provide MMU context to etnaviv_gem_mapping_get

In preparation to having a context per process, etnaviv_gem_mapping_get
should not use the current GPU context, but needs to be told which
context to use.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
parent d80d842a
...@@ -248,7 +248,8 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) ...@@ -248,7 +248,8 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
} }
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
struct drm_gem_object *obj, struct etnaviv_gpu *gpu) struct drm_gem_object *obj, struct etnaviv_gpu *gpu,
struct etnaviv_iommu_context *mmu_context)
{ {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct etnaviv_vram_mapping *mapping; struct etnaviv_vram_mapping *mapping;
...@@ -256,7 +257,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( ...@@ -256,7 +257,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
int ret = 0; int ret = 0;
mutex_lock(&etnaviv_obj->lock); mutex_lock(&etnaviv_obj->lock);
mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu_context); mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
if (mapping) { if (mapping) {
/* /*
* Holding the object lock prevents the use count changing * Holding the object lock prevents the use count changing
...@@ -265,12 +266,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( ...@@ -265,12 +266,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
* the MMU owns this mapping to close this race. * the MMU owns this mapping to close this race.
*/ */
if (mapping->use == 0) { if (mapping->use == 0) {
mutex_lock(&gpu->mmu_context->lock); mutex_lock(&mmu_context->lock);
if (mapping->context == gpu->mmu_context) if (mapping->context == mmu_context)
mapping->use += 1; mapping->use += 1;
else else
mapping = NULL; mapping = NULL;
mutex_unlock(&gpu->mmu_context->lock); mutex_unlock(&mmu_context->lock);
if (mapping) if (mapping)
goto out; goto out;
} else { } else {
...@@ -303,15 +304,18 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( ...@@ -303,15 +304,18 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
list_del(&mapping->obj_node); list_del(&mapping->obj_node);
} }
mapping->context = gpu->mmu_context; etnaviv_iommu_context_get(mmu_context);
mapping->context = mmu_context;
mapping->use = 1; mapping->use = 1;
ret = etnaviv_iommu_map_gem(gpu->mmu_context, etnaviv_obj, ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj, gpu->memory_base,
gpu->memory_base, mapping); mapping);
if (ret < 0) if (ret < 0) {
etnaviv_iommu_context_put(mmu_context);
kfree(mapping); kfree(mapping);
else } else {
list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list); list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
}
out: out:
mutex_unlock(&etnaviv_obj->lock); mutex_unlock(&etnaviv_obj->lock);
...@@ -529,8 +533,10 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj) ...@@ -529,8 +533,10 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
WARN_ON(mapping->use); WARN_ON(mapping->use);
if (context) if (context) {
etnaviv_iommu_unmap_gem(context, mapping); etnaviv_iommu_unmap_gem(context, mapping);
etnaviv_iommu_context_put(context);
}
list_del(&mapping->obj_node); list_del(&mapping->obj_node);
kfree(mapping); kfree(mapping);
......
...@@ -119,7 +119,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj); ...@@ -119,7 +119,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj); void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
struct drm_gem_object *obj, struct etnaviv_gpu *gpu); struct drm_gem_object *obj, struct etnaviv_gpu *gpu,
struct etnaviv_iommu_context *mmu_context);
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping); void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
#endif /* __ETNAVIV_GEM_H__ */ #endif /* __ETNAVIV_GEM_H__ */
...@@ -224,7 +224,8 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit) ...@@ -224,7 +224,8 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
struct etnaviv_vram_mapping *mapping; struct etnaviv_vram_mapping *mapping;
mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base, mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
submit->gpu); submit->gpu,
submit->gpu->mmu_context);
if (IS_ERR(mapping)) { if (IS_ERR(mapping)) {
ret = PTR_ERR(mapping); ret = PTR_ERR(mapping);
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment