Commit 470db8b7 authored by Ben Skeggs

drm/nouveau/gem: tie deferred unmapping of buffers to VMA fence completion

As VMAs are per-client, unlike buffers, this allows us to avoid referencing
foreign fences (those that belong to another client/driver) from the client
deferred work handler, and prevent some not-fun race conditions that can be
triggered when a fence stalls.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 0db912af
@@ -115,25 +115,12 @@ nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
-	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-	struct reservation_object *resv = nvbo->bo.resv;
-	struct reservation_object_list *fobj;
+	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 	struct nouveau_gem_object_unmap *work;
-	struct dma_fence *fence = NULL;
-
-	fobj = reservation_object_get_list(resv);
 
 	list_del_init(&vma->head);
 
-	if (fobj && fobj->shared_count > 1)
-		ttm_bo_wait(&nvbo->bo, false, false);
-	else if (fobj && fobj->shared_count == 1)
-		fence = rcu_dereference_protected(fobj->shared[0],
-						reservation_object_held(resv));
-	else
-		fence = reservation_object_get_excl(nvbo->bo.resv);
-
-	if (!fence || !mapped) {
+	if (!fence) {
 		nouveau_gem_object_delete(vma);
 		return;
 	}
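For readers skimming the change, the sketch below shows roughly what nouveau_gem_object_unmap() looks like after this patch. Only the declarations and the "if (!fence)" early-out are taken verbatim from the hunk above; the tail that allocates a work item and queues it on the VMA's fence is reconstructed from the surrounding code (nouveau_gem_object_delete_work() and struct nouveau_cli_work appear in the hunk context, while the nouveau_cli_work_queue() call, the vma->vmm->cli path and the synchronous fallback are assumptions), so treat it as an approximation rather than the exact tree contents.

/* Hedged sketch of nouveau_gem_object_unmap() after this change.  The early
 * part mirrors the hunk above; the deferred-work tail is an approximation
 * of the surrounding code in nouveau_gem.c, not a verbatim copy.
 */
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	/* Use the VMA's own (per-client) fence instead of fishing fences out
	 * of the buffer's reservation object, which may belong to another
	 * client or driver.
	 */
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	/* No fence means nothing is outstanding on this mapping: unmap now. */
	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	/* Otherwise defer the unmap until the VMA's fence completes, via the
	 * client's deferred-work handler (assumed kmalloc'd work item and
	 * nouveau_cli_work_queue() helper).
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		/* Fall back to waiting synchronously if we cannot defer. */
		dma_fence_wait(fence, false);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

Because the fence now comes from the client's own VMA rather than the buffer's shared reservation object, the deferred work handler never has to hold or wait on a foreign fence, which is the race the commit message describes.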