Commit ae9c0af2 authored by Christian König, committed by Alex Deucher

drm/ttm: allow fence to be added as shared

This patch adds a new flag to each ttm_validate_buffer entry that tells TTM
to add the fence to the reservation object as shared rather than exclusive.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c4d922b1
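
For context, a minimal usage sketch (hypothetical driver code, not part of this commit): a submission path that only reads a BO can now request a shared fence slot by setting the new flag before validation. The function name and the way the fence is obtained are illustrative; the TTM and list calls match the API as of this patch.

/* Hypothetical example: fence a read-only submission as shared. */
static int read_only_submit(struct ttm_buffer_object *bo, struct fence *fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	int ret;

	INIT_LIST_HEAD(&head);
	tv.bo = bo;
	tv.shared = true;	/* add the fence as shared, not exclusive */
	list_add(&tv.head, &head);

	/* Reserves the BO and, because tv.shared is set, preallocates a
	 * shared fence slot so that fencing below cannot fail. */
	ret = ttm_eu_reserve_buffers(&ticket, &head, true);
	if (unlikely(ret != 0))
		return ret;

	/* ... queue the read-only work that "fence" will signal ... */

	/* Takes the shared path in ttm_eu_fence_buffer_objects(). */
	ttm_eu_fence_buffer_objects(&ticket, &head, fence);
	return 0;
}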
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -226,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
 	qxl_bo_ref(bo);
 	entry->tv.bo = &bo->tbo;
+	entry->tv.shared = false;
 	list_add_tail(&entry->tv.head, &release->bos);
 	return 0;
 }
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -183,6 +183,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		}
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+		p->relocs[i].tv.shared = false;
 		p->relocs[i].handle = r->handle;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].tv.bo = &vm->page_directory->tbo;
+	list[0].tv.shared = false;
 	list[0].tiling_flags = 0;
 	list[0].handle = 0;
 	list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].tv.bo = &list[idx].robj->tbo;
+		list[idx].tv.shared = false;
 		list[idx].tiling_flags = 0;
 		list[idx].handle = 0;
 		list_add(&list[idx++].tv.head, head);
@@ -395,6 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	memset(&tv, 0, sizeof(tv));
 	tv.bo = &bo->tbo;
+	tv.shared = false;
 
 	INIT_LIST_HEAD(&head);
 	list_add(&tv.head, &head);
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -119,8 +119,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			ret = -EBUSY;
 		}
 
-		if (!ret)
-			continue;
+		if (!ret) {
+			if (!entry->shared)
+				continue;
+
+			ret = reservation_object_reserve_shared(bo->resv);
+			if (!ret)
+				continue;
+		}
 
 		/* uh oh, we lost out, drop every reservation and try
 		 * to only reserve this buffer, then start over if
@@ -136,6 +142,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			ret = 0;
 		}
 
+		if (!ret && entry->shared)
+			ret = reservation_object_reserve_shared(bo->resv);
+
 		if (unlikely(ret != 0)) {
 			if (ret == -EINTR)
 				ret = -ERESTARTSYS;
@@ -183,6 +192,9 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		reservation_object_add_excl_fence(bo->resv, fence);
+		if (entry->shared)
+			reservation_object_add_shared_fence(bo->resv, fence);
+		else
+			reservation_object_add_excl_fence(bo->resv, fence);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
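
Note the split in the hunks above: reservation_object_reserve_shared() can allocate memory and therefore fail, so it is called during the reservation phase, in both the fast path and the deadlock-retry path, while errors can still unwind; reservation_object_add_shared_fence() consumes the preallocated slot and cannot fail, which makes it safe at fence time. A minimal sketch of that two-phase pattern on a single reservation object, assuming the reservation_object API as it existed around this patch:

/* Phase 1, while errors can still be handled: may return -ENOMEM. */
int err = reservation_object_reserve_shared(bo->resv);
if (err)
	return err;

/* ... submit the work that "fence" will signal ... */

/* Phase 2, after submission: uses the preallocated slot, never fails. */
reservation_object_add_shared_fence(bo->resv, fence);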
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -346,6 +346,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
 		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
 	}
@@ -2670,9 +2671,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	INIT_LIST_HEAD(&validate_list);
 
 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	pinned_val.shared = false;
 	list_add_tail(&pinned_val.head, &validate_list);
 
 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
 	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
 		struct ttm_validate_buffer val_buf;
 
 		val_buf.bo = bo;
+		val_buf.shared = false;
 		res->func->unbind(res, false, &val_buf);
 	}
 	res->backup_dirty = false;
@@ -1219,6 +1220,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
 	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
 	if (unlikely(ret != 0))
@@ -1312,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1357,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 		return 0;
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	if (res->backup)
 		val_buf.bo = &res->backup->base;
 	do {
@@ -1474,6 +1478,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 		struct ttm_validate_buffer val_buf;
 
 		val_buf.bo = bo;
+		val_buf.shared = false;
 
 		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,11 +39,13 @@
  *
  * @head: list head for thread-private list.
  * @bo: refcounted buffer object pointer.
+ * @shared: should the fence be added shared?
  */
 
 struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
+	bool shared;
 };
 
 /**
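
For illustration (not part of the patch), an entry that opts into shared fencing simply sets the new field next to the BO pointer; "bo" here is a hypothetical driver object wrapping a TTM BO:

struct ttm_validate_buffer tv = {
	.bo     = &bo->tbo,
	.shared = true,		/* fence will be added as shared */
};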