Commit 65705962 authored by Thomas Hellstrom, committed by Dave Airlie

drm/ttm/vmwgfx: Have TTM manage the validation sequence.

Rather than having the driver supply the validation sequence, leave that
responsibility to TTM. This saves some confusion and a function argument.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 95762c2b
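
As a hedged illustration of what the change means for a driver (not part of the patch itself; the identifiers are taken from the vmwgfx hunk below and used only as an example):

    /* Before this commit: the driver supplied its own sequence counter. */
    ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
                                 dev_priv->val_seq++);

    /* After this commit: TTM draws the sequence from bdev->val_seq under
     * the lru_lock, so the driver no longer passes one in. */
    ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
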
@@ -1539,6 +1539,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         bdev->dev_mapping = NULL;
         bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
+        bdev->val_seq = 0;
         spin_lock_init(&bdev->fence_lock);
         mutex_lock(&glob->device_list_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
...
@@ -126,11 +126,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
         struct ttm_bo_global *glob;
         struct ttm_validate_buffer *entry;
         int ret;
+        uint32_t val_seq;

         if (list_empty(list))
                 return 0;
@@ -146,6 +147,8 @@ int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
 retry:
         spin_lock(&glob->lru_lock);
+        val_seq = entry->bo->bdev->val_seq++;
+
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
...
@@ -264,7 +264,6 @@ struct vmw_private {
          */

         struct vmw_sw_context ctx;
-        uint32_t val_seq;
         struct mutex cmdbuf_mutex;

         /**
...
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
         ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
         if (unlikely(ret != 0))
                 goto out_err;
-        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
-                                     dev_priv->val_seq++);
+        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
         if (unlikely(ret != 0))
                 goto out_err;
...
@@ -515,6 +515,7 @@ struct ttm_bo_global {
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
+ * @val_seq: Current validation sequence.
  * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
  * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
@@ -544,6 +545,7 @@ struct ttm_bo_device {
          * Protected by the global:lru lock.
          */
         struct list_head ddestroy;
+        uint32_t val_seq;

         /*
          * Protected by load / firstopen / lastclose /unload sync.
...
@@ -72,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
  * function ttm_eu_reserve_buffers
  *
  * @list: thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -94,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
  * has failed.
  */
-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);

 /**
  * function ttm_eu_fence_buffer_objects.
...
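
As a usage note (a minimal sketch, not taken from this commit): after this change a driver reserves, submits and fences without tracking any sequence number of its own. The helper name example_submit and the void *fence argument are placeholders; ttm_eu_reserve_buffers(), ttm_eu_fence_buffer_objects() and ttm_eu_backoff_reservation() are the execbuf-util helpers referenced in the hunks above.

    #include <drm/ttm/ttm_execbuf_util.h>

    /* Hypothetical driver helper: validate_list holds the ttm_validate_buffer
     * entries collected during command-stream validation. */
    static int example_submit(struct list_head *validate_list, void *fence)
    {
            int ret;

            /* No sequence argument any more; TTM allocates it internally
             * from bdev->val_seq while holding the lru_lock. */
            ret = ttm_eu_reserve_buffers(validate_list);
            if (unlikely(ret != 0))
                    return ret;

            /* ... build and submit the command stream; on failure call
             * ttm_eu_backoff_reservation(validate_list) instead ... */

            /* Attach the fence and drop the reservations on success. */
            ttm_eu_fence_buffer_objects(validate_list, fence);
            return 0;
    }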