Commit 46bca88b authored by Dave Airlie

drm/ttm/amdgpu: consolidate ttm reserve paths

Drop the WARN_ON and consolidate the two paths into one.

Use the consolidated slowpath in the execbuf utils code.
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-6-airlied@gmail.com
parent 9c4cbb3a
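For reference, the consolidated API boils down to one fast-path call plus one slow-path call, the same pair the execbuf utils now use. Below is a minimal, illustrative sketch of that backoff pattern for two buffer objects; the helper name reserve_bo_pair() and its error handling are assumptions made for the example, while ttm_bo_reserve(), ttm_bo_reserve_slowpath(), ttm_bo_unreserve() and the ww_acquire_* calls are the real API.

/*
 * Illustrative sketch only (not part of this patch). Assumes
 * <drm/ttm/ttm_bo_driver.h> and <linux/dma-resv.h>, and that 'a' and 'b'
 * are distinct, referenced buffer objects.
 */
static int reserve_bo_pair(struct ttm_buffer_object *a,
                           struct ttm_buffer_object *b,
                           struct ww_acquire_ctx *ticket)
{
        int ret;

        ww_acquire_init(ticket, &reservation_ww_class);
retry:
        /* Fast path. Returns -EALREADY if this ticket already holds 'a',
         * which it will after a slowpath retry below. */
        ret = ttm_bo_reserve(a, true, false, ticket);
        if (ret && ret != -EALREADY)
                goto err;

        ret = ttm_bo_reserve(b, true, false, ticket);
        if (ret == -EDEADLK) {
                /* Back off, sleep until the contended BO can be locked
                 * under this ticket, then start over with it held. */
                ttm_bo_unreserve(a);
                ret = ttm_bo_reserve_slowpath(b, true, ticket);
                if (ret)        /* -ERESTARTSYS: interrupted by a signal */
                        goto err;
                swap(a, b);
                goto retry;
        }
        if (ret) {
                ttm_bo_unreserve(a);
                goto err;
        }

        ww_acquire_done(ticket);
        return 0;

err:
        ww_acquire_fini(ticket);
        return ret;
}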
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -160,7 +160,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
         int r;
 
-        r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
+        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
         if (unlikely(r != 0)) {
                 if (r != -ERESTARTSYS)
                         dev_err(adev->dev, "%p reserve failed\n", bo);
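Illustrative usage of the amdgpu wrapper above (the surrounding helper is hypothetical; amdgpu_bo_reserve() and amdgpu_bo_unreserve() are the real calls): passing no_intr == false keeps the reserve interruptible, so -ERESTARTSYS can be returned without triggering the dev_err() shown in the hunk.

/* Hypothetical caller, for illustration only. */
static int example_touch_bo(struct amdgpu_bo *bo)
{
        int r = amdgpu_bo_reserve(bo, false);   /* interruptible reserve */

        if (r)
                return r;       /* e.g. -ERESTARTSYS if a signal arrived */

        /* ... operate on the reserved BO (map, pin, query placement) ... */

        amdgpu_bo_unreserve(bo);
        return 0;
}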
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -93,7 +93,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
 
-                ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
+                ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
                 if (ret == -EALREADY && dups) {
                         struct ttm_validate_buffer *safe = entry;
                         entry = list_prev_entry(entry, head);
@@ -119,13 +119,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                 ttm_eu_backoff_reservation_reverse(list, entry);
 
                 if (ret == -EDEADLK) {
-                        if (intr) {
-                                ret = dma_resv_lock_slow_interruptible(bo->base.resv,
-                                                                       ticket);
-                        } else {
-                                dma_resv_lock_slow(bo->base.resv, ticket);
-                                ret = 0;
-                        }
+                        ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
                 }
 
                 if (!ret && entry->num_shared)
@@ -133,8 +127,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                                                  entry->num_shared);
 
                 if (unlikely(ret != 0)) {
-                        if (ret == -EINTR)
-                                ret = -ERESTARTSYS;
                         if (ticket) {
                                 ww_acquire_done(ticket);
                                 ww_acquire_fini(ticket);
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -599,29 +599,30 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
 
 /**
- * __ttm_bo_reserve:
+ * ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
  * @ticket: ticket used to acquire the ww_mutex.
  *
- * Will not remove reserved buffers from the lru lists.
- * Otherwise identical to ttm_bo_reserve.
+ * Locks a buffer object for validation. (Or prevents other processes from
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
  *
  * Returns:
  * -EDEADLK: The reservation may cause a deadlock.
  * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
+ * try again.
  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
  * a signal. Release all buffer reservations and return to user-space.
  * -EBUSY: The function needed to sleep, but @no_wait was true
  * -EALREADY: Bo already reserved using @ticket. This error code will only
  * be returned if @use_ticket is set to true.
  */
-static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
-                                   bool interruptible, bool no_wait,
-                                   struct ww_acquire_ctx *ticket)
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+                                 bool interruptible, bool no_wait,
+                                 struct ww_acquire_ctx *ticket)
 {
         int ret = 0;
 
@@ -643,59 +644,6 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
         return ret;
 }
 
-/**
- * ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Locks a buffer object for validation. (Or prevents other processes from
- * locking it for validation) and removes it from lru lists, while taking
- * a number of measures to prevent deadlocks.
- *
- * Deadlocks may occur when two processes try to reserve multiple buffers in
- * different order, either by will or as a result of a buffer being evicted
- * to make room for a buffer already reserved. (Buffers are reserved before
- * they are evicted). The following algorithm prevents such deadlocks from
- * occurring:
- * Processes attempting to reserve multiple buffers other than for eviction,
- * (typically execbuf), should first obtain a unique 32-bit
- * validation sequence number,
- * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
- * sequence number. If upon call of this function, the buffer object is already
- * reserved, the validation sequence is checked against the validation
- * sequence of the process currently reserving the buffer,
- * and if the current validation sequence is greater than that of the process
- * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
- * waiting for the buffer to become unreserved, after which it retries
- * reserving.
- * The caller should, when receiving an -EDEADLK error
- * release all its buffer reservations, wait for @bo to become unreserved, and
- * then rerun the validation with the same validation sequence. This procedure
- * will always guarantee that the process with the lowest validation sequence
- * will eventually succeed, preventing both deadlocks and starvation.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
-                                 bool interruptible, bool no_wait,
-                                 struct ww_acquire_ctx *ticket)
-{
-        WARN_ON(!kref_read(&bo->kref));
-        return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-}
-
 /**
  * ttm_bo_reserve_slowpath:
  * @bo: A pointer to a struct ttm_buffer_object.
@@ -710,20 +658,15 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                                           bool interruptible,
                                           struct ww_acquire_ctx *ticket)
 {
-        int ret = 0;
-
-        WARN_ON(!kref_read(&bo->kref));
-
-        if (interruptible)
-                ret = dma_resv_lock_slow_interruptible(bo->base.resv,
-                                                       ticket);
-        else
-                dma_resv_lock_slow(bo->base.resv, ticket);
-
-        if (ret == -EINTR)
-                ret = -ERESTARTSYS;
-        return ret;
+        if (interruptible) {
+                int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+                                                           ticket);
+
+                if (ret == -EINTR)
+                        ret = -ERESTARTSYS;
+                return ret;
+        }
+        dma_resv_lock_slow(bo->base.resv, ticket);
+        return 0;
 }
 
 /**
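The retained kerneldoc keeps the old return-code contract, including the trylock behaviour: with @no_wait set, contention is reported as -EBUSY and no ticket may be passed. A small illustrative sketch of that case (the helper name is hypothetical; ttm_bo_reserve() and ttm_bo_unreserve() are the real API):

/* Illustration only: opportunistically grab a BO without sleeping. */
static bool example_try_reserve(struct ttm_buffer_object *bo)
{
        /* no_wait == true, so no ticket; contention returns -EBUSY. */
        if (ttm_bo_reserve(bo, false, true, NULL))
                return false;

        /* ... quick, non-blocking work on the reserved BO ... */

        ttm_bo_unreserve(bo);
        return true;
}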