Commit 20784cdf authored by Dave Airlie

drm/ttm: use a helper for unlocked moves to the lru tail

The pattern was repeated a few times, just make an inline for it.
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-7-airlied@gmail.com
parent 46bca88b
...@@ -1101,9 +1101,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -1101,9 +1101,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error: error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail_unlocked(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
} }
return ret; return ret;
...@@ -1318,9 +1316,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, ...@@ -1318,9 +1316,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret; return ret;
} }
spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail_unlocked(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
return ret; return ret;
} }
......
...@@ -306,9 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, ...@@ -306,9 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
} }
if (bo->moving != moving) { if (bo->moving != moving) {
spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail_unlocked(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
} }
dma_fence_put(moving); dma_fence_put(moving);
} }
......
...@@ -669,6 +669,13 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, ...@@ -669,6 +669,13 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
return 0; return 0;
} }
/**
 * ttm_bo_move_to_lru_tail_unlocked
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of the LRU list by calling
 * ttm_bo_move_to_lru_tail() with the global ttm_bo_glob.lru_lock held
 * around the call. Because this helper takes the spinlock itself, the
 * caller must NOT already hold ttm_bo_glob.lru_lock.
 */
static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
}
/** /**
* ttm_bo_unreserve * ttm_bo_unreserve
* *
...@@ -678,9 +685,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, ...@@ -678,9 +685,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/ */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{ {
spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail_unlocked(bo);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&ttm_bo_glob.lru_lock);
dma_resv_unlock(bo->base.resv); dma_resv_unlock(bo->base.resv);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment