Commit 5d50fcbd authored by Christian König, committed by Alex Deucher

drm/ttm: stop always moving BOs on the LRU on page fault

Move the BO on the LRU only when it is actually moved by a DMA
operation.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Tested-And-Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d7e28e2d
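
A note on why the reserve/unreserve calls disappear in the hunks below: in the TTM of this era, ttm_bo_unreserve() bumps the BO to the tail of the LRU every time it drops the reservation lock, so every page fault repositioned the BO even when nothing moved. The sketch below is an approximation of that pre-patch inline helper, not part of this commit, and the exact body may differ in the tree the patch applies to.

/* Approximate pre-patch shape of ttm_bo_unreserve(); sketch only. */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	/* Unconditionally requeue the BO at the LRU tail ... */
	spin_lock(&bo->bdev->glob->lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&bo->bdev->glob->lru_lock);
	/* ... and only then drop the reservation lock. */
	reservation_object_unlock(bo->resv);
}

By taking bo->resv with reservation_object_trylock() and dropping it with reservation_object_unlock() directly, the fault path skips that unconditional LRU churn and instead decides explicitly, after fault_reserve_notify(), whether the BO really moved (see the note after the diff).
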
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 		ttm_bo_get(bo);
 		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
-		ttm_bo_unreserve(bo);
+		reservation_object_unlock(bo->resv);
 		ttm_bo_put(bo);
 		goto out_unlock;
 	}
@@ -131,11 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
-	err = ttm_bo_reserve(bo, true, true, NULL);
-	if (unlikely(err != 0)) {
-		if (err != -EBUSY)
-			return VM_FAULT_NOPAGE;
-
+	if (unlikely(!reservation_object_trylock(bo->resv))) {
 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 				ttm_bo_get(bo);
@@ -165,6 +161,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	}
 
 	if (bdev->driver->fault_reserve_notify) {
+		struct dma_fence *moving = dma_fence_get(bo->moving);
+
 		err = bdev->driver->fault_reserve_notify(bo);
 		switch (err) {
 		case 0:
@@ -177,6 +175,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 			ret = VM_FAULT_SIGBUS;
 			goto out_unlock;
 		}
+
+		if (bo->moving != moving) {
+			spin_lock(&bdev->glob->lru_lock);
+			ttm_bo_move_to_lru_tail(bo, NULL);
+			spin_unlock(&bdev->glob->lru_lock);
+		}
+		dma_fence_put(moving);
 	}
 
 	/*
@@ -291,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 out_io_unlock:
 	ttm_mem_io_unlock(man);
 out_unlock:
-	ttm_bo_unreserve(bo);
+	reservation_object_unlock(bo->resv);
 	return ret;
 }
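
Reading the two ttm_bo_vm_fault() hunks that add the moving-fence bookkeeping together, the new rule is: only bump the BO to the LRU tail when fault_reserve_notify() actually scheduled a move. Below is a commented sketch of that pattern, using the same names and calls as the diff above, with the error handling elided.

	if (bdev->driver->fault_reserve_notify) {
		/* Remember which fence (if any) guarded the last move. */
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		/* ... error handling as in the hunk above ... */

		/*
		 * Only when the callback installed a new moving fence,
		 * i.e. the BO was really moved by a DMA operation, is it
		 * repositioned at the LRU tail; an ordinary fault no
		 * longer touches the LRU at all.
		 */
		if (bo->moving != moving) {
			spin_lock(&bdev->glob->lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&bdev->glob->lru_lock);
		}
		dma_fence_put(moving);
	}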