Commit 6b1ce0a2 authored by Daniel Vetter

drm/ttm: remove ttm_bo_wait_unreserved

With nouveau fixed, all ttm-using drivers have the correct nesting of
mmap_sem vs dma_resv, and we can just lock the buffer.

Assuming I didn't screw up anything with my audit of course.
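
For illustration only (a sketch, not part of this patch): with the nesting fixed,
code that already holds mmap_sem for read may now take the reservation lock with a
blocking, interruptible acquire, so the old trylock-and-wait-unreserved dance is no
longer needed. The 'mm' and 'bo' pointers below are assumed to exist.

	/* Sketch: mmap_sem is the outer lock, the bo reservation the inner one. */
	down_read(&mm->mmap_sem);
	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);	/* may sleep */
	if (ret == 0) {
		/* ... access the buffer object under its reservation ... */
		dma_resv_unlock(bo->base.resv);
	}
	up_read(&mm->mmap_sem);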

v2:
- Don't forget wu_mutex (Christian König)
- Keep the mmap_sem-less wait optimization (Thomas)
- Use _lock_interruptible to be good citizens (Thomas)

v3: Rebase over fault handler helperification.

Reviewed-by: Christian König <christian.koenig@amd.com> (v2)
Reviewed-by: Thomas Hellström <thellstrom@vmware.com> (v2)
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104173801.2972-3-daniel.vetter@ffwll.ch
parent 03e0d26f
@@ -161,7 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
@@ -1291,7 +1290,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->type = type;
bo->num_pages = num_pages;
@@ -1895,37 +1893,3 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
/**
* ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
* unreserved
*
* @bo: Pointer to buffer
*/
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
int ret;
/*
* In the absense of a wait_unlocked API,
* Use the bo::wu_mutex to avoid triggering livelocks due to
* concurrent use of this function. Note that this use of
* bo::wu_mutex can go away if we change locking order to
* mmap_sem -> bo::reserve.
*/
ret = mutex_lock_interruptible(&bo->wu_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
if (!dma_resv_is_locked(bo->base.resv))
goto out_unlock;
ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
if (ret == -EINTR)
ret = -ERESTARTSYS;
if (unlikely(ret != 0))
goto out_unlock;
dma_resv_unlock(bo->base.resv);
out_unlock:
mutex_unlock(&bo->wu_mutex);
return ret;
}
@@ -504,7 +504,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
@@ -128,30 +128,22 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
}
return VM_FAULT_RETRY;
}
/*
* If we'd want to change locking order to
* mmap_sem -> bo::reserve, we'd use a blocking reserve here
* instead of retrying the fault...
*/
return VM_FAULT_NOPAGE;
if (dma_resv_lock_interruptible(bo->base.resv, NULL))
return VM_FAULT_NOPAGE;
}
return 0;
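
For readability, this is roughly how the patched reserve path fits together when the
pieces of the hunk above are reassembled (a sketch, not an authoritative copy of
ttm_bo_vm_reserve()):

	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* Drop mmap_sem, sleep until unreserved, then retry the fault. */
				ttm_bo_get(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}
			return VM_FAULT_RETRY;
		}
		/* No retry allowed: block here, since nesting is now mmap_sem -> dma_resv. */
		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}
	return 0;	/* reservation held on success */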
@@ -154,7 +154,6 @@ struct ttm_tt;
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
* @wu_mutex: Wait unreserved mutex.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -222,8 +221,6 @@ struct ttm_buffer_object {
uint64_t offset; /* GPU address space is independent of CPU word size */
struct sg_table *sg;
struct mutex wu_mutex;
};
/**
@@ -707,7 +704,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
int ttm_bo_swapout(struct ttm_bo_global *glob,
struct ttm_operation_ctx *ctx);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the