Commit 0b5aebec authored by Christian König

drm/ttm: move SG flag check into ttm_bo_vm_reserve

Just check earlier if a BO can be page faulted in the first place.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/392321/
parent 686d4b4b
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -157,6 +157,15 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 		return VM_FAULT_NOPAGE;
 	}
 
+	/*
+	 * Refuse to fault imported pages. This should be handled
+	 * (if at all) by redirecting mmap to the exporter.
+	 */
+	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		dma_resv_unlock(bo->base.resv);
+		return VM_FAULT_SIGBUS;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_vm_reserve);
@@ -281,13 +290,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
 
-	/*
-	 * Refuse to fault imported pages. This should be handled
-	 * (if at all) by redirecting mmap to the exporter.
-	 */
-	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
-		return VM_FAULT_SIGBUS;
-
 	if (bdev->driver->fault_reserve_notify) {
 		struct dma_fence *moving = dma_fence_get(bo->moving);
...
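For context, after this change any fault handler that goes through ttm_bo_vm_reserve() rejects imported (dma-buf/SG-backed) BOs up front, which is why the later check in ttm_bo_vm_fault_reserved() can be dropped. Below is a minimal sketch of a driver fault handler relying on that, modeled on the generic ttm_bo_vm_fault() of this kernel era; the name my_driver_fault is hypothetical, and the four-argument ttm_bo_vm_fault_reserved() signature (prot, num_prefault, fault_page_size) is assumed to match the tree at the time of this commit.

/*
 * Sketch only, not part of this commit: a driver .fault handler that
 * no longer needs its own TTM_PAGE_FLAG_SG check, because
 * ttm_bo_vm_reserve() now returns VM_FAULT_SIGBUS for imported BOs.
 */
static vm_fault_t my_driver_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	/* Reserve the BO; imported BOs now fail here with SIGBUS. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Populate and map the pages; prefault a few, no huge pages. */
	ret = ttm_bo_vm_fault_reserved(vmf, vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);
	return ret;
}

In the VM_FAULT_RETRY case the reservation has already been dropped inside ttm_bo_vm_fault_reserved(), which is why that path returns without calling dma_resv_unlock().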