Commit 8a206685 authored by Christian König

drm/amdgpu: use drm_exec for GEM and CSA handling v2

Start using the new component here as well.

v2: ignore duplicates to allow per VM BO mappings
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230711133122.3710-5-christian.koenig@amd.com
parent 8abc1eb2
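For context, the drm_exec helpers introduced earlier in this series replace the ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() pattern removed below. A minimal sketch of the locking loop, assuming two arbitrary GEM objects (lock_two_objects, obj_a and obj_b are illustrative names, not part of this patch); the drm_exec calls match <drm/drm_exec.h> as added by this series:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int lock_two_objects(struct drm_gem_object *obj_a,
                            struct drm_gem_object *obj_b)
{
        struct drm_exec exec;
        int r;

        /* Interruptible waits; DRM_EXEC_IGNORE_DUPLICATES would additionally
         * tolerate locking the same object twice, which the GEM paths below
         * need for per-VM BOs sharing one reservation lock. */
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
        drm_exec_until_all_locked(&exec) {
                /* Lock the objects one after another; on contention the
                 * helper drops every lock and restarts the loop body. */
                r = drm_exec_lock_obj(&exec, obj_a);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto out;

                r = drm_exec_lock_obj(&exec, obj_b);
                drm_exec_retry_on_contention(&exec);
                if (r)
                        goto out;
        }

        /* ... operate on the locked objects here ... */

out:
        drm_exec_fini(&exec);   /* drops all reservations taken above */
        return r;
}

Because drm_exec_fini() releases every lock taken in the loop, the error handling in the functions changed below collapses to a single label instead of per-path ttm_eu_backoff_reservation() calls.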
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -22,6 +22,8 @@
  * * Author: Monk.liu@amd.com
  */
 
+#include <drm/drm_exec.h>
+
 #include "amdgpu.h"
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
@@ -65,31 +67,25 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                           struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
                           uint64_t csa_addr, uint32_t size)
 {
-        struct ww_acquire_ctx ticket;
-        struct list_head list;
-        struct amdgpu_bo_list_entry pd;
-        struct ttm_validate_buffer csa_tv;
+        struct drm_exec exec;
         int r;
 
-        INIT_LIST_HEAD(&list);
-        INIT_LIST_HEAD(&csa_tv.head);
-        csa_tv.bo = &bo->tbo;
-        csa_tv.num_shared = 1;
-
-        list_add(&csa_tv.head, &list);
-        amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-        if (r) {
-                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-                return r;
+        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+        drm_exec_until_all_locked(&exec) {
+                r = amdgpu_vm_lock_pd(vm, &exec, 0);
+                if (likely(!r))
+                        r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+                drm_exec_retry_on_contention(&exec);
+                if (unlikely(r)) {
+                        DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+                        goto error;
+                }
         }
 
         *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
         if (!*bo_va) {
-                ttm_eu_backoff_reservation(&ticket, &list);
-                DRM_ERROR("failed to create bo_va for static CSA\n");
-                return -ENOMEM;
+                r = -ENOMEM;
+                goto error;
         }
 
         r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
@@ -99,48 +95,42 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         if (r) {
                 DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                 amdgpu_vm_bo_del(adev, *bo_va);
-                ttm_eu_backoff_reservation(&ticket, &list);
-                return r;
+                goto error;
         }
 
-        ttm_eu_backoff_reservation(&ticket, &list);
-        return 0;
+error:
+        drm_exec_fini(&exec);
+        return r;
 }
 
 int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
                             uint64_t csa_addr)
 {
-        struct ww_acquire_ctx ticket;
-        struct list_head list;
-        struct amdgpu_bo_list_entry pd;
-        struct ttm_validate_buffer csa_tv;
+        struct drm_exec exec;
         int r;
 
-        INIT_LIST_HEAD(&list);
-        INIT_LIST_HEAD(&csa_tv.head);
-        csa_tv.bo = &bo->tbo;
-        csa_tv.num_shared = 1;
-
-        list_add(&csa_tv.head, &list);
-        amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-        if (r) {
-                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-                return r;
+        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+        drm_exec_until_all_locked(&exec) {
+                r = amdgpu_vm_lock_pd(vm, &exec, 0);
+                if (likely(!r))
+                        r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+                drm_exec_retry_on_contention(&exec);
+                if (unlikely(r)) {
+                        DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+                        goto error;
+                }
         }
 
         r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
         if (r) {
                 DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
-                ttm_eu_backoff_reservation(&ticket, &list);
-                return r;
+                goto error;
         }
 
         amdgpu_vm_bo_del(adev, bo_va);
-        ttm_eu_backoff_reservation(&ticket, &list);
-
-        return 0;
+error:
+        drm_exec_fini(&exec);
+        return r;
 }
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -33,6 +33,7 @@
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/ttm/ttm_tt.h>
 
@@ -198,29 +199,24 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
         struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
         struct amdgpu_vm *vm = &fpriv->vm;
 
-        struct amdgpu_bo_list_entry vm_pd;
-        struct list_head list, duplicates;
         struct dma_fence *fence = NULL;
-        struct ttm_validate_buffer tv;
-        struct ww_acquire_ctx ticket;
         struct amdgpu_bo_va *bo_va;
+        struct drm_exec exec;
         long r;
 
-        INIT_LIST_HEAD(&list);
-        INIT_LIST_HEAD(&duplicates);
-
-        tv.bo = &bo->tbo;
-        tv.num_shared = 2;
-        list_add(&tv.head, &list);
-
-        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-        if (r) {
-                dev_err(adev->dev, "leaking bo va because "
-                        "we fail to reserve bo (%ld)\n", r);
-                return;
+        drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+        drm_exec_until_all_locked(&exec) {
+                r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
+                drm_exec_retry_on_contention(&exec);
+                if (unlikely(r))
+                        goto out_unlock;
+
+                r = amdgpu_vm_lock_pd(vm, &exec, 0);
+                drm_exec_retry_on_contention(&exec);
+                if (unlikely(r))
+                        goto out_unlock;
         }
+
         bo_va = amdgpu_vm_bo_find(vm, bo);
         if (!bo_va || --bo_va->ref_count)
                 goto out_unlock;
@@ -230,6 +226,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
                 goto out_unlock;
 
         r = amdgpu_vm_clear_freed(adev, vm, &fence);
+        if (unlikely(r < 0))
+                dev_err(adev->dev, "failed to clear page "
+                        "tables on GEM object close (%ld)\n", r);
         if (r || !fence)
                 goto out_unlock;
 
@@ -237,10 +236,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
         dma_fence_put(fence);
 
 out_unlock:
-        if (unlikely(r < 0))
-                dev_err(adev->dev, "failed to clear page "
-                        "tables on GEM object close (%ld)\n", r);
-        ttm_eu_backoff_reservation(&ticket, &list);
+        if (r)
+                dev_err(adev->dev, "leaking bo va (%ld)\n", r);
+        drm_exec_fini(&exec);
 }
 
 static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
@@ -675,10 +673,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
         struct amdgpu_fpriv *fpriv = filp->driver_priv;
         struct amdgpu_bo *abo;
         struct amdgpu_bo_va *bo_va;
-        struct amdgpu_bo_list_entry vm_pd;
-        struct ttm_validate_buffer tv;
-        struct ww_acquire_ctx ticket;
-        struct list_head list, duplicates;
+        struct drm_exec exec;
         uint64_t va_flags;
         uint64_t vm_size;
         int r = 0;
@@ -728,36 +723,38 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                 return -EINVAL;
         }
 
-        INIT_LIST_HEAD(&list);
-        INIT_LIST_HEAD(&duplicates);
         if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
             !(args->flags & AMDGPU_VM_PAGE_PRT)) {
                 gobj = drm_gem_object_lookup(filp, args->handle);
                 if (gobj == NULL)
                         return -ENOENT;
                 abo = gem_to_amdgpu_bo(gobj);
-                tv.bo = &abo->tbo;
-                if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
-                        tv.num_shared = 1;
-                else
-                        tv.num_shared = 0;
-                list_add(&tv.head, &list);
         } else {
                 gobj = NULL;
                 abo = NULL;
         }
 
-        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
+        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+                      DRM_EXEC_IGNORE_DUPLICATES);
+        drm_exec_until_all_locked(&exec) {
+                if (gobj) {
+                        r = drm_exec_lock_obj(&exec, gobj);
+                        drm_exec_retry_on_contention(&exec);
+                        if (unlikely(r))
+                                goto error;
+                }
 
-        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-        if (r)
-                goto error_unref;
+                r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+                drm_exec_retry_on_contention(&exec);
+                if (unlikely(r))
+                        goto error;
+        }
 
         if (abo) {
                 bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
                 if (!bo_va) {
                         r = -ENOENT;
-                        goto error_backoff;
+                        goto error;
                 }
         } else if (args->operation != AMDGPU_VA_OP_CLEAR) {
                 bo_va = fpriv->prt_va;
@@ -794,10 +791,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                 amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                         args->operation);
 
-error_backoff:
-        ttm_eu_backoff_reservation(&ticket, &list);
-
-error_unref:
+error:
+        drm_exec_fini(&exec);
         drm_gem_object_put(gobj);
         return r;
 }