Commit 22f0463a authored by Christian König

drm/amdgpu: unwrap fence chains in the explicit sync fence

Unwrap the explicit fence if it is a dma_fence_chain and
sync to the first fence not matching the owner rules.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210614174536.5188-1-christian.koenig@amd.com
parent 1451d0e9
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
 * Christian König <christian.koenig@amd.com>
 */
#include <linux/dma-fence-chain.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
...@@ -186,92 +188,108 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence) ...@@ -186,92 +188,108 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
	return amdgpu_sync_fence(sync, fence);
}
/** /* Determine based on the owner and mode if we should sync to a fence or not */
* amdgpu_sync_resv - sync to a reservation object static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
* enum amdgpu_sync_mode mode,
* @adev: amdgpu device void *owner, struct dma_fence *f)
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
* @mode: how owner affects which fences we sync to
* @owner: owner of the planned job submission
*
* Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner)
{ {
struct dma_resv_list *flist; void *fence_owner = amdgpu_sync_get_owner(f);
struct dma_fence *f;
unsigned i;
int r = 0;
if (resv == NULL)
return -EINVAL;
/* always sync to the exclusive fence */
f = dma_resv_excl_fence(resv);
r = amdgpu_sync_fence(sync, f);
flist = dma_resv_shared_list(resv);
if (!flist || r)
return r;
for (i = 0; i < flist->shared_count; ++i) {
void *fence_owner;
f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv));
fence_owner = amdgpu_sync_get_owner(f);
/* Always sync to moves, no matter what */ /* Always sync to moves, no matter what */
if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) { if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED)
r = amdgpu_sync_fence(sync, f); return true;
if (r)
break;
}
/* We only want to trigger KFD eviction fences on /* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise. * evict or move jobs. Skip KFD fences otherwise.
*/ */
if (fence_owner == AMDGPU_FENCE_OWNER_KFD && if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED) owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue; return false;
/* Never sync to VM updates either. */ /* Never sync to VM updates either. */
if (fence_owner == AMDGPU_FENCE_OWNER_VM && if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED) owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue; return false;
/* Ignore fences depending on the sync mode */ /* Ignore fences depending on the sync mode */
switch (mode) { switch (mode) {
case AMDGPU_SYNC_ALWAYS: case AMDGPU_SYNC_ALWAYS:
break; return true;
case AMDGPU_SYNC_NE_OWNER: case AMDGPU_SYNC_NE_OWNER:
if (amdgpu_sync_same_dev(adev, f) && if (amdgpu_sync_same_dev(adev, f) &&
fence_owner == owner) fence_owner == owner)
continue; return false;
break; break;
case AMDGPU_SYNC_EQ_OWNER: case AMDGPU_SYNC_EQ_OWNER:
if (amdgpu_sync_same_dev(adev, f) && if (amdgpu_sync_same_dev(adev, f) &&
fence_owner != owner) fence_owner != owner)
continue; return false;
break; break;
case AMDGPU_SYNC_EXPLICIT: case AMDGPU_SYNC_EXPLICIT:
continue; return false;
} }
WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD, WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
"Adding eviction fence to sync obj"); "Adding eviction fence to sync obj");
return true;
}
/**
* amdgpu_sync_resv - sync to a reservation object
*
* @adev: amdgpu device
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
* @mode: how owner affects which fences we sync to
* @owner: owner of the planned job submission
*
* Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner)
{
struct dma_resv_list *flist;
struct dma_fence *f;
unsigned i;
int r = 0;
if (resv == NULL)
return -EINVAL;
/* always sync to the exclusive fence */
f = dma_resv_excl_fence(resv);
dma_fence_chain_for_each(f, f) {
struct dma_fence_chain *chain = to_dma_fence_chain(f);
if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
chain->fence : f)) {
r = amdgpu_sync_fence(sync, f); r = amdgpu_sync_fence(sync, f);
dma_fence_put(f);
if (r) if (r)
return r;
break; break;
} }
}
flist = dma_resv_shared_list(resv);
if (!flist)
return 0;
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv));
if (amdgpu_sync_test_fence(adev, mode, owner, f)) {
r = amdgpu_sync_fence(sync, f);
if (r)
return r; return r;
}
}
return 0;
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment