Commit 1aa4051b authored by Junwei Zhang, committed by Alex Deucher

drm/amdgpu: modify amdgpu_fence_wait_any() to amdgpu_fence_wait_multiple()

Rename the function and update all callers accordingly.
Add a new bool parameter, wait_all.

If wait_all is true, it will return when all fences are signaled or timeout.
If wait_all is false, it will return when any fence is signaled or timeout.
Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 52293c67
...@@ -440,9 +440,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring); ...@@ -440,9 +440,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
struct amdgpu_fence **fences, struct amdgpu_fence **array,
bool intr, long t); uint32_t count,
bool wait_all,
bool intr,
signed long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence); void amdgpu_fence_unref(struct amdgpu_fence **fence);
......
...@@ -836,13 +836,12 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence) ...@@ -836,13 +836,12 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
} }
static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences) static bool amdgpu_test_signaled_any(struct amdgpu_fence **fences, uint32_t count)
{ {
int idx; int idx;
struct amdgpu_fence *fence; struct amdgpu_fence *fence;
idx = 0; for (idx = 0; idx < count; ++idx) {
for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
fence = fences[idx]; fence = fences[idx];
if (fence) { if (fence) {
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
...@@ -852,6 +851,22 @@ static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences) ...@@ -852,6 +851,22 @@ static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
return false; return false;
} }
/*
 * Check whether every non-NULL fence in the array has signaled.
 * NULL slots are ignored, matching amdgpu_test_signaled_any().
 * Returns true when no unsignaled fence remains.
 */
static bool amdgpu_test_signaled_all(struct amdgpu_fence **fences, uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; ++i) {
		struct amdgpu_fence *f = fences[i];

		if (f && !test_bit(FENCE_FLAG_SIGNALED_BIT, &f->base.flags))
			return false;
	}

	return true;
}
struct amdgpu_wait_cb { struct amdgpu_wait_cb {
struct fence_cb base; struct fence_cb base;
struct task_struct *task; struct task_struct *task;
...@@ -867,33 +882,56 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb) ...@@ -867,33 +882,56 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
static signed long amdgpu_fence_default_wait(struct fence *f, bool intr, static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
signed long t) signed long t)
{ {
struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
struct amdgpu_fence *fence = to_amdgpu_fence(f); struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_device *adev = fence->ring->adev; struct amdgpu_device *adev = fence->ring->adev;
memset(&array[0], 0, sizeof(array)); return amdgpu_fence_wait_multiple(adev, &fence, 1, false, intr, t);
array[0] = fence;
return amdgpu_fence_wait_any(adev, array, intr, t);
} }
/* wait until any fence in array signaled */ /**
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, * Wait the fence array with timeout
struct amdgpu_fence **array, bool intr, signed long t) *
* @adev: amdgpu device
* @array: the fence array with amdgpu fence pointer
* @count: the number of the fence array
* @wait_all: the flag of wait all(true) or wait any(false)
* @intr: when sleep, set the current task interruptable or not
* @t: timeout to wait
*
* If wait_all is true, it will return when all fences are signaled or timeout.
* If wait_all is false, it will return when any fence is signaled or timeout.
*/
signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
struct amdgpu_fence **array,
uint32_t count,
bool wait_all,
bool intr,
signed long t)
{ {
long idx = 0; long idx = 0;
struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS]; struct amdgpu_wait_cb *cb;
struct amdgpu_fence *fence; struct amdgpu_fence *fence;
BUG_ON(!array); BUG_ON(!array);
for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) { cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
if (cb == NULL) {
t = -ENOMEM;
goto err_free_cb;
}
for (idx = 0; idx < count; ++idx) {
fence = array[idx]; fence = array[idx];
if (fence) { if (fence) {
cb[idx].task = current; cb[idx].task = current;
if (fence_add_callback(&fence->base, if (fence_add_callback(&fence->base,
&cb[idx].base, amdgpu_fence_wait_cb)) &cb[idx].base, amdgpu_fence_wait_cb)) {
return t; /* return if fence is already signaled */ /* The fence is already signaled */
if (wait_all)
continue;
else
goto fence_rm_cb;
}
} }
} }
...@@ -907,7 +945,9 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, ...@@ -907,7 +945,9 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
* amdgpu_test_signaled_any must be called after * amdgpu_test_signaled_any must be called after
* set_current_state to prevent a race with wake_up_process * set_current_state to prevent a race with wake_up_process
*/ */
if (amdgpu_test_signaled_any(array)) if (!wait_all && amdgpu_test_signaled_any(array, count))
break;
if (wait_all && amdgpu_test_signaled_all(array, count))
break; break;
if (adev->needs_reset) { if (adev->needs_reset) {
...@@ -923,13 +963,16 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev, ...@@ -923,13 +963,16 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
idx = 0; fence_rm_cb:
for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) { for (idx = 0; idx < count; ++idx) {
fence = array[idx]; fence = array[idx];
if (fence) if (fence)
fence_remove_callback(&fence->base, &cb[idx].base); fence_remove_callback(&fence->base, &cb[idx].base);
} }
err_free_cb:
kfree(cb);
return t; return t;
} }
......
...@@ -352,7 +352,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev, ...@@ -352,7 +352,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
spin_unlock(&sa_manager->wq.lock); spin_unlock(&sa_manager->wq.lock);
t = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT); t = amdgpu_fence_wait_multiple(adev, fences, AMDGPU_MAX_RINGS, false, false,
MAX_SCHEDULE_TIMEOUT);
r = (t > 0) ? 0 : t; r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock); spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */ /* if we have nothing to wait for block */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment