Commit ee327caf authored by Christian König, committed by Alex Deucher

drm/amdgpu: switch to common fence_wait_any_timeout v2

No need to duplicate the functionality any more.

v2: fix handling if no fence is available.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
parent 318cd340
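
This commit drops the driver-private amdgpu_fence_wait_any() in favor of the common fence_wait_any_timeout() helper from the dma-buf fence code (the struct fence API of this kernel generation; the interface was later renamed to dma_fence_wait_any_timeout). A minimal caller sketch, assuming the era's signature; the wrapper name is illustrative and not from the patch:

```c
#include <linux/fence.h>
#include <linux/sched.h>
#include <linux/types.h>

/*
 * Illustrative wrapper: block until any fence in the array signals.
 * fence_wait_any_timeout() returns the remaining jiffies (> 0) once a
 * fence signals, 0 on timeout, or a negative error code; folding that
 * into 0-or-error mirrors what the amdgpu_sa.c hunk below does with r.
 */
static int wait_any(struct fence **fences, uint32_t count)
{
	signed long t;

	t = fence_wait_any_timeout(fences, count, false,
				   MAX_SCHEDULE_TIMEOUT);
	return (t > 0) ? 0 : t;
}
```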
@@ -447,10 +447,6 @@
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-signed long amdgpu_fence_wait_any(struct fence **array,
-				  uint32_t count,
-				  bool intr,
-				  signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
@@ -822,104 +822,6 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
 	return (const char *)fence->ring->name;
 }
 
-static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
-{
-	int idx;
-	struct fence *fence;
-
-	for (idx = 0; idx < count; ++idx) {
-		fence = fences[idx];
-		if (fence) {
-			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-				return true;
-		}
-	}
-	return false;
-}
-
-struct amdgpu_wait_cb {
-	struct fence_cb base;
-	struct task_struct *task;
-};
-
-static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
-{
-	struct amdgpu_wait_cb *wait =
-		container_of(cb, struct amdgpu_wait_cb, base);
-	wake_up_process(wait->task);
-}
-
-/**
- * Wait the fence array with timeout
- *
- * @array: the fence array with amdgpu fence pointer
- * @count: the number of the fence array
- * @intr: when sleep, set the current task interruptable or not
- * @t: timeout to wait
- *
- * It will return when any fence is signaled or timeout.
- */
-signed long amdgpu_fence_wait_any(struct fence **array, uint32_t count,
-				  bool intr, signed long t)
-{
-	struct amdgpu_wait_cb *cb;
-	struct fence *fence;
-	unsigned idx;
-
-	BUG_ON(!array);
-
-	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
-	if (cb == NULL) {
-		t = -ENOMEM;
-		goto err_free_cb;
-	}
-
-	for (idx = 0; idx < count; ++idx) {
-		fence = array[idx];
-		if (fence) {
-			cb[idx].task = current;
-			if (fence_add_callback(fence,
-					&cb[idx].base, amdgpu_fence_wait_cb)) {
-				/* The fence is already signaled */
-				goto fence_rm_cb;
-			}
-		}
-	}
-
-	while (t > 0) {
-		if (intr)
-			set_current_state(TASK_INTERRUPTIBLE);
-		else
-			set_current_state(TASK_UNINTERRUPTIBLE);
-
-		/*
-		 * amdgpu_test_signaled_any must be called after
-		 * set_current_state to prevent a race with wake_up_process
-		 */
-		if (amdgpu_test_signaled_any(array, count))
-			break;
-
-		t = schedule_timeout(t);
-
-		if (t > 0 && intr && signal_pending(current))
-			t = -ERESTARTSYS;
-	}
-
-	__set_current_state(TASK_RUNNING);
-
-fence_rm_cb:
-	for (idx = 0; idx < count; ++idx) {
-		fence = array[idx];
-		if (fence && cb[idx].base.func)
-			fence_remove_callback(fence, &cb[idx].base);
-	}
-
-err_free_cb:
-	kfree(cb);
-
-	return t;
-}
-
 const struct fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
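
The helper removed above open-codes the classic prepare-to-wait pattern: the task state is set before the signaled check, so a wake_up_process() fired by the fence callback cannot be lost between the check and schedule_timeout(). A stripped-down sketch of just that pattern, with illustrative names not taken from the patch:

```c
#include <linux/compiler.h>
#include <linux/sched.h>

/*
 * Illustrative only: sleep until *done becomes true or the timeout runs
 * out.  Setting the task state *before* testing the condition closes the
 * race with the waker: if wake_up_process() fires between the test and
 * schedule_timeout(), it puts the task back to TASK_RUNNING, so
 * schedule_timeout() returns almost immediately instead of sleeping for
 * the full remaining timeout.
 */
static signed long wait_flag_timeout(bool *done, signed long t)
{
	while (t > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		t = schedule_timeout(t);
	}
	__set_current_state(TASK_RUNNING);
	return t;
}
```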
@@ -337,6 +337,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 {
 	struct fence *fences[AMDGPU_MAX_RINGS];
 	unsigned tries[AMDGPU_MAX_RINGS];
+	unsigned count;
 	int i, r;
 	signed long t;
@@ -371,13 +372,18 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		/* see if we can skip over some allocations */
 	} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
-	spin_unlock(&sa_manager->wq.lock);
-	t = amdgpu_fence_wait_any(fences, AMDGPU_MAX_RINGS,
-				  false, MAX_SCHEDULE_TIMEOUT);
-	r = (t > 0) ? 0 : t;
-	spin_lock(&sa_manager->wq.lock);
-	/* if we have nothing to wait for block */
-	if (r == -ENOENT) {
+	for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+		if (fences[i])
+			fences[count++] = fences[i];
+
+	if (count) {
+		spin_unlock(&sa_manager->wq.lock);
+		t = fence_wait_any_timeout(fences, count, false,
+					   MAX_SCHEDULE_TIMEOUT);
+		r = (t > 0) ? 0 : t;
+		spin_lock(&sa_manager->wq.lock);
+	} else {
+		/* if we have nothing to wait for block */
 		r = wait_event_interruptible_locked(
 			sa_manager->wq,
 			amdgpu_sa_event(sa_manager, size, align)
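
In the hunk above, the fences[] array filled by amdgpu_sa_bo_next_hole() is sparse (one optional slot per ring). The removed driver helper skipped NULL entries itself, while the common fence_wait_any_timeout() expects only valid fences, so the new code first compacts the array in place and, when count ends up zero, blocks on the wait queue instead; that fallback is the "fix handling if no fence is available" noted for v2. The in-place compaction idiom in isolation (illustrative, not kernel code):

```c
#include <stddef.h>

/* Shift all non-NULL pointers to the front of the array and return how
 * many there are; relative order is preserved and no extra storage is
 * needed, which is why the diff can reuse fences[] directly. */
static unsigned compact_ptrs(void **arr, unsigned n)
{
	unsigned i, count = 0;

	for (i = 0; i < n; ++i)
		if (arr[i])
			arr[count++] = arr[i];
	return count;
}
```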