Commit 8bc4d885 authored by Lucas Stach

drm/etnaviv: track fences by IDR instead of seqno

This moves away from using the internal seqno as the userspace fence
reference. By moving to a generic ID, we can later replace the internal
fence with something other than the etnaviv seqno fence.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
parent 3d9fc642
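
The change below follows the standard kernel IDR lifecycle for userspace-visible handles: allocate a cyclic ID when the fence is created, hand that ID to userspace instead of the seqno, and remove the ID from the IDR before the fence is freed so it can no longer be looked up. A minimal sketch of that pattern, using only the IDR and dma_fence calls that appear in the diff; the demo_gpu/demo_fence structures and function names are illustrative stand-ins, not the driver's own types:

#include <linux/dma-fence.h>
#include <linux/idr.h>
#include <linux/mutex.h>

/* Illustrative stand-ins for the driver structures (not the driver's code). */
struct demo_gpu {
        struct mutex lock;        /* serializes IDR insert/lookup/remove */
        struct idr fence_idr;     /* maps userspace fence IDs to fences */
};

struct demo_fence {
        struct demo_gpu *gpu;
        int id;                   /* userspace-visible fence handle */
        struct dma_fence base;
};

/* Allocate a cyclic ID so recently released values are not reused at once. */
static int demo_fence_register(struct demo_gpu *gpu, struct demo_fence *f)
{
        int id;

        mutex_lock(&gpu->lock);
        id = idr_alloc_cyclic(&gpu->fence_idr, &f->base, 0, INT_MAX, GFP_KERNEL);
        mutex_unlock(&gpu->lock);
        if (id < 0)
                return id;

        f->id = id;
        return 0;
}

/* Drop the ID first, so the dying fence can no longer be found by lookup. */
static void demo_fence_unregister(struct demo_fence *f)
{
        mutex_lock(&f->gpu->lock);
        idr_remove(&f->gpu->fence_idr, f->id);
        mutex_unlock(&f->gpu->lock);
}

In the diff itself the allocation happens in etnaviv_gpu_fence_alloc() and the removal, under the GPU lock, in etnaviv_fence_release().
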
@@ -104,6 +104,7 @@ struct etnaviv_gem_submit {
         struct kref refcount;
         struct etnaviv_gpu *gpu;
         struct dma_fence *out_fence, *in_fence;
+        int out_fence_id;
         struct list_head node; /* GPU active submit list */
         struct etnaviv_cmdbuf cmdbuf;
         bool runtime_resumed;

@@ -563,7 +563,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
         }

         args->fence_fd = out_fence_fd;
-        args->fence = submit->out_fence->seqno;
+        args->fence = submit->out_fence_id;

 err_submit_objects:
         etnaviv_submit_put(submit);

@@ -1016,6 +1016,7 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
 /* fence object management */
 struct etnaviv_fence {
         struct etnaviv_gpu *gpu;
+        int id;
         struct dma_fence base;
 };

@@ -1052,6 +1053,11 @@ static void etnaviv_fence_release(struct dma_fence *fence)
 {
         struct etnaviv_fence *f = to_etnaviv_fence(fence);

+        /* first remove from IDR, so fence can not be looked up anymore */
+        mutex_lock(&f->gpu->lock);
+        idr_remove(&f->gpu->fence_idr, f->id);
+        mutex_unlock(&f->gpu->lock);
+
         kfree_rcu(f, base.rcu);
 }
@@ -1078,6 +1084,11 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
         if (!f)
                 return NULL;

+        f->id = idr_alloc_cyclic(&gpu->fence_idr, &f->base, 0, INT_MAX, GFP_KERNEL);
+        if (f->id < 0) {
+                kfree(f);
+                return NULL;
+        }
         f->gpu = gpu;

         dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
@@ -1226,35 +1237,43 @@ static void retire_worker(struct work_struct *work)
 }

 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-        u32 fence, struct timespec *timeout)
+        u32 id, struct timespec *timeout)
 {
+        struct dma_fence *fence;
         int ret;

-        if (fence_after(fence, gpu->next_fence)) {
-                DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
-                          fence, gpu->next_fence);
-                return -EINVAL;
-        }
+        /*
+         * Look up the fence and take a reference. The mutex only synchronizes
+         * the IDR lookup with the fence release. We might still find a fence
+         * whose refcount has already dropped to zero. dma_fence_get_rcu
+         * pretends we didn't find a fence in that case.
+         */
+        ret = mutex_lock_interruptible(&gpu->lock);
+        if (ret)
+                return ret;
+        fence = idr_find(&gpu->fence_idr, id);
+        if (fence)
+                fence = dma_fence_get_rcu(fence);
+        mutex_unlock(&gpu->lock);
+
+        if (!fence)
+                return 0;

         if (!timeout) {
                 /* No timeout was requested: just test for completion */
-                ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
+                ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
         } else {
                 unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

-                ret = wait_event_interruptible_timeout(gpu->fence_event,
-                                                fence_completed(gpu, fence),
-                                                remaining);
-                if (ret == 0) {
-                        DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
-                                fence, gpu->retired_fence,
-                                gpu->completed_fence);
+                ret = dma_fence_wait_timeout(fence, true, remaining);
+                if (ret == 0)
                         ret = -ETIMEDOUT;
-                } else if (ret != -ERESTARTSYS) {
+                else if (ret != -ERESTARTSYS)
                         ret = 0;
-                }
         }

+        dma_fence_put(fence);
         return ret;
 }
@@ -1386,6 +1405,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
                 ret = -ENOMEM;
                 goto out_unlock;
         }
+        submit->out_fence_id = to_etnaviv_fence(submit->out_fence)->id;

         gpu->active_fence = submit->out_fence->seqno;
@@ -1490,7 +1510,6 @@ static irqreturn_t irq_handler(int irq, void *data)
                                 continue;

                         gpu->event[event].fence = NULL;
-                        dma_fence_signal(fence);

                         /*
                          * Events can be processed out of order. Eg,
@@ -1503,6 +1522,7 @@ static irqreturn_t irq_handler(int irq, void *data)
                          */
                         if (fence_after(fence->seqno, gpu->completed_fence))
                                 gpu->completed_fence = fence->seqno;
+                        dma_fence_signal(fence);

                         event_free(gpu, event);
@@ -1700,6 +1720,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
         gpu->drm = drm;
         gpu->fence_context = dma_fence_context_alloc(1);
+        idr_init(&gpu->fence_idr);
         spin_lock_init(&gpu->fence_spinlock);

         INIT_LIST_HEAD(&gpu->active_submit_list);
@@ -1751,6 +1772,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
         }

         gpu->drm = NULL;
+        idr_destroy(&gpu->fence_idr);

         if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
                 thermal_cooling_device_unregister(gpu->cooling);

@@ -128,6 +128,7 @@ struct etnaviv_gpu {
         u32 idle_mask;

         /* Fencing support */
+        struct idr fence_idr;
         u32 next_fence;
         u32 active_fence;
         u32 completed_fence;
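
On the wait side, the diff replaces the seqno range check with an IDR lookup that has to tolerate a fence being released concurrently: the GPU lock only orders idr_find() against idr_remove(), and dma_fence_get_rcu() returns NULL for a fence whose refcount has already dropped to zero, which the caller treats as "nothing left to wait for". A sketch of that lookup-and-wait flow, reusing the illustrative demo_gpu structure from the sketch above (hypothetical names, not the driver's code):

/* Look up a fence by its userspace ID and take a reference, or return NULL. */
static struct dma_fence *demo_fence_lookup(struct demo_gpu *gpu, u32 id)
{
        struct dma_fence *fence;

        mutex_lock(&gpu->lock);
        fence = idr_find(&gpu->fence_idr, id);
        if (fence)
                /* Returns NULL if the refcount already dropped to zero. */
                fence = dma_fence_get_rcu(fence);
        mutex_unlock(&gpu->lock);

        return fence;
}

/* Wait on a userspace fence ID; an unknown or already released ID is a no-op. */
static int demo_wait_fence(struct demo_gpu *gpu, u32 id, signed long timeout)
{
        struct dma_fence *fence;
        signed long ret;

        fence = demo_fence_lookup(gpu, id);
        if (!fence)
                return 0;

        ret = dma_fence_wait_timeout(fence, true, timeout);
        dma_fence_put(fence);

        if (ret == 0)
                return -ETIMEDOUT;      /* timed out without signaling */
        if (ret < 0)
                return ret;             /* e.g. -ERESTARTSYS on a signal */
        return 0;
}

Returning 0 for a missing ID mirrors the diff: the ID is removed only when the fence is released, so a fence that can no longer be found has nothing left to wait for.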