Commit 764be123 authored by Lucas Stach

drm/etnaviv: convert user fence tracking to XArray

This simplifies the driver code a bit, as XArray already provides
internal locking. IDRs are implemented using XArrays anyway, so
this drops one level of unneeded abstraction.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
parent 2cd5bd98
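
As context for the diff below: an IDR requires the caller to provide its own
locking around allocation and removal, while an XArray initialised with
XA_FLAGS_ALLOC takes its internal spinlock in xa_alloc_cyclic() and xa_erase()
(lookups via xa_load() are RCU-protected). A minimal sketch of that
before/after pattern, using a hypothetical "tracker" struct for illustration
rather than the actual etnaviv code:

/*
 * Minimal sketch of the IDR -> XArray conversion pattern. The "tracker"
 * struct and function names are hypothetical; this is not the etnaviv
 * driver code itself.
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

struct tracker {
	struct mutex idr_lock;		/* before: external lock for the IDR */
	struct idr fence_idr;

	struct xarray user_fences;	/* after: locks internally */
	u32 next_user_fence;		/* cursor for cyclic ID allocation */
};

static void tracker_init(struct tracker *t)
{
	mutex_init(&t->idr_lock);
	idr_init(&t->fence_idr);

	xa_init_flags(&t->user_fences, XA_FLAGS_ALLOC);
}

static int insert_idr(struct tracker *t, void *fence)
{
	int id;

	mutex_lock(&t->idr_lock);
	id = idr_alloc_cyclic(&t->fence_idr, fence, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&t->idr_lock);

	return id;	/* new ID on success, negative errno on failure */
}

static int insert_xarray(struct tracker *t, void *fence, u32 *id)
{
	/* xa_alloc_cyclic() takes the XArray's internal spinlock itself. */
	return xa_alloc_cyclic(&t->user_fences, id, fence, xa_limit_32b,
			       &t->next_user_fence, GFP_KERNEL);
}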
@@ -12,6 +12,7 @@
 #include <linux/sizes.h>
 #include <linux/time64.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_gem.h>
@@ -393,10 +393,11 @@ static void submit_cleanup(struct kref *kref)
 		wake_up_all(&submit->gpu->fence_event);
 
 	if (submit->out_fence) {
-		/* first remove from IDR, so fence can not be found anymore */
-		mutex_lock(&submit->gpu->idr_lock);
-		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-		mutex_unlock(&submit->gpu->idr_lock);
+		/*
+		 * Remove from user fence array before dropping the reference,
+		 * so fence can not be found in lookup anymore.
+		 */
+		xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
 		dma_fence_put(submit->out_fence);
 	}
@@ -1244,7 +1244,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 	 * pretends we didn't find a fence in that case.
 	 */
 	rcu_read_lock();
-	fence = idr_find(&gpu->fence_idr, id);
+	fence = xa_load(&gpu->user_fences, id);
 	if (fence)
 		fence = dma_fence_get_rcu(fence);
 	rcu_read_unlock();
@@ -1744,7 +1744,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
-	idr_init(&gpu->fence_idr);
+	xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
 	spin_lock_init(&gpu->fence_spinlock);
 
 	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
@@ -1798,7 +1798,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	}
 
 	gpu->drm = NULL;
-	idr_destroy(&gpu->fence_idr);
+	xa_destroy(&gpu->user_fences);
 
 	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
 		thermal_cooling_device_unregister(gpu->cooling);
@@ -1831,7 +1831,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
 	mutex_init(&gpu->sched_lock);
-	mutex_init(&gpu->idr_lock);
 
 	/* Map registers: */
 	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
@@ -121,8 +121,8 @@ struct etnaviv_gpu {
 	u32 idle_mask;
 
 	/* Fencing support */
-	struct mutex idr_lock;
-	struct idr fence_idr;
+	struct xarray user_fences;
+	u32 next_user_fence;
 	u32 next_fence;
 	u32 completed_fence;
 	wait_queue_head_t fence_event;
@@ -98,7 +98,7 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 {
 	struct etnaviv_gpu *gpu = submit->gpu;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * Hold the sched lock across the whole operation to avoid jobs being
@@ -110,14 +110,11 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
 	drm_sched_job_arm(&submit->sched_job);
 
 	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-	mutex_lock(&gpu->idr_lock);
-	submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr,
-						submit->out_fence, 0,
-						INT_MAX, GFP_KERNEL);
-	mutex_unlock(&gpu->idr_lock);
-	if (submit->out_fence_id < 0) {
+	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
+			      submit->out_fence, xa_limit_32b,
+			      &gpu->next_user_fence, GFP_KERNEL);
+	if (ret < 0) {
 		drm_sched_job_cleanup(&submit->sched_job);
-		ret = -ENOMEM;
 		goto out_unlock;
 	}
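
A note on the last hunk's error handling: idr_alloc_cyclic() returns the newly
allocated ID (or a negative errno on failure), whereas xa_alloc_cyclic() stores
the ID through its second argument and returns 0 on success, 1 if the cyclic
counter wrapped around, or a negative errno. That is why the new code checks
ret < 0 rather than testing the ID, and why the explicit ret = -ENOMEM
assignment becomes unnecessary. Schematically, reusing the hypothetical names
from the sketch above:

	u32 id;
	int ret;

	ret = xa_alloc_cyclic(&t->user_fences, &id, fence, xa_limit_32b,
			      &t->next_user_fence, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* e.g. -ENOMEM; no ID was allocated */
	/* ret == 1 just means the counter wrapped; 'id' is still valid. */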