Commit a7790d78 authored by Lucas Stach

drm/etnaviv: move workqueue to be per GPU

While the etnaviv workqueue needs to be ordered, as we rely on work items
being executed in queuing order, this is only true for a single GPU.
Having a shared workqueue for all GPUs in the system limits concurrency
artificially.

Getting each GPU its own ordered workqueue still meets our ordering
expectations and enables retire workers to run concurrently.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
parent 4375ffff
...@@ -580,12 +580,6 @@ static int etnaviv_bind(struct device *dev) ...@@ -580,12 +580,6 @@ static int etnaviv_bind(struct device *dev)
} }
drm->dev_private = priv; drm->dev_private = priv;
priv->wq = alloc_ordered_workqueue("etnaviv", 0);
if (!priv->wq) {
ret = -ENOMEM;
goto out_wq;
}
mutex_init(&priv->gem_lock); mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list); INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0; priv->num_gpus = 0;
...@@ -607,9 +601,6 @@ static int etnaviv_bind(struct device *dev) ...@@ -607,9 +601,6 @@ static int etnaviv_bind(struct device *dev)
out_register: out_register:
component_unbind_all(dev, drm); component_unbind_all(dev, drm);
out_bind: out_bind:
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
out_wq:
kfree(priv); kfree(priv);
out_unref: out_unref:
drm_dev_unref(drm); drm_dev_unref(drm);
...@@ -624,9 +615,6 @@ static void etnaviv_unbind(struct device *dev) ...@@ -624,9 +615,6 @@ static void etnaviv_unbind(struct device *dev)
drm_dev_unregister(drm); drm_dev_unregister(drm);
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
component_unbind_all(dev, drm); component_unbind_all(dev, drm);
drm->dev_private = NULL; drm->dev_private = NULL;
......
...@@ -56,18 +56,8 @@ struct etnaviv_drm_private { ...@@ -56,18 +56,8 @@ struct etnaviv_drm_private {
/* list of GEM objects: */ /* list of GEM objects: */
struct mutex gem_lock; struct mutex gem_lock;
struct list_head gem_list; struct list_head gem_list;
struct workqueue_struct *wq;
}; };
static inline void etnaviv_queue_work(struct drm_device *dev,
struct work_struct *w)
{
struct etnaviv_drm_private *priv = dev->dev_private;
queue_work(priv->wq, w);
}
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file); struct drm_file *file);
......
...@@ -958,7 +958,7 @@ static void recover_worker(struct work_struct *work) ...@@ -958,7 +958,7 @@ static void recover_worker(struct work_struct *work)
pm_runtime_put_autosuspend(gpu->dev); pm_runtime_put_autosuspend(gpu->dev);
/* Retire the buffer objects in a work */ /* Retire the buffer objects in a work */
etnaviv_queue_work(gpu->drm, &gpu->retire_work); queue_work(gpu->wq, &gpu->retire_work);
} }
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu) static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
...@@ -994,7 +994,7 @@ static void hangcheck_handler(struct timer_list *t) ...@@ -994,7 +994,7 @@ static void hangcheck_handler(struct timer_list *t)
dev_err(gpu->dev, " completed fence: %u\n", fence); dev_err(gpu->dev, " completed fence: %u\n", fence);
dev_err(gpu->dev, " active fence: %u\n", dev_err(gpu->dev, " active fence: %u\n",
gpu->active_fence); gpu->active_fence);
etnaviv_queue_work(gpu->drm, &gpu->recover_work); queue_work(gpu->wq, &gpu->recover_work);
} }
/* if still more pending work, reset the hangcheck timer: */ /* if still more pending work, reset the hangcheck timer: */
...@@ -1526,7 +1526,7 @@ static irqreturn_t irq_handler(int irq, void *data) ...@@ -1526,7 +1526,7 @@ static irqreturn_t irq_handler(int irq, void *data)
if (gpu->event[event].sync_point) { if (gpu->event[event].sync_point) {
gpu->sync_point_event = event; gpu->sync_point_event = event;
etnaviv_queue_work(gpu->drm, &gpu->sync_point_work); queue_work(gpu->wq, &gpu->sync_point_work);
} }
fence = gpu->event[event].fence; fence = gpu->event[event].fence;
...@@ -1552,7 +1552,7 @@ static irqreturn_t irq_handler(int irq, void *data) ...@@ -1552,7 +1552,7 @@ static irqreturn_t irq_handler(int irq, void *data)
} }
/* Retire the buffer objects in a work */ /* Retire the buffer objects in a work */
etnaviv_queue_work(gpu->drm, &gpu->retire_work); queue_work(gpu->wq, &gpu->retire_work);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
} }
...@@ -1721,12 +1721,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, ...@@ -1721,12 +1721,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
return PTR_ERR(gpu->cooling); return PTR_ERR(gpu->cooling);
} }
gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
if (!gpu->wq) {
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
return -ENOMEM;
}
#ifdef CONFIG_PM #ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev); ret = pm_runtime_get_sync(gpu->dev);
#else #else
ret = etnaviv_gpu_clk_enable(gpu); ret = etnaviv_gpu_clk_enable(gpu);
#endif #endif
if (ret < 0) { if (ret < 0) {
destroy_workqueue(gpu->wq);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling); thermal_cooling_device_unregister(gpu->cooling);
return ret; return ret;
...@@ -1761,6 +1769,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, ...@@ -1761,6 +1769,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
hangcheck_disable(gpu); hangcheck_disable(gpu);
flush_workqueue(gpu->wq);
destroy_workqueue(gpu->wq);
#ifdef CONFIG_PM #ifdef CONFIG_PM
pm_runtime_get_sync(gpu->dev); pm_runtime_get_sync(gpu->dev);
pm_runtime_put_sync_suspend(gpu->dev); pm_runtime_put_sync_suspend(gpu->dev);
......
...@@ -106,6 +106,7 @@ struct etnaviv_gpu { ...@@ -106,6 +106,7 @@ struct etnaviv_gpu {
struct mutex lock; struct mutex lock;
struct etnaviv_chip_identity identity; struct etnaviv_chip_identity identity;
struct etnaviv_file_private *lastctx; struct etnaviv_file_private *lastctx;
struct workqueue_struct *wq;
/* 'ring'-buffer: */ /* 'ring'-buffer: */
struct etnaviv_cmdbuf *buffer; struct etnaviv_cmdbuf *buffer;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment