Commit f0c6cef7 authored by Gerd Hoffmann's avatar Gerd Hoffmann

drm/virtio: add worker for object release

Move object release into a separate worker.  Releasing objects requires
sending commands to the host.  Doing that in the dequeue worker will
cause deadlocks in case the command queue gets filled up, because the
dequeue worker is also the one which will free up slots in the command
queue.
Reported-by: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Tested-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190830060116.10476-1-kraxel@redhat.com
parent 12afce08
...@@ -78,6 +78,7 @@ struct virtio_gpu_object { ...@@ -78,6 +78,7 @@ struct virtio_gpu_object {
struct virtio_gpu_object_array { struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct list_head next;
u32 nents, total; u32 nents, total;
struct drm_gem_object *objs[]; struct drm_gem_object *objs[];
}; };
...@@ -197,6 +198,10 @@ struct virtio_gpu_device { ...@@ -197,6 +198,10 @@ struct virtio_gpu_device {
struct work_struct config_changed_work; struct work_struct config_changed_work;
struct work_struct obj_free_work;
spinlock_t obj_free_lock;
struct list_head obj_free_list;
struct virtio_gpu_drv_capset *capsets; struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets; uint32_t num_capsets;
struct list_head cap_cache; struct list_head cap_cache;
...@@ -246,6 +251,9 @@ void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs); ...@@ -246,6 +251,9 @@ void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
struct dma_fence *fence); struct dma_fence *fence);
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs); void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);
/* virtio vg */ /* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
......
...@@ -239,3 +239,30 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) ...@@ -239,3 +239,30 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
drm_gem_object_put_unlocked(objs->objs[i]); drm_gem_object_put_unlocked(objs->objs[i]);
virtio_gpu_array_free(objs); virtio_gpu_array_free(objs);
} }
/*
 * Queue an object array for release by the dedicated worker.
 *
 * Releasing objects requires sending commands to the host, which must
 * not be done from the ctrlq dequeue worker itself: that worker is the
 * one freeing up slots in the command queue, so blocking there on a
 * full queue would deadlock (see commit message).  Instead the array is
 * parked on vgdev->obj_free_list and obj_free_work is scheduled to
 * process it later.
 */
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	/* obj_free_lock protects obj_free_list against the worker. */
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	/* Kick the worker; a no-op if it is already pending. */
	schedule_work(&vgdev->obj_free_work);
}
/*
 * Worker callback (vgdev->obj_free_work): drain obj_free_list and
 * release every queued object array.
 *
 * The spinlock is dropped around virtio_gpu_array_put_free() because
 * releasing objects sends commands to the host, which must not happen
 * under the lock; entries queued concurrently are picked up on the next
 * loop iteration.
 */
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	for (;;) {
		objs = list_first_entry_or_null(&vgdev->obj_free_list,
						struct virtio_gpu_object_array,
						next);
		if (!objs)
			break;
		list_del(&objs->next);

		/* Drop the lock while talking to the host. */
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}
...@@ -147,6 +147,11 @@ int virtio_gpu_init(struct drm_device *dev) ...@@ -147,6 +147,11 @@ int virtio_gpu_init(struct drm_device *dev)
INIT_WORK(&vgdev->config_changed_work, INIT_WORK(&vgdev->config_changed_work,
virtio_gpu_config_changed_work_func); virtio_gpu_config_changed_work_func);
INIT_WORK(&vgdev->obj_free_work,
virtio_gpu_array_put_free_work);
INIT_LIST_HEAD(&vgdev->obj_free_list);
spin_lock_init(&vgdev->obj_free_lock);
#ifdef __LITTLE_ENDIAN #ifdef __LITTLE_ENDIAN
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true; vgdev->has_virgl_3d = true;
...@@ -226,6 +231,7 @@ void virtio_gpu_deinit(struct drm_device *dev) ...@@ -226,6 +231,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
{ {
struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_device *vgdev = dev->dev_private;
flush_work(&vgdev->obj_free_work);
vgdev->vqs_ready = false; vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work); flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work); flush_work(&vgdev->cursorq.dequeue_work);
......
...@@ -227,7 +227,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) ...@@ -227,7 +227,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
if (entry->objs) if (entry->objs)
virtio_gpu_array_put_free(entry->objs); virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
list_del(&entry->list); list_del(&entry->list);
free_vbuf(vgdev, entry); free_vbuf(vgdev, entry);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment