Commit 2cd7b6f0 authored by Robert Foss's avatar Robert Foss Committed by Gerd Hoffmann

drm/virtio: add in/out fence support for explicit synchronization

When the execbuf call receives an in-fence it will get the dma_fence
related to that fence fd and wait on it before submitting the draw call.

On the out-fence side we get the fence returned by the submitted draw
call and attach it to a sync_file, then send the sync_file fd to
userspace. On error, -1 is returned to userspace.

VIRTGPU_EXECBUF_FENCE_FD_IN & VIRTGPU_EXECBUF_FENCE_FD_OUT
are supported simultaneously and can both be flagged
on the same execbuf call.
Signed-off-by: default avatarGustavo Padovan <gustavo.padovan@collabora.com>
Signed-off-by: default avatarRobert Foss <robert.foss@collabora.com>
Reviewed-by: default avatarEmil Velikov <emil.velikov@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20181112165157.32765-4-robert.foss@collabora.com
Suggested-by: default avatarRob Herring <robh@kernel.org>
Reviewed-by: default avatarEmil Velikov <emil.velikov@collabora.com>
Signed-off-by: default avatarGerd Hoffmann <kraxel@redhat.com>
parent a56f9c86
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/virtgpu_drm.h> #include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h> #include <drm/ttm/ttm_execbuf_util.h>
#include <linux/sync_file.h>
#include "virtgpu_drv.h" #include "virtgpu_drv.h"
...@@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, ...@@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
struct virtio_gpu_fence *fence; struct virtio_gpu_fence *out_fence;
struct virtio_gpu_object *qobj; struct virtio_gpu_object *qobj;
int ret; int ret;
uint32_t *bo_handles = NULL; uint32_t *bo_handles = NULL;
...@@ -114,6 +115,9 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, ...@@ -114,6 +115,9 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct ttm_validate_buffer *buflist = NULL; struct ttm_validate_buffer *buflist = NULL;
int i; int i;
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct sync_file *sync_file;
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf; void *buf;
if (vgdev->has_virgl_3d == false) if (vgdev->has_virgl_3d == false)
...@@ -124,6 +128,33 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, ...@@ -124,6 +128,33 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
exbuf->fence_fd = -1; exbuf->fence_fd = -1;
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
in_fence = sync_file_get_fence(in_fence_fd);
if (!in_fence)
return -EINVAL;
/*
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
ret = 0;
if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
ret = dma_fence_wait(in_fence, true);
dma_fence_put(in_fence);
if (ret)
return ret;
}
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0)
return out_fence_fd;
}
INIT_LIST_HEAD(&validate_list); INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) { if (exbuf->num_bo_handles) {
...@@ -133,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, ...@@ -133,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
sizeof(struct ttm_validate_buffer), sizeof(struct ttm_validate_buffer),
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
if (!bo_handles || !buflist) { if (!bo_handles || !buflist) {
kvfree(bo_handles); ret = -ENOMEM;
kvfree(buflist); goto out_unused_fd;
return -ENOMEM;
} }
user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles; user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
if (copy_from_user(bo_handles, user_bo_handles, if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) { exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT; ret = -EFAULT;
kvfree(bo_handles); goto out_unused_fd;
kvfree(buflist);
return ret;
} }
for (i = 0; i < exbuf->num_bo_handles; i++) { for (i = 0; i < exbuf->num_bo_handles; i++) {
gobj = drm_gem_object_lookup(drm_file, bo_handles[i]); gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
if (!gobj) { if (!gobj) {
kvfree(bo_handles); ret = -ENOENT;
kvfree(buflist); goto out_unused_fd;
return -ENOENT;
} }
qobj = gem_to_virtio_gpu_obj(gobj); qobj = gem_to_virtio_gpu_obj(gobj);
...@@ -161,6 +188,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, ...@@ -161,6 +188,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
list_add(&buflist[i].head, &validate_list); list_add(&buflist[i].head, &validate_list);
} }
kvfree(bo_handles); kvfree(bo_handles);
bo_handles = NULL;
} }
ret = virtio_gpu_object_list_validate(&ticket, &validate_list); ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
...@@ -174,28 +202,47 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, ...@@ -174,28 +202,47 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unresv; goto out_unresv;
} }
fence = virtio_gpu_fence_alloc(vgdev); out_fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) { if(!out_fence) {
kfree(buf);
ret = -ENOMEM; ret = -ENOMEM;
goto out_unresv; goto out_memdup;
}
if (out_fence_fd >= 0) {
sync_file = sync_file_create(&out_fence->f);
if (!sync_file) {
dma_fence_put(&out_fence->f);
ret = -ENOMEM;
goto out_memdup;
} }
exbuf->fence_fd = out_fence_fd;
fd_install(out_fence_fd, sync_file->file);
}
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, &fence); vfpriv->ctx_id, &out_fence);
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f); ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
/* fence the command bo */ /* fence the command bo */
virtio_gpu_unref_list(&validate_list); virtio_gpu_unref_list(&validate_list);
kvfree(buflist); kvfree(buflist);
dma_fence_put(&fence->f);
return 0; return 0;
out_memdup:
kfree(buf);
out_unresv: out_unresv:
ttm_eu_backoff_reservation(&ticket, &validate_list); ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free: out_free:
virtio_gpu_unref_list(&validate_list); virtio_gpu_unref_list(&validate_list);
out_unused_fd:
kvfree(bo_handles);
kvfree(buflist); kvfree(buflist);
if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd);
return ret; return ret;
} }
......
...@@ -66,7 +66,7 @@ struct drm_virtgpu_execbuffer { ...@@ -66,7 +66,7 @@ struct drm_virtgpu_execbuffer {
__u64 command; /* void* */ __u64 command; /* void* */
__u64 bo_handles; __u64 bo_handles;
__u32 num_bo_handles; __u32 num_bo_handles;
__s32 fence_fd; __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
}; };
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment