Commit 744583ec authored by Gerd Hoffmann

drm/virtio: notify virtqueues without holding spinlock

Split virtqueue_kick() call into virtqueue_kick_prepare(), which
requires serialization, and virtqueue_notify(), which does not.  Move
the virtqueue_notify() call out of the critical section protected by the
queue lock.  This avoids triggering a vmexit while holding the lock and
thereby fixes a rather bad case of spinlock contention.
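
For reference, the split works because virtqueue_kick() is simply
virtqueue_kick_prepare() followed by virtqueue_notify(): only the prepare
step reads and updates shared ring state, so only it needs the queue lock.
Below is a minimal sketch of the resulting pattern; struct my_queue,
queue_and_kick() and the single-entry scatterlist are illustrative
placeholders, not part of this driver.

    /*
     * Sketch of the kick split. my_queue and queue_and_kick() are
     * hypothetical; only the virtqueue_* calls are real virtio API.
     */
    #include <linux/scatterlist.h>
    #include <linux/spinlock.h>
    #include <linux/virtio.h>

    struct my_queue {
            struct virtqueue *vq;
            spinlock_t qlock;
    };

    static void queue_and_kick(struct my_queue *q, struct scatterlist *sg)
    {
            bool notify;

            spin_lock(&q->qlock);
            /* Adding a buffer and deciding whether the host must be poked
             * both touch shared ring state, so they stay under the lock. */
            if (virtqueue_add_outbuf(q->vq, sg, 1, sg, GFP_ATOMIC) < 0) {
                    spin_unlock(&q->qlock);
                    return; /* ring full; real code would retry or wait */
            }
            notify = virtqueue_kick_prepare(q->vq);
            spin_unlock(&q->qlock);

            /* The notification itself traps out of the guest (a vmexit)
             * and needs no serialization, so it runs after the unlock. */
            if (notify)
                    virtqueue_notify(q->vq);
    }

A spurious virtqueue_notify() — say, because a concurrent submitter already
kicked the ring — is harmless to virtio, so nothing is lost by issuing it
outside the critical section.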
Suggested-by: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190813082509.29324-3-kraxel@redhat.com
parent 4100a7b8
@@ -252,7 +252,7 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
         wake_up(&vgdev->cursorq.ack_queue);
 }
 
-static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                                 struct virtio_gpu_vbuffer *vbuf)
         __releases(&vgdev->ctrlq.qlock)
         __acquires(&vgdev->ctrlq.qlock)
@@ -260,10 +260,11 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
         struct virtqueue *vq = vgdev->ctrlq.vq;
         struct scatterlist *sgs[3], vcmd, vout, vresp;
         int outcnt = 0, incnt = 0;
+        bool notify = false;
         int ret;
 
         if (!vgdev->vqs_ready)
-                return;
+                return notify;
 
         sg_init_one(&vcmd, vbuf->buf, vbuf->size);
         sgs[outcnt + incnt] = &vcmd;
@@ -292,16 +293,21 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                 trace_virtio_gpu_cmd_queue(vq,
                         (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-                virtqueue_kick(vq);
+                notify = virtqueue_kick_prepare(vq);
         }
+        return notify;
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                          struct virtio_gpu_vbuffer *vbuf)
 {
+        bool notify;
+
         spin_lock(&vgdev->ctrlq.qlock);
-        virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+        notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
         spin_unlock(&vgdev->ctrlq.qlock);
+        if (notify)
+                virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
@@ -310,6 +316,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                 struct virtio_gpu_fence *fence)
 {
         struct virtqueue *vq = vgdev->ctrlq.vq;
+        bool notify;
 
 again:
         spin_lock(&vgdev->ctrlq.qlock);
@@ -330,8 +337,10 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
         if (fence)
                 virtio_gpu_fence_emit(vgdev, hdr, fence);
-        virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+        notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
         spin_unlock(&vgdev->ctrlq.qlock);
+        if (notify)
+                virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -339,6 +348,7 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 {
         struct virtqueue *vq = vgdev->cursorq.vq;
         struct scatterlist *sgs[1], ccmd;
+        bool notify;
         int ret;
         int outcnt;
 
@@ -361,10 +371,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                 trace_virtio_gpu_cmd_queue(vq,
                         (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-                virtqueue_kick(vq);
+                notify = virtqueue_kick_prepare(vq);
         }
         spin_unlock(&vgdev->cursorq.qlock);
+
+        if (notify)
+                virtqueue_notify(vq);
 }
 
 /* just create gem objects for userspace and long lived objects,
...