Commit cc229884 authored by Michael S. Tsirkin, committed by David S. Miller

virtio: support unlocked queue poll

This adds a way to check ring empty state after enable_cb outside any
locks. Will be used by virtio_net.

Note: there's room for more optimization: caller is likely to have a
memory barrier already, which means we might be able to get rid of a
barrier here.  Deferring this optimization until we do some
benchmarking.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 01276ed2
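
As a rough illustration (not part of this commit), a caller such as virtio_net's poll path might pair the two new entry points as sketched below; the locking comments and the process_used_buffers()/reschedule_poll() helpers are hypothetical.

	/* Hypothetical caller sketch: re-enable callbacks while still
	 * serialized with other virtqueue operations, then recheck for the
	 * race after the lock can be dropped. */
	unsigned last_used_idx;

	virtqueue_disable_cb(vq);
	process_used_buffers(vq);               /* hypothetical helper */
	last_used_idx = virtqueue_enable_cb_prepare(vq);
	/* Serialization may end here: virtqueue_poll() needs no locking. */
	if (virtqueue_poll(vq, last_used_idx)) {
		/* A buffer was used between the last check and re-enabling
		 * callbacks, so no callback may fire for it; disable
		 * callbacks again and keep polling. */
		virtqueue_disable_cb(vq);
		reschedule_poll(vq);            /* hypothetical helper */
	}
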
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
  * @vq: the struct virtqueue we're talking about.
  *
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
  *
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used_idx;
 
 	START_USE(vq);
 
@@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = vq->last_used_idx;
+	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	END_USE(vq);
+	return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
 	virtio_mb(vq->weak_barriers);
-	if (unlikely(more_used(vq))) {
-		END_USE(vq);
-		return false;
-	}
+	return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
 
-	END_USE(vq);
-	return true;
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *_vq)
+{
+	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+	return !virtqueue_poll(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -70,6 +70,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
 
 bool virtqueue_enable_cb(struct virtqueue *vq);
 
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+
+bool virtqueue_poll(struct virtqueue *vq, unsigned);
+
 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
 
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);