Commit 06ca287d authored by Rusty Russell's avatar Rusty Russell

virtio: move queue_index and num_free fields into core struct virtqueue.

They're generic concepts, so hoist them.  This also avoids accessor
functions (though kept around for merge with DaveM's net tree).

This goes even further than Jason Wang's 17bb6d40 patch
("virtio-ring: move queue_index to vring_virtqueue") which moved the
queue_index from the specific transport.
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent 1ce6853a
...@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq) ...@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)
/* We write the queue's selector into the notification register to /* We write the queue's selector into the notification register to
* signal the other end */ * signal the other end */
writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
} }
/* Notify all virtqueues on an interrupt. */ /* Notify all virtqueues on an interrupt. */
...@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq) ...@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv; struct virtio_mmio_vq_info *info = vq->priv;
unsigned long flags, size; unsigned long flags, size;
unsigned int index = virtqueue_get_queue_index(vq); unsigned int index = vq->index;
spin_lock_irqsave(&vm_dev->lock, flags); spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node); list_del(&info->node);
......
...@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq) ...@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)
/* we write the queue's selector into the notification register to /* we write the queue's selector into the notification register to
* signal the other end */ * signal the other end */
iowrite16(virtqueue_get_queue_index(vq), iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
} }
/* Handle a configuration change: Tell driver if it wants to know. */ /* Handle a configuration change: Tell driver if it wants to know. */
...@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq) ...@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
list_del(&info->node); list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags); spin_unlock_irqrestore(&vp_dev->lock, flags);
iowrite16(virtqueue_get_queue_index(vq), iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
if (vp_dev->msix_enabled) { if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR, iowrite16(VIRTIO_MSI_NO_VECTOR,
......
...@@ -93,8 +93,6 @@ struct vring_virtqueue ...@@ -93,8 +93,6 @@ struct vring_virtqueue
/* Host publishes avail event idx */ /* Host publishes avail event idx */
bool event; bool event;
/* Number of free buffers */
unsigned int num_free;
/* Head of free buffer list. */ /* Head of free buffer list. */
unsigned int free_head; unsigned int free_head;
/* Number we've added since last sync. */ /* Number we've added since last sync. */
...@@ -106,9 +104,6 @@ struct vring_virtqueue ...@@ -106,9 +104,6 @@ struct vring_virtqueue
/* How to notify other side. FIXME: commonalize hcalls! */ /* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq); void (*notify)(struct virtqueue *vq);
/* Index of the queue */
int queue_index;
#ifdef DEBUG #ifdef DEBUG
/* They're supposed to lock for us. */ /* They're supposed to lock for us. */
unsigned int in_use; unsigned int in_use;
...@@ -167,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq, ...@@ -167,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
desc[i-1].next = 0; desc[i-1].next = 0;
/* We're about to use a buffer */ /* We're about to use a buffer */
vq->num_free--; vq->vq.num_free--;
/* Use a single buffer which doesn't continue */ /* Use a single buffer which doesn't continue */
head = vq->free_head; head = vq->free_head;
...@@ -181,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq, ...@@ -181,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head; return head;
} }
int virtqueue_get_queue_index(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->queue_index;
}
EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
/** /**
* virtqueue_add_buf - expose buffer to other end * virtqueue_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about. * @vq: the struct virtqueue we're talking about.
...@@ -235,7 +223,7 @@ int virtqueue_add_buf(struct virtqueue *_vq, ...@@ -235,7 +223,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
/* If the host supports indirect descriptor tables, and we have multiple /* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */ * buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) { if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
head = vring_add_indirect(vq, sg, out, in, gfp); head = vring_add_indirect(vq, sg, out, in, gfp);
if (likely(head >= 0)) if (likely(head >= 0))
goto add_head; goto add_head;
...@@ -244,9 +232,9 @@ int virtqueue_add_buf(struct virtqueue *_vq, ...@@ -244,9 +232,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
BUG_ON(out + in > vq->vring.num); BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0); BUG_ON(out + in == 0);
if (vq->num_free < out + in) { if (vq->vq.num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n", pr_debug("Can't add buf len %i - avail = %i\n",
out + in, vq->num_free); out + in, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if /* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the * there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */ * host should service the ring ASAP. */
...@@ -257,7 +245,7 @@ int virtqueue_add_buf(struct virtqueue *_vq, ...@@ -257,7 +245,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
} }
/* We're about to use some buffers from the free list. */ /* We're about to use some buffers from the free list. */
vq->num_free -= out + in; vq->vq.num_free -= out + in;
head = vq->free_head; head = vq->free_head;
for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
...@@ -303,7 +291,7 @@ int virtqueue_add_buf(struct virtqueue *_vq, ...@@ -303,7 +291,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
pr_debug("Added buffer head %i to %p\n", head, vq); pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq); END_USE(vq);
return vq->num_free; return vq->vq.num_free;
} }
EXPORT_SYMBOL_GPL(virtqueue_add_buf); EXPORT_SYMBOL_GPL(virtqueue_add_buf);
...@@ -400,13 +388,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) ...@@ -400,13 +388,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next; i = vq->vring.desc[i].next;
vq->num_free++; vq->vq.num_free++;
} }
vq->vring.desc[i].next = vq->free_head; vq->vring.desc[i].next = vq->free_head;
vq->free_head = head; vq->free_head = head;
/* Plus final descriptor */ /* Plus final descriptor */
vq->num_free++; vq->vq.num_free++;
} }
static inline bool more_used(const struct vring_virtqueue *vq) static inline bool more_used(const struct vring_virtqueue *vq)
...@@ -606,7 +594,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) ...@@ -606,7 +594,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
return buf; return buf;
} }
/* That should have freed everything. */ /* That should have freed everything. */
BUG_ON(vq->num_free != vq->vring.num); BUG_ON(vq->vq.num_free != vq->vring.num);
END_USE(vq); END_USE(vq);
return NULL; return NULL;
...@@ -660,12 +648,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, ...@@ -660,12 +648,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback; vq->vq.callback = callback;
vq->vq.vdev = vdev; vq->vq.vdev = vdev;
vq->vq.name = name; vq->vq.name = name;
vq->vq.num_free = num;
vq->vq.index = index;
vq->notify = notify; vq->notify = notify;
vq->weak_barriers = weak_barriers; vq->weak_barriers = weak_barriers;
vq->broken = false; vq->broken = false;
vq->last_used_idx = 0; vq->last_used_idx = 0;
vq->num_added = 0; vq->num_added = 0;
vq->queue_index = index;
list_add_tail(&vq->vq.list, &vdev->vqs); list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG #ifdef DEBUG
vq->in_use = false; vq->in_use = false;
...@@ -680,7 +669,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, ...@@ -680,7 +669,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
/* Put everything in free lists. */ /* Put everything in free lists. */
vq->num_free = num;
vq->free_head = 0; vq->free_head = 0;
for (i = 0; i < num-1; i++) { for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1; vq->vring.desc[i].next = i+1;
......
...@@ -16,12 +16,20 @@ ...@@ -16,12 +16,20 @@
* @name: the name of this virtqueue (mainly for debugging) * @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for. * @vdev: the virtio device this queue was created for.
* @priv: a pointer for the virtqueue implementation to use. * @priv: a pointer for the virtqueue implementation to use.
* @index: the zero-based ordinal number for this queue.
* @num_free: number of elements we expect to be able to fit.
*
* A note on @num_free: with indirect buffers, each buffer needs one
* element in the queue, otherwise a buffer will need one element per
* sg element.
*/ */
struct virtqueue { struct virtqueue {
struct list_head list; struct list_head list;
void (*callback)(struct virtqueue *vq); void (*callback)(struct virtqueue *vq);
const char *name; const char *name;
struct virtio_device *vdev; struct virtio_device *vdev;
unsigned int index;
unsigned int num_free;
void *priv; void *priv;
}; };
...@@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq); ...@@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
unsigned int virtqueue_get_vring_size(struct virtqueue *vq); unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
int virtqueue_get_queue_index(struct virtqueue *vq); /* FIXME: Obsolete accessor, but required for virtio_net merge. */
static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq)
{
return vq->index;
}
/** /**
* virtio_device - representation of a device using virtio * virtio_device - representation of a device using virtio
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment