Commit b4211138 authored by Andy Lutomirski, committed by Michael S. Tsirkin

virtio_mmio: Use the DMA API if enabled

This switches to vring_create_virtqueue, simplifying the driver and
adding DMA API support.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 2a2d1382
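
For context: the parent commit (2a2d1382, "virtio: Add improved queue allocation API") added vring_create_virtqueue(), which allocates the ring inside the virtio core, using the DMA API when vring_use_dma_api() says the device needs it, instead of having each driver hand in pages it allocated itself. A sketch of the signature as that series introduces it (comments are mine):

	struct virtqueue *vring_create_virtqueue(unsigned int index,
						 unsigned int num,	/* maximum ring size */
						 unsigned int vring_align,
						 struct virtio_device *vdev,
						 bool weak_barriers,
						 bool may_reduce_num,	/* may shrink num on allocation failure */
						 bool (*notify)(struct virtqueue *vq),
						 void (*callback)(struct virtqueue *vq),
						 const char *name);

Because may_reduce_num is passed as true below, the core takes over the shrink-and-retry allocation loop this driver used to carry, which is where most of the deleted code comes from.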
drivers/virtio/virtio_mmio.c
@@ -99,12 +99,6 @@ struct virtio_mmio_vq_info {
 	/* the actual virtqueue */
 	struct virtqueue *vq;
 
-	/* the number of entries in the queue */
-	unsigned int num;
-
-	/* the virtual address of the ring queue */
-	void *queue;
-
 	/* the list node for the virtqueues list */
 	struct list_head node;
 };
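
With the core owning the ring, the per-queue bookkeeping that remains is exactly the surviving context lines, i.e.:

	struct virtio_mmio_vq_info {
		/* the actual virtqueue */
		struct virtqueue *vq;

		/* the list node for the virtqueues list */
		struct list_head node;
	};

The num and queue fields can go because the ring size and addresses are now queried back from the virtqueue itself, via virtqueue_get_vring_size() and the virtqueue_get_*_addr() accessors used further down.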
@@ -322,15 +316,13 @@ static void vm_del_vq(struct virtqueue *vq)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
 	struct virtio_mmio_vq_info *info = vq->priv;
-	unsigned long flags, size;
+	unsigned long flags;
 	unsigned int index = vq->index;
 
 	spin_lock_irqsave(&vm_dev->lock, flags);
 	list_del(&info->node);
 	spin_unlock_irqrestore(&vm_dev->lock, flags);
 
-	vring_del_virtqueue(vq);
-
 	/* Select and deactivate the queue */
 	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
 	if (vm_dev->version == 1) {
@@ -340,8 +332,8 @@ static void vm_del_vq(struct virtqueue *vq)
 		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
 	}
 
-	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
-	free_pages_exact(info->queue, size);
+	vring_del_virtqueue(vq);
+
 	kfree(info);
 }
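
Note the ordering change in vm_del_vq(): vring_del_virtqueue() now also frees the ring memory (through the DMA API when enabled), so it moves from before queue selection to after the queue has been deactivated. Pieced together from the two hunks above, with the deactivation branch that the diff elides reconstructed from the driver source, the teardown reads:

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);	/* now unmaps and frees the ring */

	kfree(info);

Freeing only after deactivation matters: with the DMA API in play, tearing down the ring also tears down its IOMMU mappings, and the device must be stopped before those disappear.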
@@ -356,8 +348,6 @@ static void vm_del_vqs(struct virtio_device *vdev)
 	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
 }
 
-
-
 static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name)
@@ -365,7 +355,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 	struct virtio_mmio_vq_info *info;
 	struct virtqueue *vq;
-	unsigned long flags, size;
+	unsigned long flags;
+	unsigned int num;
 	int err;
 
 	if (!name)
@@ -388,66 +379,40 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 		goto error_kmalloc;
 	}
 
-	/* Allocate pages for the queue - start with a queue as big as
-	 * possible (limited by maximum size allowed by device), drop down
-	 * to a minimal size, just big enough to fit descriptor table
-	 * and two rings (which makes it "alignment_size * 2")
-	 */
-	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
-
-	/* If the device reports a 0 entry queue, we won't be able to
-	 * use it to perform I/O, and vring_new_virtqueue() can't create
-	 * empty queues anyway, so don't bother to set up the device.
-	 */
-	if (info->num == 0) {
+	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+	if (num == 0) {
 		err = -ENOENT;
-		goto error_alloc_pages;
+		goto error_new_virtqueue;
 	}
 
-	while (1) {
-		size = PAGE_ALIGN(vring_size(info->num,
-				VIRTIO_MMIO_VRING_ALIGN));
-		/* Did the last iter shrink the queue below minimum size? */
-		if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
-			err = -ENOMEM;
-			goto error_alloc_pages;
-		}
-
-		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-		if (info->queue)
-			break;
-
-		info->num /= 2;
-	}
-
 	/* Create the vring */
-	vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
-				 true, info->queue, vm_notify, callback, name);
+	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+				 true, true, vm_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto error_new_virtqueue;
 	}
 
 	/* Activate the queue */
-	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
 	if (vm_dev->version == 1) {
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
-		writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
+		writel(virtqueue_get_desc_addr(vq) >> PAGE_SHIFT,
 		       vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
 	} else {
 		u64 addr;
 
-		addr = virt_to_phys(info->queue);
+		addr = virtqueue_get_desc_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
 		writel((u32)(addr >> 32),
 				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
 
-		addr = virt_to_phys(virtqueue_get_avail(vq));
+		addr = virtqueue_get_avail_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
 		writel((u32)(addr >> 32),
 				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
 
-		addr = virt_to_phys(virtqueue_get_used(vq));
+		addr = virtqueue_get_used_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
 		writel((u32)(addr >> 32),
 				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
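
The virtqueue_get_desc_addr()/virtqueue_get_avail_addr()/virtqueue_get_used_addr() accessors (also from the parent commit) return dma_addr_t bus addresses, which is what makes the writes above DMA-API-correct where virt_to_phys() was not. The version-2 branch splits each 64-bit address across a LOW/HIGH register pair; a hypothetical helper (not in the patch) showing the pattern:

	static void vm_write_addr64(struct virtio_mmio_device *vm_dev, u64 addr,
				    unsigned long low, unsigned long high)
	{
		/* virtio-mmio registers are 32 bits wide, so write in halves */
		writel((u32)addr, vm_dev->base + low);
		writel((u32)(addr >> 32), vm_dev->base + high);
	}

	/* e.g. for the descriptor table: */
	vm_write_addr64(vm_dev, virtqueue_get_desc_addr(vq),
			VIRTIO_MMIO_QUEUE_DESC_LOW, VIRTIO_MMIO_QUEUE_DESC_HIGH);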
@@ -471,8 +436,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
 		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
 	}
-	free_pages_exact(info->queue, size);
-error_alloc_pages:
 	kfree(info);
 error_kmalloc:
 error_available: