Commit 846ade7d authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost updates from Michael Tsirkin:
 "virtio, vhost: fixes, cleanups, features

  This includes the disk/cache memory stats for the virtio balloon,
  as well as multiple fixes and cleanups"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost: don't hold onto file pointer for VHOST_SET_LOG_FD
  vhost: don't hold onto file pointer for VHOST_SET_VRING_ERR
  vhost: don't hold onto file pointer for VHOST_SET_VRING_CALL
  ringtest: ring.c malloc & memset to calloc
  virtio_vop: don't kfree device on register failure
  virtio_pci: don't kfree device on register failure
  virtio: split device_register into device_initialize and device_add
  vhost: remove unused lock check flag in vhost_dev_cleanup()
  vhost: Remove the unused variable.
  virtio_blk: print capacity at probe time
  virtio: make VIRTIO a menuconfig to ease disabling it all
  virtio/ringtest: virtio_ring: fix up need_event math
  virtio/ringtest: fix up need_event math
  virtio: virtio_mmio: make of_device_ids const.
  firmware: Use PTR_ERR_OR_ZERO()
  virtio-mmio: Use PTR_ERR_OR_ZERO()
  vhost/scsi: Improve a size determination in four functions
  virtio_balloon: include disk/file caches memory statistics
parents 977e4152 d25cc43c
@@ -373,14 +373,12 @@ static ssize_t virtblk_serial_show(struct device *dev,
 static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
-static void virtblk_config_changed_work(struct work_struct *work)
+/* The queue's logical block size must be set before calling this */
+static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
 {
-        struct virtio_blk *vblk =
-                container_of(work, struct virtio_blk, config_work);
         struct virtio_device *vdev = vblk->vdev;
         struct request_queue *q = vblk->disk->queue;
         char cap_str_2[10], cap_str_10[10];
-        char *envp[] = { "RESIZE=1", NULL };
         unsigned long long nblocks;
         u64 capacity;
@@ -402,13 +400,24 @@ static void virtblk_config_changed_work(struct work_struct *work)
                    STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
         dev_notice(&vdev->dev,
-                   "new size: %llu %d-byte logical blocks (%s/%s)\n",
+                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
+                   vblk->disk->disk_name,
+                   resize ? "new size: " : "",
                    nblocks,
                    queue_logical_block_size(q),
                    cap_str_10,
                    cap_str_2);
         set_capacity(vblk->disk, capacity);
+}
+static void virtblk_config_changed_work(struct work_struct *work)
+{
+        struct virtio_blk *vblk =
+                container_of(work, struct virtio_blk, config_work);
+        char *envp[] = { "RESIZE=1", NULL };
+        virtblk_update_capacity(vblk, true);
         revalidate_disk(vblk->disk);
         kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
 }
@@ -621,7 +630,6 @@ static int virtblk_probe(struct virtio_device *vdev)
         struct request_queue *q;
         int err, index;
-        u64 cap;
         u32 v, blk_size, sg_elems, opt_io_size;
         u16 min_io_size;
         u8 physical_block_exp, alignment_offset;
@@ -719,17 +727,6 @@ static int virtblk_probe(struct virtio_device *vdev)
         if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                 set_disk_ro(vblk->disk, 1);
-        /* Host must always specify the capacity. */
-        virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
-        /* If capacity is too big, truncate with warning. */
-        if ((sector_t)cap != cap) {
-                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
-                         (unsigned long long)cap);
-                cap = (sector_t)-1;
-        }
-        set_capacity(vblk->disk, cap);
         /* We can handle whatever the host told us to handle. */
         blk_queue_max_segments(q, vblk->sg_elems-2);
@@ -780,6 +777,7 @@ static int virtblk_probe(struct virtio_device *vdev)
         if (!err && opt_io_size)
                 blk_queue_io_opt(q, blk_size * opt_io_size);
+        virtblk_update_capacity(vblk, false);
         virtio_device_ready(vdev);
         device_add_disk(&vdev->dev, vblk->disk);
...
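For illustration, probe and resize now share the same message format: the disk name is always printed and the "new size: " prefix only appears on a configuration-change resize. With a hypothetical 20 GiB disk later grown to 30 GiB, the two dev_notice() lines would read roughly as follows (device name, capacities and the exact log prefix are illustrative, not taken from the patch):

virtio_blk virtio0: [vda] 41943040 512-byte logical blocks (21.5 GB/20.0 GiB)
virtio_blk virtio0: [vda] new size: 62914560 512-byte logical blocks (32.2 GB/30.0 GiB)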
@@ -694,10 +694,8 @@ static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
          */
         fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
                         PLATFORM_DEVID_NONE, res, processed);
-        if (IS_ERR(fw_cfg_cmdline_dev))
-                return PTR_ERR(fw_cfg_cmdline_dev);
-        return 0;
+        return PTR_ERR_OR_ZERO(fw_cfg_cmdline_dev);
 }
 static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
...
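The conversions to PTR_ERR_OR_ZERO() here and in virtio_mmio further down lean on the helper from include/linux/err.h; as a sketch of its semantics (reproduced from memory, so treat the exact annotations as approximate), it folds the removed two-branch check into a single expression:

/* Sketch of the helper's semantics (see include/linux/err.h). */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        else
                return 0;
}

so returning its result directly preserves the old behaviour: an errno-encoded pointer becomes a negative error code, a valid pointer becomes 0.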
@@ -452,10 +452,12 @@ static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
 static void vop_virtio_release_dev(struct device *_d)
 {
-        /*
-         * No need for a release method similar to virtio PCI.
-         * Provide an empty one to avoid getting a warning from core.
-         */
+        struct virtio_device *vdev =
+                container_of(_d, struct virtio_device, dev);
+        struct _vop_vdev *vop_vdev =
+                container_of(vdev, struct _vop_vdev, vdev);
+        kfree(vop_vdev);
 }
 /*
@@ -466,7 +468,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
                            unsigned int offset, struct vop_device *vpdev,
                            int dnode)
 {
-        struct _vop_vdev *vdev;
+        struct _vop_vdev *vdev, *reg_dev = NULL;
         int ret;
         u8 type = ioread8(&d->type);
@@ -497,6 +499,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
         vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
         ret = register_virtio_device(&vdev->vdev);
+        reg_dev = vdev;
         if (ret) {
                 dev_err(_vop_dev(vdev),
                         "Failed to register vop device %u type %u\n",
@@ -512,7 +515,10 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
 free_irq:
         vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
 kfree:
-        kfree(vdev);
+        if (reg_dev)
+                put_device(&vdev->vdev.dev);
+        else
+                kfree(vdev);
         return ret;
 }
@@ -568,7 +574,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
         iowrite8(-1, &dc->h2c_vdev_db);
         if (status & VIRTIO_CONFIG_S_DRIVER_OK)
                 wait_for_completion(&vdev->reset_done);
-        kfree(vdev);
+        put_device(&vdev->vdev.dev);
         iowrite8(1, &dc->guest_ack);
         dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
                 __func__, __LINE__, ioread8(&dc->guest_ack));
...
@@ -1015,7 +1015,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
         vhost_net_stop(n, &tx_sock, &rx_sock);
         vhost_net_flush(n);
         vhost_dev_stop(&n->dev);
-        vhost_dev_cleanup(&n->dev, false);
+        vhost_dev_cleanup(&n->dev);
         vhost_net_vq_reset(n);
         if (tx_sock)
                 sockfd_put(tx_sock);
...
@@ -586,8 +586,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
         sg = cmd->tvc_sgl;
         prot_sg = cmd->tvc_prot_sgl;
         pages = cmd->tvc_upages;
-        memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
+        memset(cmd, 0, sizeof(*cmd));
         cmd->tvc_sgl = sg;
         cmd->tvc_prot_sgl = prot_sg;
         cmd->tvc_upages = pages;
@@ -1420,7 +1419,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
         mutex_unlock(&vs->dev.mutex);
         vhost_scsi_clear_endpoint(vs, &t);
         vhost_dev_stop(&vs->dev);
-        vhost_dev_cleanup(&vs->dev, false);
+        vhost_dev_cleanup(&vs->dev);
         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
         vhost_scsi_flush(vs);
         kfree(vs->dev.vqs);
@@ -1725,7 +1724,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
                 return -EEXIST;
         }
-        tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
+        tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
         if (!tv_nexus) {
                 mutex_unlock(&tpg->tv_tpg_mutex);
                 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
@@ -1926,7 +1925,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn,
         if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
                 return ERR_PTR(-EINVAL);
-        tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
+        tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
         if (!tpg) {
                 pr_err("Unable to allocate struct vhost_scsi_tpg");
                 return ERR_PTR(-ENOMEM);
@@ -1980,7 +1979,7 @@ vhost_scsi_make_tport(struct target_fabric_configfs *tf,
         /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
                 return ERR_PTR(-EINVAL); */
-        tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
+        tport = kzalloc(sizeof(*tport), GFP_KERNEL);
         if (!tport) {
                 pr_err("Unable to allocate struct vhost_scsi_tport");
                 return ERR_PTR(-ENOMEM);
...
@@ -157,7 +157,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
         vhost_test_stop(n, &private);
         vhost_test_flush(n);
-        vhost_dev_cleanup(&n->dev, false);
+        vhost_dev_cleanup(&n->dev);
         /* We do an extra flush before freeing memory,
          * since jobs can re-queue themselves. */
         vhost_test_flush(n);
...
@@ -181,7 +181,6 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 {
         clear_bit(VHOST_WORK_QUEUED, &work->flags);
         work->fn = fn;
-        init_waitqueue_head(&work->done);
 }
 EXPORT_SYMBOL_GPL(vhost_work_init);
@@ -319,10 +318,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
         vq->acked_features = 0;
         vq->log_base = NULL;
         vq->error_ctx = NULL;
-        vq->error = NULL;
         vq->kick = NULL;
         vq->call_ctx = NULL;
-        vq->call = NULL;
         vq->log_ctx = NULL;
         vhost_reset_is_le(vq);
         vhost_disable_cross_endian(vq);
@@ -422,7 +419,6 @@ void vhost_dev_init(struct vhost_dev *dev,
         dev->nvqs = nvqs;
         mutex_init(&dev->mutex);
         dev->log_ctx = NULL;
-        dev->log_file = NULL;
         dev->umem = NULL;
         dev->iotlb = NULL;
         dev->mm = NULL;
@@ -544,7 +540,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
 {
         int i;
-        vhost_dev_cleanup(dev, true);
+        vhost_dev_cleanup(dev);
         /* Restore memory to default empty mapping. */
         INIT_LIST_HEAD(&umem->umem_list);
@@ -611,31 +607,23 @@ static void vhost_clear_msg(struct vhost_dev *dev)
         spin_unlock(&dev->iotlb_lock);
 }
-/* Caller should have device mutex if and only if locked is set */
-void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
+void vhost_dev_cleanup(struct vhost_dev *dev)
 {
         int i;
         for (i = 0; i < dev->nvqs; ++i) {
                 if (dev->vqs[i]->error_ctx)
                         eventfd_ctx_put(dev->vqs[i]->error_ctx);
-                if (dev->vqs[i]->error)
-                        fput(dev->vqs[i]->error);
                 if (dev->vqs[i]->kick)
                         fput(dev->vqs[i]->kick);
                 if (dev->vqs[i]->call_ctx)
                         eventfd_ctx_put(dev->vqs[i]->call_ctx);
-                if (dev->vqs[i]->call)
-                        fput(dev->vqs[i]->call);
                 vhost_vq_reset(dev, dev->vqs[i]);
         }
         vhost_dev_free_iovecs(dev);
         if (dev->log_ctx)
                 eventfd_ctx_put(dev->log_ctx);
         dev->log_ctx = NULL;
-        if (dev->log_file)
-                fput(dev->log_file);
-        dev->log_file = NULL;
         /* No one will access memory at this point */
         vhost_umem_clean(dev->umem);
         dev->umem = NULL;
@@ -1492,38 +1480,24 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                         r = -EFAULT;
                         break;
                 }
-                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
-                if (IS_ERR(eventfp)) {
-                        r = PTR_ERR(eventfp);
+                ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+                if (IS_ERR(ctx)) {
+                        r = PTR_ERR(ctx);
                         break;
                 }
-                if (eventfp != vq->call) {
-                        filep = vq->call;
-                        ctx = vq->call_ctx;
-                        vq->call = eventfp;
-                        vq->call_ctx = eventfp ?
-                                eventfd_ctx_fileget(eventfp) : NULL;
-                } else
-                        filep = eventfp;
+                swap(ctx, vq->call_ctx);
                 break;
         case VHOST_SET_VRING_ERR:
                 if (copy_from_user(&f, argp, sizeof f)) {
                         r = -EFAULT;
                         break;
                 }
-                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
-                if (IS_ERR(eventfp)) {
-                        r = PTR_ERR(eventfp);
+                ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+                if (IS_ERR(ctx)) {
+                        r = PTR_ERR(ctx);
                         break;
                 }
-                if (eventfp != vq->error) {
-                        filep = vq->error;
-                        vq->error = eventfp;
-                        ctx = vq->error_ctx;
-                        vq->error_ctx = eventfp ?
-                                eventfd_ctx_fileget(eventfp) : NULL;
-                } else
-                        filep = eventfp;
+                swap(ctx, vq->error_ctx);
                 break;
         case VHOST_SET_VRING_ENDIAN:
                 r = vhost_set_vring_endian(vq, argp);
@@ -1551,7 +1525,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
         if (pollstop && vq->handle_kick)
                 vhost_poll_stop(&vq->poll);
-        if (ctx)
+        if (!IS_ERR_OR_NULL(ctx))
                 eventfd_ctx_put(ctx);
         if (filep)
                 fput(filep);
@@ -1594,8 +1568,7 @@ EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
 /* Caller must have device mutex */
 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 {
-        struct file *eventfp, *filep = NULL;
-        struct eventfd_ctx *ctx = NULL;
+        struct eventfd_ctx *ctx;
         u64 p;
         long r;
         int i, fd;
@@ -1641,19 +1614,12 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
                 r = get_user(fd, (int __user *)argp);
                 if (r < 0)
                         break;
-                eventfp = fd == -1 ? NULL : eventfd_fget(fd);
-                if (IS_ERR(eventfp)) {
-                        r = PTR_ERR(eventfp);
+                ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
+                if (IS_ERR(ctx)) {
+                        r = PTR_ERR(ctx);
                         break;
                 }
-                if (eventfp != d->log_file) {
-                        filep = d->log_file;
-                        d->log_file = eventfp;
-                        ctx = d->log_ctx;
-                        d->log_ctx = eventfp ?
-                                eventfd_ctx_fileget(eventfp) : NULL;
-                } else
-                        filep = eventfp;
+                swap(ctx, d->log_ctx);
                 for (i = 0; i < d->nvqs; ++i) {
                         mutex_lock(&d->vqs[i]->mutex);
                         d->vqs[i]->log_ctx = d->log_ctx;
@@ -1661,8 +1627,6 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
                 }
                 if (ctx)
                         eventfd_ctx_put(ctx);
-                if (filep)
-                        fput(filep);
                 break;
         default:
                 r = -ENOIOCTLCMD;
...
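For context on the three ioctls being reworked: userspace hands the vhost device one eventfd per virtqueue (and one for the dirty log), and after this change the kernel keeps only the eventfd_ctx instead of also pinning the backing struct file. A minimal userspace sketch of the call path these handlers serve; the use of /dev/vhost-net, virtqueue index 0 and the omitted error handling are illustrative assumptions, while the ioctl names and struct vhost_vring_file come from <linux/vhost.h>:

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Hand a "call" eventfd for virtqueue 0 to an already-opened vhost device. */
int setup_call_eventfd(int vhost_fd)
{
        int call_fd = eventfd(0, EFD_CLOEXEC);
        struct vhost_vring_file file = { .index = 0, .fd = call_fd };

        ioctl(vhost_fd, VHOST_SET_OWNER);             /* bind the device to this process */
        ioctl(vhost_fd, VHOST_SET_VRING_CALL, &file); /* kernel signals call_fd for used buffers */
        /* Passing .fd = -1 later detaches the eventfd: the ctx == NULL case above. */
        return call_fd;
}

with vhost_fd obtained from, e.g., open("/dev/vhost-net", O_RDWR).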
@@ -20,10 +20,6 @@ typedef void (*vhost_work_fn_t)(struct vhost_work *work);
 struct vhost_work {
         struct llist_node node;
         vhost_work_fn_t fn;
-        wait_queue_head_t done;
-        int flushing;
-        unsigned queue_seq;
-        unsigned done_seq;
         unsigned long flags;
 };
@@ -96,8 +92,6 @@ struct vhost_virtqueue {
         struct vring_used __user *used;
         const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
         struct file *kick;
-        struct file *call;
-        struct file *error;
         struct eventfd_ctx *call_ctx;
         struct eventfd_ctx *error_ctx;
         struct eventfd_ctx *log_ctx;
@@ -163,7 +157,6 @@ struct vhost_dev {
         struct mutex mutex;
         struct vhost_virtqueue **vqs;
         int nvqs;
-        struct file *log_file;
         struct eventfd_ctx *log_ctx;
         struct llist_head work_list;
         struct task_struct *worker;
@@ -181,7 +174,7 @@ bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
 struct vhost_umem *vhost_dev_reset_owner_prepare(void);
 void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
-void vhost_dev_cleanup(struct vhost_dev *, bool locked);
+void vhost_dev_cleanup(struct vhost_dev *);
 void vhost_dev_stop(struct vhost_dev *);
 long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
 long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
...
@@ -599,7 +599,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
         }
         spin_unlock_bh(&vsock->send_pkt_list_lock);
-        vhost_dev_cleanup(&vsock->dev, false);
+        vhost_dev_cleanup(&vsock->dev);
         kfree(vsock->dev.vqs);
         vhost_vsock_free(vsock);
         return 0;
...
@@ -5,7 +5,11 @@ config VIRTIO
           bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
           or CONFIG_S390_GUEST.
-menu "Virtio drivers"
+menuconfig VIRTIO_MENU
+        bool "Virtio drivers"
+        default y
+if VIRTIO_MENU
 config VIRTIO_PCI
         tristate "PCI driver for virtio devices"
@@ -79,4 +83,4 @@ config VIRTIO_MMIO_CMDLINE_DEVICES
           If unsure, say 'N'.
-endmenu
+endif # VIRTIO_MENU
@@ -303,11 +303,21 @@ void unregister_virtio_driver(struct virtio_driver *driver)
 }
 EXPORT_SYMBOL_GPL(unregister_virtio_driver);
+/**
+ * register_virtio_device - register virtio device
+ * @dev        : virtio device to be registered
+ *
+ * On error, the caller must call put_device on &@dev->dev (and not kfree),
+ * as another code path may have obtained a reference to @dev.
+ *
+ * Returns: 0 on suceess, -error on failure
+ */
 int register_virtio_device(struct virtio_device *dev)
 {
         int err;
         dev->dev.bus = &virtio_bus;
+        device_initialize(&dev->dev);
         /* Assign a unique device index and hence name. */
         err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
@@ -330,9 +340,11 @@ int register_virtio_device(struct virtio_device *dev)
         INIT_LIST_HEAD(&dev->vqs);
-        /* device_register() causes the bus infrastructure to look for a
-         * matching driver. */
-        err = device_register(&dev->dev);
+        /*
+         * device_add() causes the bus infrastructure to look for a matching
+         * driver.
+         */
+        err = device_add(&dev->dev);
         if (err)
                 ida_simple_remove(&virtio_index_ida, dev->index);
 out:
...
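The motivation for calling device_initialize() early is reference counting: once the embedded kobject is initialised, other code may legitimately hold a reference to the device, so a failed registration has to be unwound with put_device() (which ends up in the release callback) rather than kfree(). For orientation only, device_register() in the driver core amounts to the same two calls back to back, which is exactly the pair this patch splits apart:

/* Rough equivalence, not the literal drivers/base source: */
int device_register(struct device *dev)
{
        device_initialize(dev);         /* set up the kobject and refcount */
        return device_add(dev);         /* make the device visible on its bus */
}

That is why the vop and virtio_pci callers elsewhere in this series switch their error paths from kfree() to put_device().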
@@ -257,11 +257,13 @@ static unsigned int update_balloon_stats(struct virtio_balloon *vb)
         struct sysinfo i;
         unsigned int idx = 0;
         long available;
+        unsigned long caches;
         all_vm_events(events);
         si_meminfo(&i);
         available = si_mem_available();
+        caches = global_node_page_state(NR_FILE_PAGES);
 #ifdef CONFIG_VM_EVENT_COUNTERS
         update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
@@ -277,6 +279,8 @@ static unsigned int update_balloon_stats(struct virtio_balloon *vb)
                     pages_to_bytes(i.totalram));
         update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
                     pages_to_bytes(available));
+        update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
+                    pages_to_bytes(caches));
         return idx;
 }
...
@@ -662,10 +662,8 @@ static int vm_cmdline_set(const char *device,
         pdev = platform_device_register_resndata(&vm_cmdline_parent,
                         "virtio-mmio", vm_cmdline_id++,
                         resources, ARRAY_SIZE(resources), NULL, 0);
-        if (IS_ERR(pdev))
-                return PTR_ERR(pdev);
-        return 0;
+        return PTR_ERR_OR_ZERO(pdev);
 }
 static int vm_cmdline_get_device(struct device *dev, void *data)
@@ -725,7 +723,7 @@ static void vm_unregister_cmdline_devices(void)
 /* Platform driver */
-static struct of_device_id virtio_mmio_match[] = {
+static const struct of_device_id virtio_mmio_match[] = {
         { .compatible = "virtio,mmio", },
         {},
 };
...
@@ -513,7 +513,7 @@ static void virtio_pci_release_dev(struct device *_d)
 static int virtio_pci_probe(struct pci_dev *pci_dev,
                             const struct pci_device_id *id)
 {
-        struct virtio_pci_device *vp_dev;
+        struct virtio_pci_device *vp_dev, *reg_dev = NULL;
         int rc;
         /* allocate our structure and fill it out */
@@ -551,6 +551,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
         pci_set_master(pci_dev);
         rc = register_virtio_device(&vp_dev->vdev);
+        reg_dev = vp_dev;
         if (rc)
                 goto err_register;
@@ -564,7 +565,10 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 err_probe:
         pci_disable_device(pci_dev);
 err_enable_device:
-        kfree(vp_dev);
+        if (reg_dev)
+                put_device(&vp_dev->vdev.dev);
+        else
+                kfree(vp_dev);
         return rc;
 }
...
@@ -52,7 +52,8 @@ struct virtio_balloon_config {
 #define VIRTIO_BALLOON_S_MEMFREE  4   /* Total amount of free memory */
 #define VIRTIO_BALLOON_S_MEMTOT   5   /* Total amount of memory */
 #define VIRTIO_BALLOON_S_AVAIL    6   /* Available memory as in /proc */
-#define VIRTIO_BALLOON_S_NR       7
+#define VIRTIO_BALLOON_S_CACHES   7   /* Disk caches */
+#define VIRTIO_BALLOON_S_NR       8
 /*
  * Memory statistics structure.
...
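Each statistic travels to the host as a tag/value pair on the balloon's stats virtqueue, so the new VIRTIO_BALLOON_S_CACHES entry is simply one more element of that array and the S_NR count grows accordingly. As a sketch of the wire format, reproduced from memory from the same header:

/* Entry layout used by the stats virtqueue (sketch). */
struct virtio_balloon_stat {
        __virtio16 tag;         /* e.g. VIRTIO_BALLOON_S_CACHES */
        __virtio64 val;         /* for S_CACHES: page cache size in bytes */
} __attribute__((packed));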
@@ -84,12 +84,11 @@ void alloc_ring(void)
                 perror("Unable to allocate ring buffer.\n");
                 exit(3);
         }
-        event = malloc(sizeof *event);
+        event = calloc(1, sizeof(*event));
         if (!event) {
                 perror("Unable to allocate event buffer.\n");
                 exit(3);
         }
-        memset(event, 0, sizeof *event);
         guest.avail_idx = 0;
         guest.kicked_avail_idx = -1;
         guest.last_used_idx = 0;
@@ -102,12 +101,11 @@ void alloc_ring(void)
                 ring[i] = desc;
         }
         guest.num_free = ring_size;
-        data = malloc(ring_size * sizeof *data);
+        data = calloc(ring_size, sizeof(*data));
         if (!data) {
                 perror("Unable to allocate data buffer.\n");
                 exit(3);
         }
-        memset(data, 0, ring_size * sizeof *data);
 }
 /* guest side */
@@ -188,16 +186,18 @@ bool enable_call()
 void kick_available(void)
 {
+        bool need;
         /* Flush in previous flags write */
         /* Barrier C (for pairing) */
         smp_mb();
-        if (!need_event(event->kick_index,
+        need = need_event(event->kick_index,
                         guest.avail_idx,
-                        guest.kicked_avail_idx))
-                return;
+                        guest.kicked_avail_idx);
         guest.kicked_avail_idx = guest.avail_idx;
-        kick();
+        if (need)
+                kick();
 }
 /* host side */
@@ -253,14 +253,18 @@ bool use_buf(unsigned *lenp, void **bufp)
 void call_used(void)
 {
+        bool need;
         /* Flush in previous flags write */
         /* Barrier D (for pairing) */
         smp_mb();
-        if (!need_event(event->call_index,
+        need = need_event(event->call_index,
                         host.used_idx,
-                        host.called_used_idx))
-                return;
+                        host.called_used_idx);
         host.called_used_idx = host.used_idx;
-        call();
+        if (need)
+                call();
 }
@@ -225,16 +225,18 @@ bool enable_call()
 void kick_available(void)
 {
+        bool need;
         /* Flush in previous flags write */
         /* Barrier C (for pairing) */
         smp_mb();
-        if (!vring_need_event(vring_avail_event(&ring),
+        need = vring_need_event(vring_avail_event(&ring),
                         guest.avail_idx,
-                        guest.kicked_avail_idx))
-                return;
+                        guest.kicked_avail_idx);
         guest.kicked_avail_idx = guest.avail_idx;
-        kick();
+        if (need)
+                kick();
 }
 /* host side */
@@ -316,14 +318,16 @@ bool use_buf(unsigned *lenp, void **bufp)
 void call_used(void)
 {
+        bool need;
         /* Flush in previous flags write */
         /* Barrier D (for pairing) */
         smp_mb();
-        if (!vring_need_event(vring_used_event(&ring),
+        need = vring_need_event(vring_used_event(&ring),
                         host.used_idx,
-                        host.called_used_idx))
-                return;
+                        host.called_used_idx);
         host.called_used_idx = host.used_idx;
-        call();
+        if (need)
+                call();
 }
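Both ringtest fixes make the same point: the "old" index fed into the event-index comparison has to advance on every pass, not only when a notification is actually sent, otherwise later comparisons are made against a stale index. The comparison itself is the 16-bit wrap-around test also used by vring_need_event() in the virtio ring UAPI header; a small self-contained sketch of how that math behaves (the indices below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same comparison as vring_need_event(): did we cross event_idx, i.e. is event_idx in [old, new_idx)? */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
        uint16_t event_idx = 5;   /* other side asked to be notified at index 5 */

        printf("%d\n", need_event(event_idx, 4, 0));  /* 0: index 5 not reached yet  */
        printf("%d\n", need_event(event_idx, 8, 4));  /* 1: crossed index 5, notify  */
        printf("%d\n", need_event(event_idx, 9, 8));  /* 0: already notified earlier */
        return 0;
}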