Commit 1a5f8090 authored by Mike Christie, committed by Christian Brauner

vhost: move worker thread fields to new struct

This is just a prep patch. It moves the worker-related fields into a new
vhost_worker struct and moves the code around to create some helpers that
will be used in the next patch.
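
For reference, a condensed sketch of the layout this patch introduces (struct
fields and helper names are taken from the diff below; everything else is
omitted):

	struct vhost_worker {
		struct task_struct *task;	/* worker kthread, started by vhost_worker_create() */
		struct llist_head work_list;	/* queued vhost_work items, drained by vhost_worker() */
		struct vhost_dev *dev;		/* back-pointer to the owning device */
		u64 kcov_handle;		/* kcov remote handle, previously kept in vhost_dev */
	};

	struct vhost_dev {
		...
		struct vhost_worker *worker;	/* NULL until vhost_worker_create() runs */
		...
	};

vhost_worker_create() allocates the struct, starts the kthread and attaches
cgroups; vhost_worker_free() stops the kthread and frees it, so
vhost_dev_set_owner() and vhost_dev_cleanup() no longer open-code those steps.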
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent e297cd54
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -255,8 +255,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 		 * sure it was not in the list.
 		 * test_and_set_bit() implies a memory barrier.
 		 */
-		llist_add(&work->node, &dev->work_list);
-		wake_up_process(dev->worker);
+		llist_add(&work->node, &dev->worker->work_list);
+		wake_up_process(dev->worker->task);
 	}
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
@@ -264,7 +264,7 @@ EXPORT_SYMBOL_GPL(vhost_work_queue);
 
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_has_work(struct vhost_dev *dev)
 {
-	return !llist_empty(&dev->work_list);
+	return dev->worker && !llist_empty(&dev->worker->work_list);
 }
 EXPORT_SYMBOL_GPL(vhost_has_work);
@@ -335,7 +335,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 
 static int vhost_worker(void *data)
 {
-	struct vhost_dev *dev = data;
+	struct vhost_worker *worker = data;
+	struct vhost_dev *dev = worker->dev;
 	struct vhost_work *work, *work_next;
 	struct llist_node *node;
 
@@ -350,7 +351,7 @@ static int vhost_worker(void *data)
 			break;
 		}
 
-		node = llist_del_all(&dev->work_list);
+		node = llist_del_all(&worker->work_list);
 		if (!node)
 			schedule();
 
@@ -360,7 +361,7 @@ static int vhost_worker(void *data)
 		llist_for_each_entry_safe(work, work_next, node, node) {
 			clear_bit(VHOST_WORK_QUEUED, &work->flags);
 			__set_current_state(TASK_RUNNING);
-			kcov_remote_start_common(dev->kcov_handle);
+			kcov_remote_start_common(worker->kcov_handle);
 			work->fn(work);
 			kcov_remote_stop();
 			if (need_resched())
@@ -479,7 +480,6 @@ void vhost_dev_init(struct vhost_dev *dev,
 	dev->byte_weight = byte_weight;
 	dev->use_worker = use_worker;
 	dev->msg_handler = msg_handler;
-	init_llist_head(&dev->work_list);
 	init_waitqueue_head(&dev->wait);
 	INIT_LIST_HEAD(&dev->read_list);
 	INIT_LIST_HEAD(&dev->pending_list);
@@ -571,10 +571,60 @@ static void vhost_detach_mm(struct vhost_dev *dev)
 	dev->mm = NULL;
 }
 
+static void vhost_worker_free(struct vhost_dev *dev)
+{
+	struct vhost_worker *worker = dev->worker;
+
+	if (!worker)
+		return;
+
+	dev->worker = NULL;
+	WARN_ON(!llist_empty(&worker->work_list));
+	kthread_stop(worker->task);
+	kfree(worker);
+}
+
+static int vhost_worker_create(struct vhost_dev *dev)
+{
+	struct vhost_worker *worker;
+	struct task_struct *task;
+	int ret;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
+	if (!worker)
+		return -ENOMEM;
+
+	dev->worker = worker;
+	worker->dev = dev;
+	worker->kcov_handle = kcov_common_handle();
+	init_llist_head(&worker->work_list);
+
+	task = kthread_create(vhost_worker, worker, "vhost-%d", current->pid);
+	if (IS_ERR(task)) {
+		ret = PTR_ERR(task);
+		goto free_worker;
+	}
+
+	worker->task = task;
+	wake_up_process(task); /* avoid contributing to loadavg */
+
+	ret = vhost_attach_cgroups(dev);
+	if (ret)
+		goto stop_worker;
+
+	return 0;
+
+stop_worker:
+	kthread_stop(worker->task);
+free_worker:
+	kfree(worker);
+	dev->worker = NULL;
+	return ret;
+}
+
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
-	struct task_struct *worker;
 	int err;
 
 	/* Is there an owner already? */
@@ -585,36 +635,21 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
 
 	vhost_attach_mm(dev);
 
-	dev->kcov_handle = kcov_common_handle();
 	if (dev->use_worker) {
-		worker = kthread_create(vhost_worker, dev,
-					"vhost-%d", current->pid);
-		if (IS_ERR(worker)) {
-			err = PTR_ERR(worker);
-			goto err_worker;
-		}
-
-		dev->worker = worker;
-		wake_up_process(worker); /* avoid contributing to loadavg */
-
-		err = vhost_attach_cgroups(dev);
+		err = vhost_worker_create(dev);
 		if (err)
-			goto err_cgroup;
+			goto err_worker;
 	}
 
 	err = vhost_dev_alloc_iovecs(dev);
 	if (err)
-		goto err_cgroup;
+		goto err_iovecs;
 
 	return 0;
-err_cgroup:
-	if (dev->worker) {
-		kthread_stop(dev->worker);
-		dev->worker = NULL;
-	}
+err_iovecs:
+	vhost_worker_free(dev);
 err_worker:
 	vhost_detach_mm(dev);
-	dev->kcov_handle = 0;
 err_mm:
 	return err;
 }
@@ -705,12 +740,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 	dev->iotlb = NULL;
 	vhost_clear_msg(dev);
 	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
-	WARN_ON(!llist_empty(&dev->work_list));
-	if (dev->worker) {
-		kthread_stop(dev->worker);
-		dev->worker = NULL;
-		dev->kcov_handle = 0;
-	}
+	vhost_worker_free(dev);
 	vhost_detach_mm(dev);
 }
 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -25,6 +25,13 @@ struct vhost_work {
 	unsigned long flags;
 };
 
+struct vhost_worker {
+	struct task_struct *task;
+	struct llist_head work_list;
+	struct vhost_dev *dev;
+	u64 kcov_handle;
+};
+
 /* Poll a file (eventfd or socket) */
 /* Note: there's nothing vhost specific about this structure. */
 struct vhost_poll {
@@ -147,8 +154,7 @@ struct vhost_dev {
 	struct vhost_virtqueue **vqs;
 	int nvqs;
 	struct eventfd_ctx *log_ctx;
-	struct llist_head work_list;
-	struct task_struct *worker;
+	struct vhost_worker *worker;
 	struct vhost_iotlb *umem;
 	struct vhost_iotlb *iotlb;
 	spinlock_t iotlb_lock;
@@ -158,7 +164,6 @@ struct vhost_dev {
 	int iov_limit;
 	int weight;
 	int byte_weight;
-	u64 kcov_handle;
 	bool use_worker;
 	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
 			   struct vhost_iotlb_msg *msg);