Commit 9d6064a3 authored by Asias He, committed by Nicholas Bellinger

tcm_vhost: Use llist for cmd completion list

This drops the cmd completion list spin lock and makes the cmd
completion queue lock-less.
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 703d641d
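
For readers less familiar with <linux/llist.h>: the patch switches the completion queue to the lock-less producer/consumer pattern, in which submitters push nodes with llist_add() and the single vhost worker detaches the whole list at once with llist_del_all(). Below is a minimal sketch of that pattern; the demo_cmd / demo_complete / demo_work names are illustrative only and not taken from the driver.

#include <linux/llist.h>
#include <linux/slab.h>

/* Illustrative type only; the driver's real equivalent is struct tcm_vhost_cmd. */
struct demo_cmd {
	int status;
	struct llist_node node;		/* completion list linkage, no init needed */
};

static LLIST_HEAD(demo_completion_list);

/* Producer side: may be called concurrently from any context. */
static void demo_complete(struct demo_cmd *cmd)
{
	/* llist_add() is a lock-free cmpxchg loop, so no spinlock is taken. */
	llist_add(&cmd->node, &demo_completion_list);
	/* ...then kick the worker, e.g. via vhost_work_queue() in the driver. */
}

/* Consumer side: a single worker detaches the whole list in one operation. */
static void demo_work(void)
{
	struct llist_node *llnode = llist_del_all(&demo_completion_list);

	while (llnode) {
		struct demo_cmd *cmd = llist_entry(llnode, struct demo_cmd, node);

		llnode = llist_next(llnode);
		/* ...fill in the response for cmd, then release it... */
		kfree(cmd);
	}
}

Two details the diff relies on: a struct llist_node needs no initialization before llist_add(), and a zero-filled struct llist_head is already an empty list, which is why the INIT_LIST_HEAD() and spin_lock_init() calls can simply be dropped. Note that llist_del_all() hands the entries back in reverse order of insertion; since each command's response is independent, that ordering is harmless here.
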
@@ -47,6 +47,7 @@
 #include <linux/vhost.h>
 #include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
 #include <linux/virtio_scsi.h>
+#include <linux/llist.h>
 
 #include "vhost.c"
 #include "vhost.h"
@@ -64,8 +65,7 @@ struct vhost_scsi {
 	struct vhost_virtqueue vqs[3];
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
-	struct list_head vs_completion_list; /* cmd completion queue */
-	spinlock_t vs_completion_lock; /* protects s_completion_list */
+	struct llist_head vs_completion_list; /* cmd completion queue */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
@@ -301,9 +301,7 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct vhost_scsi *vs = tv_cmd->tvc_vhost;
 
-	spin_lock_bh(&vs->vs_completion_lock);
-	list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
-	spin_unlock_bh(&vs->vs_completion_lock);
+	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
 
 	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
@@ -347,27 +345,6 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 	kfree(tv_cmd);
 }
 
-/* Dequeue a command from the completion list */
-static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
-	struct vhost_scsi *vs)
-{
-	struct tcm_vhost_cmd *tv_cmd = NULL;
-
-	spin_lock_bh(&vs->vs_completion_lock);
-	if (list_empty(&vs->vs_completion_list)) {
-		spin_unlock_bh(&vs->vs_completion_lock);
-		return NULL;
-	}
-
-	list_for_each_entry(tv_cmd, &vs->vs_completion_list,
-			tvc_completion_list) {
-		list_del(&tv_cmd->tvc_completion_list);
-		break;
-	}
-	spin_unlock_bh(&vs->vs_completion_lock);
-	return tv_cmd;
-}
-
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner
@@ -377,12 +354,18 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 {
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_completion_work);
+	struct virtio_scsi_cmd_resp v_rsp;
 	struct tcm_vhost_cmd *tv_cmd;
+	struct llist_node *llnode;
+	struct se_cmd *se_cmd;
+	int ret;
 
-	while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
-		struct virtio_scsi_cmd_resp v_rsp;
-		struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
-		int ret;
+	llnode = llist_del_all(&vs->vs_completion_list);
+	while (llnode) {
+		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+				tvc_completion_list);
+		llnode = llist_next(llnode);
+		se_cmd = &tv_cmd->tvc_se_cmd;
 
 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
 			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
@@ -426,7 +409,6 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
 		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
 	tv_cmd->tvc_tag = v_req->tag;
 	tv_cmd->tvc_task_attr = v_req->task_attr;
 	tv_cmd->tvc_exp_data_len = exp_data_len;
@@ -857,8 +839,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
-	INIT_LIST_HEAD(&s->vs_completion_list);
-	spin_lock_init(&s->vs_completion_lock);
 
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
@@ -34,7 +34,7 @@ struct tcm_vhost_cmd {
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
 	/* Completed commands list, serviced from vhost worker thread */
-	struct list_head tvc_completion_list;
+	struct llist_node tvc_completion_list;
 };
 
 struct tcm_vhost_nexus {