Commit 09236994 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "The executive summary includes:

   - Post-merge review comments for tcm_vhost (MST + nab)
   - Avoid debugging overhead when not debugging for tcm_fc (FCoE) (MDR)
   - Fix NULL pointer dereference bug on alloc_page() failure (Yi Zou)
   - Fix REPORT_LUNs regression bug with pSCSI export (AlexE + nab)
   - Fix regression bug with handling of zero-length data CDBs (nab)
   - Fix vhost_scsi_target structure alignment (MST)

  Thanks again to everyone who contributed a bugfix patch, gave review
  feedback on tcm_vhost code, and/or reported a bug during their own
  testing over the last weeks.

  There is one other outstanding bug, reported recently by Roland,
  related to SCSI transfer length overflow handling, for which the
  currently proposed bugfix has been left in the queue pending further
  testing with other, non-iscsi-target fabric drivers.

  Once the patch has been verified with loopback (local SGL memory from
  the SCSI LLD) + tcm_qla2xxx (TCM-allocated SGL memory mapped to PCI HW)
  fabric ports, it will be included in the next 3.6-rc-fixes PULL
  request."

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target: Remove unused se_cmd.cmd_spdtl
  tcm_fc: rcu_deref outside rcu lock/unlock section
  tcm_vhost: Fix vhost_scsi_target structure alignment
  target: Fix regression bug with handling of zero-length data CDBs
  target/pscsi: Fix bug with REPORT_LUNs handling for SCSI passthrough
  tcm_vhost: Change vhost_scsi_target->vhost_wwpn to char *
  target: fix NULL pointer dereference bug when alloc_page() fails to get memory
  tcm_fc: Avoid debug overhead when not debugging
  tcm_vhost: Post-merge review changes requested by MST
  tcm_vhost: Fix incorrect IS_ERR() usage in vhost_scsi_map_iov_to_sgl
parents 2e2d8c93 af74115e
......@@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = cmd->priv;
unsigned char *cdb = &pt->pscsi_cdb[0];
unsigned char *cdb;
/*
* Special case for REPORT_LUNs handling where pscsi_plugin_task has
* not been allocated because TCM is handling the emulation directly.
*/
if (!pt)
return 0;
cdb = &pt->pscsi_cdb[0];
result = pt->pscsi_result;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
......
......@@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]);
cmd->cmd_spdtl = size;
if (cmd->data_direction == DMA_TO_DEVICE) {
pr_err("Rejecting underflow/overflow"
" WRITE data\n");
......@@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
return 0;
out:
while (i >= 0) {
__free_page(sg_page(&cmd->t_data_sg[i]));
while (i > 0) {
i--;
__free_page(sg_page(&cmd->t_data_sg[i]));
}
kfree(cmd->t_data_sg);
cmd->t_data_sg = NULL;
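For illustration, a minimal userspace sketch of the corrected unwind pattern above
(plain malloc/free stand in for alloc_page()/__free_page(); the function and variable
names below are illustrative, not taken from the kernel tree):

    #include <stdlib.h>

    /*
     * Rollback pattern from the transport_generic_get_mem() fix: if
     * allocation fails at index i, only entries 0..i-1 were actually
     * allocated, so walk back with "while (i > 0) { i--; free(...); }".
     * The old "while (i >= 0)" form started at the very slot whose
     * allocation had just failed, which in the kernel meant calling
     * __free_page() on a NULL page pointer.
     */
    void **alloc_table(size_t count, size_t size)
    {
            void **table = calloc(count, sizeof(*table));
            size_t i;

            if (!table)
                    return NULL;

            for (i = 0; i < count; i++) {
                    table[i] = malloc(size);
                    if (!table[i])
                            goto out;
            }
            return table;
    out:
            while (i > 0) {
                    i--;
                    free(table[i]);
            }
            free(table);
            return NULL;
    }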
......@@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret < 0)
goto out_fail;
}
/* Workaround for handling zero-length control CDBs */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
/*
* If this command doesn't have any payload and we don't have to call
* into the fabric for data transfers, go ahead and complete it right
* away.
*/
if (!cmd->data_length) {
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= CMD_T_ACTIVE;
......
......@@ -131,6 +131,7 @@ extern struct list_head ft_lport_list;
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
extern struct target_fabric_configfs *ft_configfs;
extern unsigned int ft_debug_logging;
/*
* Fabric methods.
......
......@@ -48,7 +48,7 @@
/*
* Dump cmd state for debugging.
*/
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
struct fc_exch *ep;
struct fc_seq *sp;
......@@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
}
}
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
if (unlikely(ft_debug_logging))
_ft_dump_cmd(cmd, caller);
}
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
......
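The split above keeps the common path down to a single flag test. A userspace sketch of
the same gating pattern, assuming a GCC/Clang toolchain (so __builtin_expect() can stand
in for the kernel's unlikely()); the flag and function names are illustrative:

    #include <stdio.h>

    /*
     * Debug gating: the expensive dump body lives in a separate helper,
     * and the entry point that callers use only pays for one
     * predicted-untaken branch when debugging is disabled.
     */
    static unsigned int debug_logging;

    static void _dump_cmd(const char *caller)
    {
            /* costly formatting/printing happens only when enabled */
            fprintf(stderr, "%s: dumping command state\n", caller);
    }

    static inline void dump_cmd(const char *caller)
    {
            if (__builtin_expect(debug_logging != 0, 0))
                    _dump_cmd(caller);
    }

    int main(void)
    {
            dump_cmd("main");       /* no output: debugging disabled */
            debug_logging = 1;
            dump_cmd("main");       /* now the helper runs */
            return 0;
    }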
......@@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)
struct ft_tport *tport;
mutex_lock(&ft_lport_lock);
tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
lockdep_is_held(&ft_lport_lock));
if (!tport) {
mutex_unlock(&ft_lport_lock);
return;
......
......@@ -53,9 +53,14 @@
#include "vhost.h"
#include "tcm_vhost.h"
enum {
VHOST_SCSI_VQ_CTL = 0,
VHOST_SCSI_VQ_EVT = 1,
VHOST_SCSI_VQ_IO = 2,
};
struct vhost_scsi {
atomic_t vhost_ref_cnt;
struct tcm_vhost_tpg *vs_tpg;
struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
struct vhost_dev dev;
struct vhost_virtqueue vqs[3];
......@@ -131,8 +136,7 @@ static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
return 1;
}
static u32 tcm_vhost_get_pr_transport_id(
struct se_portal_group *se_tpg,
static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
......@@ -162,8 +166,7 @@ static u32 tcm_vhost_get_pr_transport_id(
format_code, buf);
}
static u32 tcm_vhost_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
......@@ -192,8 +195,7 @@ static u32 tcm_vhost_get_pr_transport_id_len(
format_code);
}
static char *tcm_vhost_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
......@@ -236,8 +238,7 @@ static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
return &nacl->se_node_acl;
}
static void tcm_vhost_release_fabric_acl(
struct se_portal_group *se_tpg,
static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl)
{
struct tcm_vhost_nacl *nacl = container_of(se_nacl,
......@@ -297,7 +298,16 @@ static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
return 0;
}
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *);
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
struct vhost_scsi *vs = tv_cmd->tvc_vhost;
spin_lock_bh(&vs->vs_completion_lock);
list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
spin_unlock_bh(&vs->vs_completion_lock);
vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
......@@ -381,7 +391,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vs_completion_work);
struct tcm_vhost_cmd *tv_cmd;
while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) {
while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
struct virtio_scsi_cmd_resp v_rsp;
struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
int ret;
......@@ -408,19 +418,6 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_signal(&vs->dev, &vs->vqs[2]);
}
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
struct vhost_scsi *vs = tv_cmd->tvc_vhost;
pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
spin_lock_bh(&vs->vs_completion_lock);
list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
spin_unlock_bh(&vs->vs_completion_lock);
vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}
static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
struct tcm_vhost_tpg *tv_tpg,
struct virtio_scsi_cmd_req *v_req,
......@@ -533,8 +530,8 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
if (!sg)
return -ENOMEM;
pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__,
sg, sgl_count, IS_ERR(sg));
pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
sg, sgl_count, !sg);
sg_init_table(sg, sgl_count);
tv_cmd->tvc_sgl = sg;
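The pr_debug() fix above follows from kmalloc() reporting failure with NULL rather than
with an ERR_PTR()-encoded value, so IS_ERR() on its result is always false and the right
failure test is "!sg". A self-contained illustration of the two conventions; the
ERR_PTR()/IS_ERR() helpers are re-implemented locally only so the example compiles on
its own:

    #include <errno.h>
    #include <stdlib.h>

    /* Userspace copies of the kernel's ERR_PTR()/IS_ERR() encoding. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *from_alloc = NULL;              /* what a failed malloc()/kmalloc() returns */
            void *from_errptr = ERR_PTR(-ENOMEM); /* what an ERR_PTR-style API returns */

            /* IS_ERR() never fires for the allocator case; "!ptr" does. */
            return (!IS_ERR(from_alloc) && IS_ERR(from_errptr)) ? 0 : 1;
    }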
......@@ -787,12 +784,12 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
pr_err("%s: The handling func for control queue.\n", __func__);
pr_debug("%s: The handling func for control queue.\n", __func__);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
pr_err("%s: The handling func for event queue.\n", __func__);
pr_debug("%s: The handling func for event queue.\n", __func__);
}
static void vhost_scsi_handle_kick(struct vhost_work *work)
......@@ -825,11 +822,6 @@ static int vhost_scsi_set_endpoint(
return -EFAULT;
}
}
if (vs->vs_tpg) {
mutex_unlock(&vs->dev.mutex);
return -EEXIST;
}
mutex_unlock(&vs->dev.mutex);
mutex_lock(&tcm_vhost_mutex);
......@@ -839,7 +831,7 @@ static int vhost_scsi_set_endpoint(
mutex_unlock(&tv_tpg->tv_tpg_mutex);
continue;
}
if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) {
if (tv_tpg->tv_tpg_vhost_count != 0) {
mutex_unlock(&tv_tpg->tv_tpg_mutex);
continue;
}
......@@ -847,14 +839,20 @@ static int vhost_scsi_set_endpoint(
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
(tv_tpg->tport_tpgt == t->vhost_tpgt)) {
atomic_inc(&tv_tpg->tv_tpg_vhost_count);
smp_mb__after_atomic_inc();
tv_tpg->tv_tpg_vhost_count++;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
mutex_unlock(&tcm_vhost_mutex);
mutex_lock(&vs->dev.mutex);
if (vs->vs_tpg) {
mutex_unlock(&vs->dev.mutex);
mutex_lock(&tv_tpg->tv_tpg_mutex);
tv_tpg->tv_tpg_vhost_count--;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
return -EEXIST;
}
vs->vs_tpg = tv_tpg;
atomic_inc(&vs->vhost_ref_cnt);
smp_mb__after_atomic_inc();
mutex_unlock(&vs->dev.mutex);
return 0;
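The reordering above also replaces atomic_t counters and explicit memory barriers with
plain integers guarded by the mutexes that already protect the surrounding state, undoing
the count if the device turns out to have an endpoint already. A simplified pthread
sketch of that flow (two locks, increment first, roll back on failure; every structure
and function name here is illustrative):

    #include <pthread.h>

    struct tpg {
            pthread_mutex_t lock;
            int vhost_count;        /* protected by lock */
    };

    struct dev {
            pthread_mutex_t lock;
            struct tpg *tpg;        /* protected by lock */
    };

    int set_endpoint(struct dev *dev, struct tpg *tpg)
    {
            /* bump the per-tpg count under its own mutex */
            pthread_mutex_lock(&tpg->lock);
            tpg->vhost_count++;
            pthread_mutex_unlock(&tpg->lock);

            /* check-and-set the endpoint under the device mutex */
            pthread_mutex_lock(&dev->lock);
            if (dev->tpg) {
                    pthread_mutex_unlock(&dev->lock);
                    /* already bound: roll the count back */
                    pthread_mutex_lock(&tpg->lock);
                    tpg->vhost_count--;
                    pthread_mutex_unlock(&tpg->lock);
                    return -1;      /* -EEXIST in the kernel code */
            }
            dev->tpg = tpg;
            pthread_mutex_unlock(&dev->lock);
            return 0;
    }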
......@@ -871,38 +869,42 @@ static int vhost_scsi_clear_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
int index;
int index, ret;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
for (index = 0; index < vs->dev.nvqs; ++index) {
if (!vhost_vq_access_ok(&vs->vqs[index])) {
mutex_unlock(&vs->dev.mutex);
return -EFAULT;
ret = -EFAULT;
goto err;
}
}
if (!vs->vs_tpg) {
mutex_unlock(&vs->dev.mutex);
return -ENODEV;
ret = -ENODEV;
goto err;
}
tv_tpg = vs->vs_tpg;
tv_tport = tv_tpg->tport;
if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
(tv_tpg->tport_tpgt != t->vhost_tpgt)) {
mutex_unlock(&vs->dev.mutex);
pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
tv_tport->tport_name, tv_tpg->tport_tpgt,
t->vhost_wwpn, t->vhost_tpgt);
return -EINVAL;
ret = -EINVAL;
goto err;
}
atomic_dec(&tv_tpg->tv_tpg_vhost_count);
tv_tpg->tv_tpg_vhost_count--;
vs->vs_tpg = NULL;
mutex_unlock(&vs->dev.mutex);
return 0;
err:
mutex_unlock(&vs->dev.mutex);
return ret;
}
static int vhost_scsi_open(struct inode *inode, struct file *f)
......@@ -918,9 +920,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
INIT_LIST_HEAD(&s->vs_completion_list);
spin_lock_init(&s->vs_completion_lock);
s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick;
s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick;
s->vqs[2].handle_kick = vhost_scsi_handle_kick;
s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
r = vhost_dev_init(&s->dev, s->vqs, 3);
if (r < 0) {
kfree(s);
......@@ -949,6 +951,18 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
return 0;
}
static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
vhost_poll_flush(&vs->dev.vqs[index].poll);
}
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
if (features & ~VHOST_FEATURES)
......@@ -961,7 +975,8 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
vs->dev.acked_features = features;
/* TODO possibly smp_wmb() and flush vqs */
smp_wmb();
vhost_scsi_flush(vs);
mutex_unlock(&vs->dev.mutex);
return 0;
}
......@@ -974,26 +989,25 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
u64 features;
int r;
int r, abi_version = VHOST_SCSI_ABI_VERSION;
switch (ioctl) {
case VHOST_SCSI_SET_ENDPOINT:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
if (backend.reserved != 0)
return -EOPNOTSUPP;
return vhost_scsi_set_endpoint(vs, &backend);
case VHOST_SCSI_CLEAR_ENDPOINT:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
if (backend.reserved != 0)
return -EOPNOTSUPP;
return vhost_scsi_clear_endpoint(vs, &backend);
case VHOST_SCSI_GET_ABI_VERSION:
if (copy_from_user(&backend, argp, sizeof backend))
return -EFAULT;
backend.abi_version = VHOST_SCSI_ABI_VERSION;
if (copy_to_user(argp, &backend, sizeof backend))
if (copy_to_user(argp, &abi_version, sizeof abi_version))
return -EFAULT;
return 0;
case VHOST_GET_FEATURES:
......@@ -1013,11 +1027,21 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
}
}
#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations vhost_scsi_fops = {
.owner = THIS_MODULE,
.release = vhost_scsi_release,
.unlocked_ioctl = vhost_scsi_ioctl,
/* TODO compat ioctl? */
#ifdef CONFIG_COMPAT
.compat_ioctl = vhost_scsi_compat_ioctl,
#endif
.open = vhost_scsi_open,
.llseek = noop_llseek,
};
......@@ -1054,28 +1078,28 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
return "Unknown";
}
static int tcm_vhost_port_link(
struct se_portal_group *se_tpg,
static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
struct se_lun *lun)
{
struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg);
atomic_inc(&tv_tpg->tv_tpg_port_count);
smp_mb__after_atomic_inc();
mutex_lock(&tv_tpg->tv_tpg_mutex);
tv_tpg->tv_tpg_port_count++;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
return 0;
}
static void tcm_vhost_port_unlink(
struct se_portal_group *se_tpg,
static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
struct se_lun *se_lun)
{
struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
struct tcm_vhost_tpg, se_tpg);
atomic_dec(&tv_tpg->tv_tpg_port_count);
smp_mb__after_atomic_dec();
mutex_lock(&tv_tpg->tv_tpg_mutex);
tv_tpg->tv_tpg_port_count--;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
}
static struct se_node_acl *tcm_vhost_make_nodeacl(
......@@ -1122,8 +1146,7 @@ static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
kfree(nacl);
}
static int tcm_vhost_make_nexus(
struct tcm_vhost_tpg *tv_tpg,
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
const char *name)
{
struct se_portal_group *se_tpg;
......@@ -1168,7 +1191,7 @@ static int tcm_vhost_make_nexus(
return -ENOMEM;
}
/*
* Now register the TCM vHost virtual I_T Nexus as active with the
* Now register the TCM vhost virtual I_T Nexus as active with the
* call to __transport_register_session()
*/
__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
......@@ -1179,8 +1202,7 @@ static int tcm_vhost_make_nexus(
return 0;
}
static int tcm_vhost_drop_nexus(
struct tcm_vhost_tpg *tpg)
static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
struct se_session *se_sess;
struct tcm_vhost_nexus *tv_nexus;
......@@ -1198,27 +1220,27 @@ static int tcm_vhost_drop_nexus(
return -ENODEV;
}
if (atomic_read(&tpg->tv_tpg_port_count)) {
if (tpg->tv_tpg_port_count != 0) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to remove TCM_vHost I_T Nexus with"
pr_err("Unable to remove TCM_vhost I_T Nexus with"
" active TPG port count: %d\n",
atomic_read(&tpg->tv_tpg_port_count));
return -EPERM;
tpg->tv_tpg_port_count);
return -EBUSY;
}
if (atomic_read(&tpg->tv_tpg_vhost_count)) {
if (tpg->tv_tpg_vhost_count != 0) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to remove TCM_vHost I_T Nexus with"
pr_err("Unable to remove TCM_vhost I_T Nexus with"
" active TPG vhost count: %d\n",
atomic_read(&tpg->tv_tpg_vhost_count));
return -EPERM;
tpg->tv_tpg_vhost_count);
return -EBUSY;
}
pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated"
pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated vHost Target Port
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
transport_deregister_session(tv_nexus->tvn_se_sess);
tpg->tpg_nexus = NULL;
......@@ -1228,8 +1250,7 @@ static int tcm_vhost_drop_nexus(
return 0;
}
static ssize_t tcm_vhost_tpg_show_nexus(
struct se_portal_group *se_tpg,
static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
char *page)
{
struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
......@@ -1250,8 +1271,7 @@ static ssize_t tcm_vhost_tpg_show_nexus(
return ret;
}
static ssize_t tcm_vhost_tpg_store_nexus(
struct se_portal_group *se_tpg,
static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
......@@ -1336,8 +1356,7 @@ static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
NULL,
};
static struct se_portal_group *tcm_vhost_make_tpg(
struct se_wwn *wwn,
static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
......@@ -1385,7 +1404,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
list_del(&tpg->tv_tpg_list);
mutex_unlock(&tcm_vhost_mutex);
/*
* Release the virtual I_T Nexus for this vHost TPG
* Release the virtual I_T Nexus for this vhost TPG
*/
tcm_vhost_drop_nexus(tpg);
/*
......@@ -1395,8 +1414,7 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
kfree(tpg);
}
static struct se_wwn *tcm_vhost_make_tport(
struct target_fabric_configfs *tf,
static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
......@@ -1592,7 +1610,10 @@ static void tcm_vhost_deregister_configfs(void)
static int __init tcm_vhost_init(void)
{
int ret = -ENOMEM;
/*
* Use our own dedicated workqueue for submitting I/O into
* target core to avoid contention within system_wq.
*/
tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
if (!tcm_vhost_workqueue)
goto out;
......
......@@ -47,9 +47,9 @@ struct tcm_vhost_tpg {
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt;
/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
atomic_t tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus */
atomic_t tv_tpg_vhost_count;
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count;
/* list for tcm_vhost_list */
struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */
......@@ -91,11 +91,13 @@ struct tcm_vhost_tport {
struct vhost_scsi_target {
int abi_version;
unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
char vhost_wwpn[TRANSPORT_IQN_LEN];
unsigned short vhost_tpgt;
unsigned short reserved;
};
/* VHOST_SCSI specific defines */
#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
/* Changing this breaks userspace. */
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
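With the ioctl defined on a bare int, a userspace caller reads the ABI version as below.
This is a minimal sketch only: it assumes the misc device shows up as /dev/vhost-scsi and
duplicates the VHOST_SCSI_GET_ABI_VERSION definition from the header above, since that
header is not exported to userspace in this tree:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/vhost.h>        /* VHOST_VIRTIO */

    /* duplicated from tcm_vhost.h above for a self-contained example */
    #define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)

    int main(void)
    {
            int abi = -1;
            int fd = open("/dev/vhost-scsi", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/vhost-scsi");
                    return 1;
            }
            if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &abi) < 0) {
                    perror("VHOST_SCSI_GET_ABI_VERSION");
                    close(fd);
                    return 1;
            }
            printf("vhost-scsi ABI version: %d\n", abi);
            close(fd);
            return 0;
    }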
......@@ -503,8 +503,6 @@ struct se_cmd {
u32 se_ordered_id;
/* Total size in bytes associated with command */
u32 data_length;
/* SCSI Presented Data Transfer Length */
u32 cmd_spdtl;
u32 residual_count;
u32 orig_fe_lun;
/* Persistent Reservation key */
......