Commit 36511e86 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 - fix tcm-user backend driver expired cmd time processing (agrover)
 - eliminate kref_put_spinlock_irqsave() for I/O completion (bart)
 - fix iscsi login kthread failure case hung task regression (nab)
 - fix COMPARE_AND_WRITE completion use-after-free race (nab)
 - fix COMPARE_AND_WRITE with SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC non zero
   SGL offset data corruption.  (Jan + Doug)
 - fix >= v4.4-rc1 regression for tcm_qla2xxx enable configfs attribute
   (Himanshu + HCH)

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target/stat: print full t10_wwn.model buffer
  target: fix COMPARE_AND_WRITE non zero SGL offset data corruption
  qla2xxx: Fix regression introduced by target configFS changes
  kref: Remove kref_put_spinlock_irqsave()
  target: Invoke release_cmd() callback without holding a spinlock
  target: Fix race for SCF_COMPARE_AND_WRITE_POST checking
  iscsi-target: Fix rx_login_comp hang after login failure
  iscsi-target: return -ENOMEM instead of -1 in case of failed kmalloc()
  target/user: Do not set unused fields in tcmu_ops
  target/user: Fix time calc in expired cmd processing
parents 75a29ec1 8f903539
@@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
 	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
 }
 
-CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable);
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
 CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
 CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
...
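Note: the regression came from the v4.4 configfs rework converting the qla2xxx "enable" attribute to write-only, while userspace tooling also reads it back. Below is a minimal sketch of a read/write attribute under the CONFIGFS_ATTR() convention; the demo_tpg structure and helpers are hypothetical, not the actual driver code.

/* Hedged sketch: CONFIGFS_ATTR(_pfx, _name) wires up both _pfx<name>_show()
 * and _pfx<name>_store(); CONFIGFS_ATTR_WO() only provides store(), which is
 * what broke userspace reads of "enable".  demo_tpg is illustrative only.
 */
#include <linux/configfs.h>
#include <linux/kernel.h>

struct demo_tpg {
	struct config_group	group;
	bool			enabled;
};

static inline struct demo_tpg *to_demo_tpg(struct config_item *item)
{
	return container_of(to_config_group(item), struct demo_tpg, group);
}

static ssize_t demo_tpg_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_demo_tpg(item)->enabled);
}

static ssize_t demo_tpg_enable_store(struct config_item *item,
				     const char *page, size_t count)
{
	unsigned long op;

	if (kstrtoul(page, 0, &op) < 0 || op > 1)
		return -EINVAL;
	to_demo_tpg(item)->enabled = op;
	return count;
}

/* Both show() and store() exist, so the attribute is readable and writable. */
CONFIGFS_ATTR(demo_tpg_, enable);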
@@ -4074,6 +4074,17 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
+static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+{
+	bool ret;
+
+	spin_lock_bh(&conn->state_lock);
+	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+	spin_unlock_bh(&conn->state_lock);
+
+	return ret;
+}
+
 int iscsi_target_rx_thread(void *arg)
 {
 	int ret, rc;
@@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg)
 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
-	if (rc < 0)
+	if (rc < 0 || iscsi_target_check_conn_state(conn))
 		return 0;
 
 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
...
@@ -388,6 +388,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
 	if (login->login_complete) {
 		if (conn->rx_thread && conn->rx_thread_active) {
 			send_sig(SIGINT, conn->rx_thread, 1);
+			complete(&conn->rx_login_comp);
 			kthread_stop(conn->rx_thread);
 		}
 		if (conn->tx_thread && conn->tx_thread_active) {
...
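Note: the hang fix depends on ordering. A thread blocked in wait_for_completion_interruptible() is not released by kthread_stop() alone, so the failed-login path now issues complete(&conn->rx_login_comp) before stopping the thread, and the rx thread re-checks the connection state (previous hunk) before entering its receive loop. A minimal, generic sketch of that wake-before-stop pattern follows; demo_conn and its helpers are illustrative, not the iSCSI target code.

/* Hedged sketch, assuming a driver-private demo_conn; not the actual code. */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct demo_conn {
	struct completion	login_comp;
	bool			logged_in;
	struct task_struct	*rx_thread;
};

static int demo_rx_thread(void *arg)
{
	struct demo_conn *conn = arg;

	/* Blocks here until login either completes or is torn down. */
	if (wait_for_completion_interruptible(&conn->login_comp) < 0 ||
	    !conn->logged_in)
		return 0;

	while (!kthread_should_stop()) {
		/* ... receive and dispatch PDUs ... */
	}
	return 0;
}

static void demo_fail_login(struct demo_conn *conn)
{
	conn->logged_in = false;
	/* Wake the waiter first; kthread_stop() alone does not satisfy a
	 * wait_for_completion_interruptible() that nobody ever completes.
	 */
	complete(&conn->login_comp);
	kthread_stop(conn->rx_thread);
}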
@@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
 	if (!pl) {
 		pr_err("Unable to allocate memory for"
 				" struct iscsi_param_list.\n");
-		return -1 ;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&pl->param_list);
 	INIT_LIST_HEAD(&pl->extra_response_list);
@@ -578,7 +578,7 @@ int iscsi_copy_param_list(
 	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
 	if (!param_list) {
 		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&param_list->param_list);
 	INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -629,7 +629,7 @@ int iscsi_copy_param_list(
 err_out:
 	iscsi_release_param_list(param_list);
-	return -1;
+	return -ENOMEM;
 }
 
 static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response(
 	if (!extra_response) {
 		pr_err("Unable to allocate memory for"
 			" struct iscsi_extra_response.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&extra_response->er_list);
@@ -1370,7 +1370,7 @@ int iscsi_decode_text_input(
 	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
 	if (!tmpbuf) {
 		pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
-		return -1;
+		return -ENOMEM;
 	}
 
 	memcpy(tmpbuf, textbuf, length);
...
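Note: a bare -1 happens to equal -EPERM when interpreted as an errno, so allocation failures were reported misleadingly; returning -ENOMEM keeps the code meaningful as it propagates toward userspace. A tiny sketch of the convention, with a hypothetical demo_alloc_params():

/* Hedged sketch of the negative-errno convention; demo_params is illustrative. */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_params {
	struct list_head param_list;
};

static int demo_alloc_params(struct demo_params **out)
{
	struct demo_params *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;		/* not -1: callers expect a real errno */
	INIT_LIST_HEAD(&p->param_list);
	*out = p;
	return 0;
}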
@@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 	return 0;
 }
 
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
+					   int *post_ret)
 {
 	unsigned char *buf, *addr;
 	struct scatterlist *sg;
@@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd)
 			       cmd->data_direction);
 }
 
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+					     int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
 	 * sent to the backend driver.
 	 */
 	spin_lock_irq(&cmd->t_state_lock);
-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+		*post_ret = 1;
+	}
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	/*
@@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
 	return TCM_NO_SENSE;
 }
 
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+						 int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct scatterlist *write_sg = NULL, *sg;
@@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success
 		if (block_size < PAGE_SIZE) {
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    block_size);
+				    m.piter.sg->offset + block_size);
 		} else {
 			sg_miter_next(&m);
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    0);
+				    m.piter.sg->offset);
 		}
 		len -= block_size;
 		i++;
...
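Note: the corruption came from assuming every source scatterlist entry starts at page offset 0; with SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC the fabric-provided SGLs can carry non-zero offsets, so the derived per-block write SGL must inherit m.piter.sg->offset. A hedged sketch of the general pattern follows; demo_mirror_blocks() is hypothetical and assumes one block per source entry (the real callback also packs two blocks into one page when block_size < PAGE_SIZE).

/* Hedged sketch: mirror a source SGL into a per-block SGL while preserving
 * each source entry's page offset; not the actual target code.
 */
#include <linux/scatterlist.h>

static void demo_mirror_blocks(struct scatterlist *src, unsigned int src_nents,
			       struct scatterlist *dst, unsigned int nblocks,
			       unsigned int block_size)
{
	struct sg_mapping_iter m;
	unsigned int i = 0;

	sg_init_table(dst, nblocks);
	sg_miter_start(&m, src, src_nents, SG_MITER_TO_SG);
	while (i < nblocks && sg_miter_next(&m)) {
		/* Inherit the source entry's starting offset; hard-coding 0
		 * here is what corrupted fabrics that pass offset SGLs.
		 */
		sg_set_page(&dst[i], m.page, block_size, m.piter.sg->offset);
		i++;
	}
	sg_miter_stop(&m);
}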
@@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
 	char str[sizeof(dev->t10_wwn.model)+1];
 
 	/* scsiLuProductId */
-	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+	for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
 		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
 			dev->t10_wwn.model[i] : ' ';
 	str[i] = '\0';
...
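Note: the loop bound was sized by the 8-byte vendor field while printing the 16-byte model field, truncating the product ID. INQUIRY-style fields are fixed-width and space-padded rather than NUL-terminated, hence the copy loop plus explicit terminator. A small sketch of the pattern with a hypothetical demo_wwn:

/* Hedged sketch: print a fixed-width, space-padded field that is not
 * guaranteed to be NUL-terminated.  demo_wwn is illustrative only.
 */
#include <linux/ctype.h>
#include <linux/kernel.h>

struct demo_wwn {
	char vendor[8];		/* T10 vendor identification */
	char model[16];		/* product identification */
};

static ssize_t demo_show_model(const struct demo_wwn *wwn, char *page)
{
	char str[sizeof(wwn->model) + 1];
	int i;

	/* Size the loop by the field actually printed (model, not vendor). */
	for (i = 0; i < sizeof(wwn->model); i++)
		str[i] = isprint(wwn->model[i]) ? wwn->model[i] : ' ';
	str[i] = '\0';

	return sprintf(page, "%s\n", str);
}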
@@ -130,6 +130,9 @@ void core_tmr_abort_task(
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
 
+		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+			continue;
+
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
@@ -139,13 +142,15 @@ void core_tmr_abort_task(
 				" skipping\n", ref_tag);
 			spin_unlock(&se_cmd->t_state_lock);
 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+			target_put_sess_cmd(se_cmd);
 			goto out;
 		}
 		se_cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock(&se_cmd->t_state_lock);
 
 		list_del_init(&se_cmd->se_cmd_list);
-		kref_get(&se_cmd->cmd_kref);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 		cancel_work_sync(&se_cmd->work);
...
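Note: ABORT_TASK now takes its reference with kref_get_unless_zero() while still scanning under the session lock, so a command whose refcount has already dropped to zero (and is racing toward release) is skipped instead of resurrected, and the early-exit path balances the reference with target_put_sess_cmd(). A generic sketch of the lookup pattern, with hypothetical demo_* structures:

/* Hedged sketch of "get-unless-zero under the list lock"; illustrative only. */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_cmd {
	struct list_head	list;
	struct kref		kref;
	u64			tag;
};

struct demo_session {
	spinlock_t		lock;
	struct list_head	cmd_list;
};

static struct demo_cmd *demo_find_cmd(struct demo_session *sess, u64 tag)
{
	struct demo_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&sess->lock, flags);
	list_for_each_entry(cmd, &sess->cmd_list, list) {
		if (cmd->tag != tag)
			continue;
		/* Skip commands already on their way to release. */
		if (!kref_get_unless_zero(&cmd->kref))
			continue;
		spin_unlock_irqrestore(&sess->lock, flags);
		return cmd;	/* caller must drop the reference when done */
	}
	spin_unlock_irqrestore(&sess->lock, flags);
	return NULL;
}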
@@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0;
+	int ret = 0, post_ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
 		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
@@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	 */
 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
 	     cmd->transport_complete_callback)
-		cmd->transport_complete_callback(cmd, false);
+		cmd->transport_complete_callback(cmd, false, &post_ret);
 
 	switch (sense_reason) {
 	case TCM_NON_EXISTENT_LUN:
@@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work)
 	 */
 	if (cmd->transport_complete_callback) {
 		sense_reason_t rc;
+		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+		bool zero_dl = !(cmd->data_length);
+		int post_ret = 0;
 
-		rc = cmd->transport_complete_callback(cmd, true);
-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-			    !cmd->data_length)
+		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+		if (!rc && !post_ret) {
+			if (caw && zero_dl)
 				goto queue_rsp;
 
 			return;
@@ -2507,23 +2509,24 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
 EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
-		__releases(&se_cmd->se_sess->sess_cmd_lock)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
 
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock(&se_sess->sess_cmd_lock);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return 1;
 	}
-	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
-		&se_sess->sess_cmd_lock);
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
...
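Note: the use-after-free fix in target_complete_ok_work() follows a latch-before-callback rule: the completion callback may drop the last reference to the command, so anything needed afterwards is copied into locals (caw, zero_dl) or reported through the new post_ret out parameter before the call, instead of re-reading cmd->se_cmd_flags. A hedged, generic sketch; demo_cmd and demo_complete_ok() are illustrative:

/* Hedged sketch of the latch-before-callback rule; not the target core API. */
#include <linux/types.h>

#define DEMO_F_CAW	0x1

struct demo_cmd {
	unsigned int	flags;
	unsigned int	data_length;
	int		(*complete_cb)(struct demo_cmd *cmd, bool success,
				       int *post_ret);
};

static void demo_complete_ok(struct demo_cmd *cmd)
{
	bool caw = cmd->flags & DEMO_F_CAW;	/* latched before the callback */
	bool zero_dl = !cmd->data_length;
	int post_ret = 0;
	int rc;

	rc = cmd->complete_cb(cmd, true, &post_ret);	/* may release cmd state */

	if (!rc && !post_ret) {
		if (caw && zero_dl)
			goto queue_rsp;		/* zero-length case: answer now */
		return;				/* callback took over the command */
	}
queue_rsp:
	/* ... queue the response without re-reading possibly freed state ... */
	return;
}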
@@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
 		return 0;
 
-	if (!time_after(cmd->deadline, jiffies))
+	if (!time_after(jiffies, cmd->deadline))
 		return 0;
 
 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
@@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 
 static const struct target_backend_ops tcmu_ops = {
 	.name			= "user",
-	.inquiry_prod		= "USER",
-	.inquiry_rev		= TCMU_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
 	.attach_hba		= tcmu_attach_hba,
...
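Note: time_after(a, b) is true when a is later than b in jiffies, modulo wraparound, so "the deadline has passed" must be written time_after(jiffies, deadline); the old test had the arguments swapped and asked whether the deadline was still in the future. A minimal sketch with a hypothetical demo_cmd:

/* Hedged sketch of a jiffies deadline check; demo_cmd is illustrative. */
#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_cmd {
	unsigned long deadline;		/* jiffies value set at submit time */
};

static bool demo_cmd_expired(const struct demo_cmd *cmd)
{
	/* "now is after the deadline" is the expiry condition; swapping the
	 * arguments asks the opposite question and never expires anything.
	 */
	return time_after(jiffies, cmd->deadline);
}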
@@ -19,7 +19,6 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>
 
 struct kref {
 	atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
 	return kref_sub(kref, 1, release);
 }
 
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception.  If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count.  The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
-		void (*release)(struct kref *kref),
-		spinlock_t *lock)
-{
-	unsigned long flags;
-
-	WARN_ON(release == NULL);
-	if (atomic_add_unless(&kref->refcount, -1, 1))
-		return 0;
-	spin_lock_irqsave(lock, flags);
-	if (atomic_dec_and_test(&kref->refcount)) {
-		release(kref);
-		local_irq_restore(flags);
-		return 1;
-	}
-	spin_unlock_irqrestore(lock, flags);
-	return 0;
-}
-
 static inline int kref_put_mutex(struct kref *kref,
 				 void (*release)(struct kref *kref),
 				 struct mutex *lock)
...
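Note: with kref_put_spinlock_irqsave() gone, the remaining pattern is a plain kref_put() whose release callback acquires the protecting lock itself with spin_lock_irqsave(), as target_release_cmd_kref() now does above. A generic sketch of the migrated release path; demo_obj is hypothetical:

/* Hedged sketch: the release callback takes the list lock itself instead of
 * having kref_put_spinlock_irqsave() pass it in; illustrative only.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct kref		kref;
	struct list_head	node;
	spinlock_t		*list_lock;	/* protects the list 'node' is on */
};

static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);
	unsigned long flags;

	/* The lock is acquired here, inside the release callback, rather than
	 * being taken and dropped around the refcount decrement by the helper.
	 */
	spin_lock_irqsave(obj->list_lock, flags);
	list_del(&obj->node);
	spin_unlock_irqrestore(obj->list_lock, flags);
	kfree(obj);
}

static int demo_put(struct demo_obj *obj)
{
	return kref_put(&obj->kref, demo_release);
}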
@@ -474,7 +474,7 @@ struct se_cmd {
 	struct completion	cmd_wait_comp;
 	const struct target_core_fabric_ops *se_tfo;
 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
-	sense_reason_t		(*transport_complete_callback)(struct se_cmd *, bool);
+	sense_reason_t		(*transport_complete_callback)(struct se_cmd *, bool, int *);
 	void			*protocol_data;
 
 	unsigned char		*t_task_cdb;
...