Commit 019c4ca6 authored by Christoph Hellwig, committed by Nicholas Bellinger

target: kill dev->dev_task_attr_type

We can just key ordered tag emulation off the transport_type field.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent e6c4219b
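The core of the change: instead of caching a per-device t10_task_attr_index_t at device allocation time, every place that cares checks the backend's transport_type directly, treating TRANSPORT_PLUGIN_PHBA_PDEV (pSCSI passthrough) as "no SAM task attribute emulation". Below is a minimal standalone C sketch of that pattern, not the kernel code itself; the struct layouts, the constant values, and the helper name need_sam_task_attr_emulation are simplified placeholders for illustration.

/*
 * Standalone sketch (not the kernel source): derive the passthrough
 * check from transport_type instead of caching an enum per device.
 * Struct layouts and constant values are simplified placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

#define TRANSPORT_PLUGIN_PHBA_PDEV	1	/* pSCSI passthrough backend (placeholder value) */
#define TRANSPORT_PLUGIN_VHBA_PDEV	2	/* virtual backend, e.g. iblock (placeholder value) */

struct se_subsystem_api {
	int transport_type;
};

struct se_device {
	const struct se_subsystem_api *transport;
};

/* pSCSI passthrough leaves task-attribute handling to the real HBA. */
static bool need_sam_task_attr_emulation(const struct se_device *dev)
{
	return dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV;
}

int main(void)
{
	struct se_subsystem_api pscsi  = { .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV };
	struct se_subsystem_api iblock = { .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV };
	struct se_device pdev = { .transport = &pscsi  };
	struct se_device vdev = { .transport = &iblock };

	printf("pSCSI  needs emulation: %d\n", need_sam_task_attr_emulation(&pdev));
	printf("iblock needs emulation: %d\n", need_sam_task_attr_emulation(&vdev));
	return 0;
}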
@@ -1412,11 +1412,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
else
dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
return dev;
}
@@ -1054,7 +1054,6 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->execute_cmd = spc_emulate_inquiry;
break;
@@ -1083,7 +1082,6 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
break;
case TEST_UNIT_READY:
@@ -1068,11 +1068,13 @@ EXPORT_SYMBOL(transport_init_se_cmd);
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
/*
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return 0;
if (cmd->sam_task_attr == MSG_ACA_TAG) {
@@ -1084,11 +1086,11 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
smp_mb__after_atomic_inc();
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
cmd->se_dev->transport->name);
dev->transport->name);
return 0;
}
@@ -1534,7 +1536,6 @@ void transport_generic_request_failure(struct se_cmd *cmd)
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
switch (cmd->scsi_sense_reason) {
@@ -1619,49 +1620,12 @@ static void __target_execute_cmd(struct se_cmd *cmd)
}
}
void target_execute_cmd(struct se_cmd *cmd)
static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
/*
* If the received CDB has already been aborted, stop processing it here.
*/
if (transport_check_aborted_status(cmd, 1))
return;
/*
* Determine if IOCTL context caller is requesting the stopping of this
* command for LUN shutdown purposes.
*/
spin_lock_irq(&cmd->t_state_lock);
if (cmd->transport_state & CMD_T_LUN_STOP) {
pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
cmd->transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irq(&cmd->t_state_lock);
complete(&cmd->transport_lun_stop_comp);
return;
}
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
__func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irq(&cmd->t_state_lock);
complete(&cmd->t_transport_stop_comp);
return;
}
cmd->t_state = TRANSPORT_PROCESSING;
spin_unlock_irq(&cmd->t_state_lock);
if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
goto execute;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return false;
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
@@ -1672,7 +1636,7 @@ void target_execute_cmd(struct se_cmd *cmd)
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
"se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->se_ordered_id);
goto execute;
return false;
case MSG_ORDERED_TAG:
atomic_inc(&dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -1686,7 +1650,7 @@ void target_execute_cmd(struct se_cmd *cmd)
* exist that need to be completed first.
*/
if (!atomic_read(&dev->simple_cmds))
goto execute;
return false;
break;
default:
/*
@@ -1697,7 +1661,9 @@ void target_execute_cmd(struct se_cmd *cmd)
break;
}
if (atomic_read(&dev->dev_ordered_sync) != 0) {
if (atomic_read(&dev->dev_ordered_sync) == 0)
return false;
spin_lock(&dev->delayed_cmd_lock);
list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
spin_unlock(&dev->delayed_cmd_lock);
@@ -1706,13 +1672,49 @@ void target_execute_cmd(struct se_cmd *cmd)
" delayed CMD list, se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->sam_task_attr,
cmd->se_ordered_id);
return true;
}
void target_execute_cmd(struct se_cmd *cmd)
{
/*
* If the received CDB has already been aborted, stop processing it here.
*/
if (transport_check_aborted_status(cmd, 1))
return;
}
execute:
/*
* Otherwise, no ORDERED task attributes exist..
* Determine if IOCTL context caller is requesting the stopping of this
* command for LUN shutdown purposes.
*/
spin_lock_irq(&cmd->t_state_lock);
if (cmd->transport_state & CMD_T_LUN_STOP) {
pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
cmd->transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irq(&cmd->t_state_lock);
complete(&cmd->transport_lun_stop_comp);
return;
}
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
__func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irq(&cmd->t_state_lock);
complete(&cmd->t_transport_stop_comp);
return;
}
cmd->t_state = TRANSPORT_PROCESSING;
spin_unlock_irq(&cmd->t_state_lock);
if (!target_handle_task_attr(cmd))
__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);
@@ -1752,6 +1754,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
return;
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
@@ -1780,7 +1785,6 @@ static void transport_complete_qf(struct se_cmd *cmd)
{
int ret = 0;
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
@@ -1839,8 +1843,8 @@ static void target_complete_ok_work(struct work_struct *work)
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
/*
* Check to schedule QUEUE_FULL work, or execute an existing
* cmd->transport_qf_callback()
@@ -232,16 +232,6 @@ typedef enum {
SPC3_ALUA_EMULATED
} t10_alua_index_t;
/*
* Used by TCM Core internally to signal if SAM Task Attribute emulation
* is enabled or disabled, or running with TCM/pSCSI passthrough mode
*/
typedef enum {
SAM_TASK_ATTR_PASSTHROUGH,
SAM_TASK_ATTR_UNTAGGED,
SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;
/*
* Used for target SCSI statistics
*/
@@ -686,7 +676,6 @@ struct se_device {
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
u64 dev_res_bin_isid;
t10_task_attr_index_t dev_task_attr_type;
/* Pointer to transport specific device structure */
u32 dev_index;
u64 creation_time;