Commit 65586d51 authored by Nicholas Bellinger

target: Drop se_device TCQ queue_depth usage from I/O path

Historically, pSCSI devices have been the ones that required target-core
to enforce a per se_device->depth_left.  This patch changes target-core
to no longer (by default) enforce a per se_device->depth_left or sleep in
transport_tcq_window_closed() when we run out of queue slots, for all
backend export cases.
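
For context, the removed I/O-path throttle decremented a per-device slot
counter before dispatching each task and slept when no slots remained (see
the __transport_execute_tasks() and transport_tcq_window_closed() hunks
below).  A minimal, self-contained userspace sketch of that gating pattern,
with illustrative names that are not from the kernel tree:

    /* Sketch of the removed queue-depth gating: a shared slot counter
     * consulted before dispatch, with sleep-based backoff when the
     * window is closed.  Illustrative only; not kernel code. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int depth_left = 4;      /* like se_device->depth_left */

    static int dispatch_one_task(void)
    {
            int left = atomic_load(&depth_left);

            while (left > 0) {
                    /* claim a slot, like atomic_dec(&dev->depth_left) */
                    if (atomic_compare_exchange_weak(&depth_left, &left, left - 1))
                            return 0;      /* slot claimed; send the task */
            }
            /* window closed: the old kernel code slept here via
             * transport_tcq_window_closed() and retried */
            usleep(1000);
            return -1;
    }

    int main(void)
    {
            for (int i = 0; i < 6; i++)
                    printf("task %d -> %s\n", i,
                           dispatch_one_task() == 0 ? "sent" : "throttled");
            return 0;
    }

With the patch applied, dispatch proceeds straight to the execute list;
dev->queue_depth survives only as a reported attribute (see
se_dev_set_queue_depth() and transport_dump_dev_state() below).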

Cc: Christoph Hellwig <hch@lst.de>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 40be67f4
drivers/target/target_core_device.c
@@ -1132,8 +1132,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
  */
 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 {
-        u32 orig_queue_depth = dev->queue_depth;
-
         if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                 pr_err("dev[%p]: Unable to change SE Device TCQ while"
                         " dev_export_obj: %d count exists\n", dev,
@@ -1167,11 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
         }
         dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
-        if (queue_depth > orig_queue_depth)
-                atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
-        else if (queue_depth < orig_queue_depth)
-                atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
-
         pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
                 dev, queue_depth);
         return 0;
drivers/target/target_core_pscsi.c
@@ -350,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
          * scsi_device_put() and the pdv->pdv_sd cleared.
          */
         pdv->pdv_sd = sd;
-
         dev = transport_add_device_to_core_hba(hba, &pscsi_template,
                         se_dev, dev_flags, pdv,
                         &dev_limits, NULL, NULL);
drivers/target/target_core_transport.c
@@ -691,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
         struct se_cmd *cmd = task->task_se_cmd;
         struct se_device *dev = cmd->se_dev;
         unsigned long flags;
-#if 0
-        pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-                        cmd->t_task_cdb[0], dev);
-#endif
-        if (dev)
-                atomic_inc(&dev->depth_left);
 
         spin_lock_irqsave(&cmd->t_state_lock, flags);
         task->task_flags &= ~TF_ACTIVE;
@@ -971,9 +965,8 @@ void transport_dump_dev_state(
                 break;
         }
-        *bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
-                atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
-                dev->queue_depth);
+        *bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
+                atomic_read(&dev->execute_tasks), dev->queue_depth);
         *bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
                 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
         *bl += sprintf(b + *bl, "        ");
@@ -1328,9 +1321,6 @@ struct se_device *transport_add_device_to_core_hba(
         spin_lock_init(&dev->se_port_lock);
         spin_lock_init(&dev->se_tmr_lock);
         spin_lock_init(&dev->qf_cmd_lock);
-
-        dev->queue_depth = dev_limits->queue_depth;
-        atomic_set(&dev->depth_left, dev->queue_depth);
         atomic_set(&dev->dev_ordered_id, 0);
 
         se_dev_set_default_attribs(dev, dev_limits);
@@ -1982,18 +1972,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
-        if (dev->dev_tcq_window_closed++ <
-                        PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
-                msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
-        } else
-                msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
-        wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
-        return 0;
-}
-
 /*
  * Called from Fabric Module context from transport_execute_tasks()
  *
@@ -2126,16 +2104,7 @@ static int __transport_execute_tasks(struct se_device *dev)
         struct se_task *task = NULL;
         unsigned long flags;
 
-        /*
-         * Check if there is enough room in the device and HBA queue to send
-         * struct se_tasks to the selected transport.
-         */
-check_depth:
-        if (!atomic_read(&dev->depth_left))
-                return transport_tcq_window_closed(dev);
-
-        dev->dev_tcq_window_closed = 0;
-
         spin_lock_irq(&dev->execute_task_lock);
         if (list_empty(&dev->execute_task_list)) {
                 spin_unlock_irq(&dev->execute_task_lock);
@@ -2146,10 +2115,7 @@ static int __transport_execute_tasks(struct se_device *dev)
         __transport_remove_task_from_execute_queue(task, dev);
         spin_unlock_irq(&dev->execute_task_lock);
 
-        atomic_dec(&dev->depth_left);
-
         cmd = task->task_se_cmd;
         spin_lock_irqsave(&cmd->t_state_lock, flags);
         task->task_flags |= (TF_ACTIVE | TF_SENT);
         atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2170,7 +2136,6 @@ static int __transport_execute_tasks(struct se_device *dev)
                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                 atomic_set(&cmd->t_transport_sent, 0);
                 transport_stop_tasks_for_cmd(cmd);
-                atomic_inc(&dev->depth_left);
                 transport_generic_request_failure(cmd);
         }
include/target/target_core_base.h
@@ -798,7 +798,6 @@ struct se_device {
         spinlock_t              stats_lock;
         /* Active commands on this virtual SE device */
         atomic_t                simple_cmds;
-        atomic_t                depth_left;
         atomic_t                dev_ordered_id;
         atomic_t                execute_tasks;
         atomic_t                dev_ordered_sync;