Commit 7dc517df authored by James Smart, committed by James Bottomley

[SCSI] lpfc 8.3.15: Add target queue depth throttling

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent 3804dc84
@@ -48,7 +48,7 @@ struct lpfc_sli2_slim;
 #define LPFC_TGTQ_INTERVAL      40000   /* Min amount of time between tgt
                                            queue depth change in millisecs */
 #define LPFC_TGTQ_RAMPUP_PCENT  5       /* Target queue rampup in percentage */
-#define LPFC_MIN_TGT_QDEPTH     100
+#define LPFC_MIN_TGT_QDEPTH     10
 #define LPFC_MAX_TGT_QDEPTH     0xFFFF
 #define LPFC_MAX_BUCKET_COUNT   20      /* Maximum no. of buckets for stat data
@@ -400,6 +400,7 @@ struct lpfc_vport {
        uint32_t cfg_max_luns;
        uint32_t cfg_enable_da_id;
        uint32_t cfg_max_scsicmpl_time;
+       uint32_t cfg_tgt_queue_depth;
        uint32_t dev_loss_tmo_changed;
...
@@ -2207,6 +2207,13 @@ LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
 LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
                  "Max number of FCP commands we can queue to a specific LUN");
 
+/*
+# tgt_queue_depth: This parameter is used to limit the number of outstanding
+# commands per target port. Value range is [10,65535]. Default value is 65535.
+*/
+LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
+                 "Max number of FCP commands we can queue to a specific target port");
+
 /*
 # hba_queue_depth: This parameter is used to limit the number of outstanding
 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
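
The new tgt_queue_depth attribute accepts values in [10,65535] and defaults to 65535, which equals LPFC_MAX_TGT_QDEPTH (0xFFFF), so a vport is effectively unthrottled unless the administrator lowers it. As a rough, hedged illustration of the range handling implied by that comment (the helper name and the fallback logging below are hypothetical, not the LPFC_VPORT_ATTR_R macro's actual expansion):

#include <stdint.h>
#include <stdio.h>

/* Bounds and default quoted from the attribute comment above; names are illustrative. */
#define TGT_QDEPTH_MIN          10
#define TGT_QDEPTH_MAX          65535
#define TGT_QDEPTH_DEFAULT      65535

/* Hypothetical init helper: accept an in-range request, otherwise warn and use the default. */
static uint32_t tgt_queue_depth_init(long requested)
{
        if (requested >= TGT_QDEPTH_MIN && requested <= TGT_QDEPTH_MAX)
                return (uint32_t)requested;
        fprintf(stderr, "tgt_queue_depth %ld out of range [%d,%d], using %d\n",
                requested, TGT_QDEPTH_MIN, TGT_QDEPTH_MAX, TGT_QDEPTH_DEFAULT);
        return TGT_QDEPTH_DEFAULT;
}
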
@@ -3122,7 +3129,7 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
                        continue;
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;
-               ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+               ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
        }
        spin_unlock_irq(shost->host_lock);
        return 0;
@@ -3326,6 +3333,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_temp_sensor,
        &dev_attr_lpfc_log_verbose,
        &dev_attr_lpfc_lun_queue_depth,
+       &dev_attr_lpfc_tgt_queue_depth,
        &dev_attr_lpfc_hba_queue_depth,
        &dev_attr_lpfc_peer_port_login,
        &dev_attr_lpfc_nodev_tmo,
@@ -3387,6 +3395,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
        &dev_attr_lpfc_drvr_version,
        &dev_attr_lpfc_log_verbose,
        &dev_attr_lpfc_lun_queue_depth,
+       &dev_attr_lpfc_tgt_queue_depth,
        &dev_attr_lpfc_nodev_tmo,
        &dev_attr_lpfc_devloss_tmo,
        &dev_attr_lpfc_hba_queue_depth,
@@ -4575,6 +4584,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
 {
        lpfc_log_verbose_init(vport, lpfc_log_verbose);
        lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
+       lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
        lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
        lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
        lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
...
@@ -3583,7 +3583,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        kref_init(&ndlp->kref);
        NLP_INT_NODE_ACT(ndlp);
        atomic_set(&ndlp->cmd_pending, 0);
-       ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+       ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
 }
 
 struct lpfc_nodelist *
...
@@ -2458,14 +2458,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                }
                spin_unlock_irqrestore(shost->host_lock, flags);
        } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
-               if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+               if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
                   time_after(jiffies, pnode->last_change_time +
                              msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
                        spin_lock_irqsave(shost->host_lock, flags);
-                       pnode->cmd_qdepth += pnode->cmd_qdepth *
-                               LPFC_TGTQ_RAMPUP_PCENT / 100;
-                       if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
-                               pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+                       depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
+                               / 100;
+                       depth = depth ? depth : 1;
+                       pnode->cmd_qdepth += depth;
+                       if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
+                               pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
                        pnode->last_change_time = jiffies;
                        spin_unlock_irqrestore(shost->host_lock, flags);
                }
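
The completion-path hunk above ramps a target's queue depth back up once traffic drains: no more than once per LPFC_TGTQ_INTERVAL, the depth grows by LPFC_TGTQ_RAMPUP_PCENT (5%), the step is rounded up to at least 1 so small depths still make progress, and the result is capped at the per-vport cfg_tgt_queue_depth instead of the old hard LPFC_MAX_TGT_QDEPTH ceiling. A minimal standalone sketch of just that arithmetic (locking and the interval check omitted; the function and parameter names are illustrative, not lpfc symbols):

#include <stdint.h>

#define TGTQ_RAMPUP_PCENT       5       /* same ramp-up percentage as LPFC_TGTQ_RAMPUP_PCENT */

/* Grow the current depth by ~5%, by at least 1, and never past the configured maximum. */
static uint32_t ramp_up_qdepth(uint32_t cur_qdepth, uint32_t cfg_max)
{
        uint32_t depth = cur_qdepth * TGTQ_RAMPUP_PCENT / 100;

        depth = depth ? depth : 1;      /* 5% of a small depth rounds down to 0 */
        cur_qdepth += depth;
        if (cur_qdepth > cfg_max)
                cur_qdepth = cfg_max;
        return cur_qdepth;
}

Starting from the new LPFC_MIN_TGT_QDEPTH of 10, for instance, the step stays at 1 until the depth reaches 20, after which the 5% growth takes over.
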
@@ -2920,8 +2922,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
                cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
                goto out_fail_command;
        }
-       if (vport->cfg_max_scsicmpl_time &&
-           (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
+       if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
                goto out_host_busy;
 
        lpfc_cmd = lpfc_get_scsi_buf(phba);
...
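
The lpfc_queuecommand hunk makes the per-target gate unconditional: it no longer depends on cfg_max_scsicmpl_time being set, so whenever a target already has cmd_qdepth commands outstanding the new command is bounced back to the midlayer as busy and retried later. A hedged sketch of that submit-time check (the structure and function names below stand in for the driver's ndlp fields and are not lpfc symbols):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the per-target state consulted at submit time. */
struct target_throttle {
        uint32_t cmd_pending;   /* commands currently in flight (ndlp->cmd_pending) */
        uint32_t cmd_qdepth;    /* current per-target limit (ndlp->cmd_qdepth) */
};

/* Refuse new work once the target has reached its current queue depth. */
static bool target_can_accept(const struct target_throttle *t)
{
        return t->cmd_pending < t->cmd_qdepth;
}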