Commit 67d12733 authored by James Smart's avatar James Smart Committed by James Bottomley

[SCSI] lpfc 8.3.33: Tie parallel I/O queues into separate MSIX vectors

Add fcp_io_channel module attribute to control amount of parallel I/O queues
Signed-off-by: default avatarJames Smart <james.smart@emulex.com>
Signed-off-by: default avatarJames Bottomley <JBottomley@Parallels.com>
parent aa6fbb75
...@@ -695,6 +695,7 @@ struct lpfc_hba { ...@@ -695,6 +695,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_imax; uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_wq_count; uint32_t cfg_fcp_wq_count;
uint32_t cfg_fcp_eq_count; uint32_t cfg_fcp_eq_count;
uint32_t cfg_fcp_io_channel;
uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_seg_cnt;
uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size; uint32_t cfg_sg_dma_buf_size;
......
...@@ -3654,7 +3654,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, ...@@ -3654,7 +3654,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
return -EINVAL; return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val; phba->cfg_fcp_imax = (uint32_t)val;
for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY) for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
lpfc_modify_fcp_eq_delay(phba, i); lpfc_modify_fcp_eq_delay(phba, i);
return strlen(buf); return strlen(buf);
...@@ -3844,20 +3844,32 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " ...@@ -3844,20 +3844,32 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
/* /*
# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
# This parameter is ignored and will eventually be deprecated
# #
# Value range is [1,31]. Default value is 4. # Value range is [1,7]. Default value is 4.
*/ */
LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP work queues, if possible"); "Set the number of fast-path FCP work queues, if possible");
/* /*
# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues # lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
# #
# Value range is [1,7]. Default value is 1. # Value range is [1,7]. Default value is 4.
*/ */
LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of fast-path FCP event queues, if possible"); "Set the number of fast-path FCP event queues, if possible");
/*
# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 4.
*/
LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
LPFC_FCP_IO_CHAN_MAX,
"Set the number of FCP I/O channels");
/* /*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled # 0 = HBA resets disabled
...@@ -4002,6 +4014,7 @@ struct device_attribute *lpfc_hba_attrs[] = { ...@@ -4002,6 +4014,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_wq_count, &dev_attr_lpfc_fcp_wq_count,
&dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_enable_bg, &dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn, &dev_attr_lpfc_soft_wwpn,
...@@ -4980,6 +4993,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) ...@@ -4980,6 +4993,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fcp_imax_init(phba, lpfc_fcp_imax); lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_enable_bg_init(phba, lpfc_enable_bg); lpfc_enable_bg_init(phba, lpfc_enable_bg);
......
...@@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *); ...@@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *);
irqreturn_t lpfc_sli_sp_intr_handler(int, void *); irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *); irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *); irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
......
...@@ -2013,38 +2013,23 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, ...@@ -2013,38 +2013,23 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (*ppos) if (*ppos)
return 0; return 0;
/* Get slow-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path EQ information:\n");
if (phba->sli4_hba.sp_eq) {
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
phba->sli4_hba.sp_eq->queue_id,
phba->sli4_hba.sp_eq->entry_count,
phba->sli4_hba.sp_eq->entry_size,
phba->sli4_hba.sp_eq->host_index,
phba->sli4_hba.sp_eq->hba_index);
}
/* Get fast-path event queue information */ /* Get fast-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path EQ information:\n"); "HBA EQ information:\n");
if (phba->sli4_hba.fp_eq) { if (phba->sli4_hba.hba_eq) {
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
fcp_qidx++) { fcp_qidx++) {
if (phba->sli4_hba.fp_eq[fcp_qidx]) { if (phba->sli4_hba.hba_eq[fcp_qidx]) {
len += snprintf(pbuffer+len, len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], " "\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], " "QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n", "HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, phba->sli4_hba.hba_eq[fcp_qidx]->queue_id,
phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, phba->sli4_hba.hba_eq[fcp_qidx]->entry_count,
phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, phba->sli4_hba.hba_eq[fcp_qidx]->entry_size,
phba->sli4_hba.fp_eq[fcp_qidx]->host_index, phba->sli4_hba.hba_eq[fcp_qidx]->host_index,
phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); phba->sli4_hba.hba_eq[fcp_qidx]->hba_index);
} }
} }
} }
...@@ -2108,7 +2093,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, ...@@ -2108,7 +2093,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
} }
} while (++fcp_qidx < phba->cfg_fcp_eq_count); } while (++fcp_qidx < phba->cfg_fcp_io_channel);
len += snprintf(pbuffer+len, len += snprintf(pbuffer+len,
LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
} }
...@@ -2153,7 +2138,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, ...@@ -2153,7 +2138,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP WQ information:\n"); "Fast-path FCP WQ information:\n");
if (phba->sli4_hba.fcp_wq) { if (phba->sli4_hba.fcp_wq) {
for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
fcp_qidx++) { fcp_qidx++) {
if (!phba->sli4_hba.fcp_wq[fcp_qidx]) if (!phba->sli4_hba.fcp_wq[fcp_qidx])
continue; continue;
...@@ -2410,31 +2395,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -2410,31 +2395,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) { switch (quetp) {
case LPFC_IDIAG_EQ: case LPFC_IDIAG_EQ:
/* Slow-path event queue */ /* HBA event queue */
if (phba->sli4_hba.sp_eq && if (phba->sli4_hba.hba_eq) {
phba->sli4_hba.sp_eq->queue_id == queid) { for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
/* Sanity check */ qidx++) {
rc = lpfc_idiag_que_param_check( if (phba->sli4_hba.hba_eq[qidx] &&
phba->sli4_hba.sp_eq, index, count); phba->sli4_hba.hba_eq[qidx]->queue_id ==
if (rc)
goto error_out;
idiag.ptr_private = phba->sli4_hba.sp_eq;
goto pass_check;
}
/* Fast-path event queue */
if (phba->sli4_hba.fp_eq) {
for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
if (phba->sli4_hba.fp_eq[qidx] &&
phba->sli4_hba.fp_eq[qidx]->queue_id ==
queid) { queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fp_eq[qidx], phba->sli4_hba.hba_eq[qidx],
index, count); index, count);
if (rc) if (rc)
goto error_out; goto error_out;
idiag.ptr_private = idiag.ptr_private =
phba->sli4_hba.fp_eq[qidx]; phba->sli4_hba.hba_eq[qidx];
goto pass_check; goto pass_check;
} }
} }
...@@ -2481,7 +2456,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -2481,7 +2456,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
phba->sli4_hba.fcp_cq[qidx]; phba->sli4_hba.fcp_cq[qidx];
goto pass_check; goto pass_check;
} }
} while (++qidx < phba->cfg_fcp_eq_count); } while (++qidx < phba->cfg_fcp_io_channel);
} }
goto error_out; goto error_out;
break; break;
...@@ -2513,7 +2488,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -2513,7 +2488,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
} }
/* FCP work queue */ /* FCP work queue */
if (phba->sli4_hba.fcp_wq) { if (phba->sli4_hba.fcp_wq) {
for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
qidx++) {
if (!phba->sli4_hba.fcp_wq[qidx]) if (!phba->sli4_hba.fcp_wq[qidx])
continue; continue;
if (phba->sli4_hba.fcp_wq[qidx]->queue_id == if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
...@@ -4492,7 +4468,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -4492,7 +4468,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_mbx_wq(phba); lpfc_debug_dump_mbx_wq(phba);
lpfc_debug_dump_els_wq(phba); lpfc_debug_dump_els_wq(phba);
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_fcp_wq(phba, fcp_wqidx); lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_hdr_rq(phba);
...@@ -4503,14 +4479,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -4503,14 +4479,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_mbx_cq(phba); lpfc_debug_dump_mbx_cq(phba);
lpfc_debug_dump_els_cq(phba); lpfc_debug_dump_els_cq(phba);
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_fcp_cq(phba, fcp_wqidx); lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
/* /*
* Dump Event Queues (EQs) * Dump Event Queues (EQs)
*/ */
lpfc_debug_dump_sp_eq(phba); for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
} }
...@@ -369,7 +369,7 @@ static inline void ...@@ -369,7 +369,7 @@ static inline void
lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx) lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
{ {
/* sanity check */ /* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_wq_count) if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return; return;
printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n", printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
...@@ -391,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) ...@@ -391,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
int fcp_cqidx, fcp_cqid; int fcp_cqidx, fcp_cqid;
/* sanity check */ /* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_wq_count) if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return; return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break; break;
if (phba->intr_type == MSIX) { if (phba->intr_type == MSIX) {
if (fcp_cqidx >= phba->cfg_fcp_eq_count) if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return; return;
} else { } else {
if (fcp_cqidx > 0) if (fcp_cqidx > 0)
...@@ -413,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) ...@@ -413,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
} }
/** /**
* lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
* @phba: Pointer to HBA context object. * @phba: Pointer to HBA context object.
* @fcp_wqidx: Index to a FCP work queue. * @fcp_wqidx: Index to a FCP work queue.
* *
...@@ -421,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx) ...@@ -421,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
* associated to the FCP work queue specified by the @fcp_wqidx. * associated to the FCP work queue specified by the @fcp_wqidx.
**/ **/
static inline void static inline void
lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx) lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
{ {
struct lpfc_queue *qdesc; struct lpfc_queue *qdesc;
int fcp_eqidx, fcp_eqid; int fcp_eqidx, fcp_eqid;
int fcp_cqidx, fcp_cqid; int fcp_cqidx, fcp_cqid;
/* sanity check */ /* sanity check */
if (fcp_wqidx >= phba->cfg_fcp_wq_count) if (fcp_wqidx >= phba->cfg_fcp_io_channel)
return; return;
fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid; fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid) if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break; break;
if (phba->intr_type == MSIX) { if (phba->intr_type == MSIX) {
if (fcp_cqidx >= phba->cfg_fcp_eq_count) if (fcp_cqidx >= phba->cfg_fcp_io_channel)
return; return;
} else { } else {
if (fcp_cqidx > 0) if (fcp_cqidx > 0)
return; return;
} }
if (phba->cfg_fcp_eq_count == 0) {
fcp_eqidx = -1;
fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
qdesc = phba->sli4_hba.sp_eq;
} else {
fcp_eqidx = fcp_cqidx; fcp_eqidx = fcp_cqidx;
fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id; fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
qdesc = phba->sli4_hba.fp_eq[fcp_eqidx]; qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
}
printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->" printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
"EQ[Idx:%d|Qid:%d]\n", "EQ[Idx:%d|Qid:%d]\n",
...@@ -545,25 +539,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba) ...@@ -545,25 +539,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
} }
/**
* lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
* @phba: Pointer to HBA context object.
*
* This function dumps all entries from the slow-path event queue.
**/
static inline void
lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
{
printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
"EQ[Qid:%d]:\n",
phba->sli4_hba.mbx_wq->queue_id,
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.mbx_cq->queue_id,
phba->sli4_hba.els_cq->queue_id,
phba->sli4_hba.sp_eq->queue_id);
lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
}
/** /**
* lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
* @phba: Pointer to HBA context object. * @phba: Pointer to HBA context object.
...@@ -577,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) ...@@ -577,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
{ {
int wq_idx; int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++) for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
break; break;
if (wq_idx < phba->cfg_fcp_wq_count) { if (wq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
return; return;
...@@ -647,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) ...@@ -647,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
do { do {
if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
break; break;
} while (++cq_idx < phba->cfg_fcp_eq_count); } while (++cq_idx < phba->cfg_fcp_io_channel);
if (cq_idx < phba->cfg_fcp_eq_count) { if (cq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
return; return;
...@@ -680,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) ...@@ -680,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{ {
int eq_idx; int eq_idx;
for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) { for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid) if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
break; break;
} }
if (eq_idx < phba->cfg_fcp_eq_count) { if (eq_idx < phba->cfg_fcp_io_channel) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]); lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
return; return;
} }
if (phba->sli4_hba.sp_eq->queue_id == qid) {
printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
}
} }
void lpfc_debug_dump_all_queues(struct lpfc_hba *); void lpfc_debug_dump_all_queues(struct lpfc_hba *);
This diff is collapsed.
This diff is collapsed.
...@@ -34,18 +34,10 @@ ...@@ -34,18 +34,10 @@
/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT 254 #define LPFC_NEMBED_MBOX_SGL_CNT 254
/* Multi-queue arrangement for fast-path FCP work queues */ /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_FN_EQN_MAX 8 #define LPFC_FCP_IO_CHAN_DEF 4
#define LPFC_SP_EQN_DEF 1 #define LPFC_FCP_IO_CHAN_MIN 1
#define LPFC_FP_EQN_DEF 4 #define LPFC_FCP_IO_CHAN_MAX 8
#define LPFC_FP_EQN_MIN 1
#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
#define LPFC_FN_WQN_MAX 32
#define LPFC_SP_WQN_DEF 1
#define LPFC_FP_WQN_DEF 4
#define LPFC_FP_WQN_MIN 1
#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
/* /*
* Provide the default FCF Record attributes used by the driver * Provide the default FCF Record attributes used by the driver
...@@ -497,17 +489,19 @@ struct lpfc_sli4_hba { ...@@ -497,17 +489,19 @@ struct lpfc_sli4_hba {
uint32_t cfg_eqn; uint32_t cfg_eqn;
uint32_t msix_vec_nr; uint32_t msix_vec_nr;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
/* Pointers to the constructed SLI4 queues */ /* Pointers to the constructed SLI4 queues */
struct lpfc_queue **fp_eq; /* Fast-path event queue */ struct lpfc_queue **hba_eq;/* Event queues for HBA */
struct lpfc_queue *sp_eq; /* Slow-path event queue */ struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
uint16_t *fcp_cq_map;
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
/* Setup information for various queue parameters */ /* Setup information for various queue parameters */
int eq_esize; int eq_esize;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment