Commit bcb24f65 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Adjust default value of lpfc_nvmet_mrq

The current default for async hw receive queues is 1, which presents
issues under heavy load, as the number of queues influences the available
async receive buffer limits.

Raise the default to either the current hw limit (16) or the number
of hw qs configured (io channel value).

Revise the attribute definition for mrq to better reflect what we do for
hw queues. E.g. 0 means default to optimal (# of cpus), non-zero
specifies a specific limit. Before this change, mrq=0 meant target mode
was disabled. As 0 now has a different meaning, rework the if tests to
use the better nvmet_support check.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 07d494f7
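
For reference, the net behavior of the reworked lpfc_nvmet_mrq handling can be summarized by the stand-alone sketch below. It is illustrative only, not the driver's lpfc_nvme_mod_param_dep()/lpfc_sli4_queue_verify() code; the helper name resolve_nvmet_mrq is hypothetical, and the macro values mirror those added in the diff that follows.

/* Sketch only: mirrors the defaulting/clamping applied across the diff below. */
#include <stdint.h>

#define LPFC_NVMET_MRQ_OFF	0xffff	/* target mode disabled */
#define LPFC_NVMET_MRQ_AUTO	0	/* let the driver pick the default */
#define LPFC_NVMET_MRQ_MAX	16	/* current hw limit */

/*
 * Hypothetical helper: nvmet_support = target mode enabled,
 * nvme_io_channel = number of hw queues (io channel value) configured.
 */
static uint32_t resolve_nvmet_mrq(int nvmet_support, uint32_t cfg_nvmet_mrq,
				  uint32_t nvme_io_channel)
{
	if (!nvmet_support)
		return LPFC_NVMET_MRQ_OFF;		/* target mode off */

	if (cfg_nvmet_mrq == LPFC_NVMET_MRQ_AUTO)
		cfg_nvmet_mrq = nvme_io_channel;	/* default to # of hw qs */

	if (cfg_nvmet_mrq > nvme_io_channel)		/* avoid running out of WQE slots */
		cfg_nvmet_mrq = nvme_io_channel;

	if (cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)		/* cap at the hw limit */
		cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;

	return cfg_nvmet_mrq;
}

For example, with the new default (AUTO) and 32 io channels this resolves to 16; with 8 io channels it resolves to 8.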
@@ -3366,12 +3366,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,

 /*
  * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0  driver will calculate optimal number of RQ pairs
  * lpfc_nvmet_mrq = 1  use a single RQ pair
  * lpfc_nvmet_mrq >= 2  use specified RQ pairs for MRQ
  *
  */
 LPFC_ATTR_R(nvmet_mrq,
-	    1, 1, 16,
+	    LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
	    "Specify number of RQ pairs for processing NVMET cmds");

 /*
@@ -6362,6 +6363,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 			phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
 		}

+		if (!phba->cfg_nvmet_mrq)
+			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
 		/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
 		if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
 			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6373,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 				"6018 Adjust lpfc_nvmet_mrq to %d\n",
 				phba->cfg_nvmet_mrq);
 		}
+
+		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
 	} else {
 		/* Not NVME Target mode.  Turn off Target parameters. */
 		phba->nvmet_support = 0;
-		phba->cfg_nvmet_mrq = 0;
+		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
 		phba->cfg_nvmet_fb_size = 0;
 	}
......
@@ -3213,7 +3213,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
 		return 1;
 	}

-	if (eqidx < phba->cfg_nvmet_mrq) {
+	if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
 		/* NVMET CQset */
 		qp = phba->sli4_hba.nvmet_cqset[eqidx];
 		*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
......
@@ -7933,8 +7933,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 		phba->cfg_fcp_io_channel = io_channel;
 	if (phba->cfg_nvme_io_channel > io_channel)
 		phba->cfg_nvme_io_channel = io_channel;
-	if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
-		phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+	if (phba->nvmet_support) {
+		if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+	}
+	if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;

 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -8448,13 +8452,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	/* Release NVME CQ mapping array */
 	lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);

-	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
-					phba->cfg_nvmet_mrq);
-
-	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
-					phba->cfg_nvmet_mrq);
-	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
-					phba->cfg_nvmet_mrq);
+	if (phba->nvmet_support) {
+		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+					 phba->cfg_nvmet_mrq);
+
+		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+					 phba->cfg_nvmet_mrq);
+		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+					 phba->cfg_nvmet_mrq);
+	}

 	/* Release mailbox command work queue */
 	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -9009,19 +9015,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
 			lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);

-	/* Unset NVMET MRQ queue */
-	if (phba->sli4_hba.nvmet_mrq_hdr) {
-		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-			lpfc_rq_destroy(phba,
-					phba->sli4_hba.nvmet_mrq_hdr[qidx],
-					phba->sli4_hba.nvmet_mrq_data[qidx]);
-	}
-
-	/* Unset NVMET CQ Set complete queue */
-	if (phba->sli4_hba.nvmet_cqset) {
-		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-			lpfc_cq_destroy(phba,
-					phba->sli4_hba.nvmet_cqset[qidx]);
+	if (phba->nvmet_support) {
+		/* Unset NVMET MRQ queue */
+		if (phba->sli4_hba.nvmet_mrq_hdr) {
+			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+				lpfc_rq_destroy(
+					phba,
+					phba->sli4_hba.nvmet_mrq_hdr[qidx],
+					phba->sli4_hba.nvmet_mrq_data[qidx]);
+		}
+
+		/* Unset NVMET CQ Set complete queue */
+		if (phba->sli4_hba.nvmet_cqset) {
+			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+				lpfc_cq_destroy(
+					phba, phba->sli4_hba.nvmet_cqset[qidx]);
+		}
 	}

 	/* Unset FCP response complete queue */
@@ -10397,7 +10406,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	    !phba->nvme_support) {
 		phba->nvme_support = 0;
 		phba->nvmet_support = 0;
-		phba->cfg_nvmet_mrq = 0;
+		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
 		phba->cfg_nvme_io_channel = 0;
 		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
......
@@ -25,6 +25,10 @@
 #define LPFC_NVMET_RQE_DEF_COUNT	512
 #define LPFC_NVMET_SUCCESS_LEN		12

+#define LPFC_NVMET_MRQ_OFF		0xffff
+#define LPFC_NVMET_MRQ_AUTO		0
+#define LPFC_NVMET_MRQ_MAX		16
+
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
 	struct lpfc_hba *phba;
......