Commit 81e6a637 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: small sg cnt cleanup

The logic for sg_seg_cnt is a bit convoluted. This patch tries to clean
up a couple of areas, especially around the +2 and +1 logic.

This patch:

- Cleans up the lpfc_sg_seg_cnt attribute to specify a real minimum
  rather than making the minimum be whatever the default is.

- Removes the hardcoded +2 (the number of sgl elements used for the cmd iu
  and rsp iu) and +1 (an additional entry to compensate for nvme reducing
  the io size to allow for a possible partial page) logic in sg list
  initialization. Where the +1 logic is referenced in the host and target
  io checks, the value set in the transport template is used instead, as
  that value was set correctly.

There is certainly more that can be done in this area; it will be addressed
in a combined host/target driver effort.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent c3725bdc
@@ -55,6 +55,7 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
 #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
 #define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
 #define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
 #define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */
@@ -5140,7 +5140,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
  * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+LPFC_ATTR_R(sg_seg_cnt, LPFC_MIN_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
 	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");

 /*
@@ -5806,6 +5806,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	struct lpfc_mqe *mqe;
 	int longs;
 	int fof_vectors = 0;
+	int extra;
 	uint64_t wwn;

 	phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5859,14 +5860,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		 * The WQ create will allocate the ring.
 		 */

+		/*
+		 * 1 for cmd, 1 for rsp, NVME adds an extra one
+		 * for boundary conditions in its max_sgl_segment template.
+		 */
+		extra = 2;
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+			extra++;
+
 		/*
 		 * It doesn't matter what family our adapter is in, we are
 		 * limited to 2 Pages, 512 SGEs, for our SGL.
 		 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
 		 */
 		max_buf_size = (2 * SLI4_PAGE_SIZE);
-		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
-			phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+			phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;

 		/*
 		 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5908,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		 */
 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 					sizeof(struct fcp_rsp) +
-					((phba->cfg_sg_seg_cnt + 2) *
+					((phba->cfg_sg_seg_cnt + extra) *
 					sizeof(struct sli4_sge));

 		/* Total SGEs for scsi_sg_list */
-		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;

 		/*
-		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
 		 * need to post 1 page for the SGL.
 		 */
 	}
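
For orientation, the sizing arithmetic in the lpfc_sli4_driver_resource_setup hunk above can be sketched as a standalone program. This is only an illustration: the struct sizes, the assumed module-parameter value, and the NVME-enabled flag are placeholders rather than values from the driver headers; only LPFC_MAX_SGL_SEG_CNT (512) and the "extra" accounting come from the patch itself.

/* Standalone sketch of the SGL sizing math from the hunk above.
 * Struct sizes and the default segment count are illustrative
 * placeholders, not values taken from the lpfc headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define LPFC_MAX_SGL_SEG_CNT	512	/* from the header hunk above */

/* Assumed sizes, for illustration only. */
#define FCP_CMND_SIZE		32
#define FCP_RSP_SIZE		48
#define SLI4_SGE_SIZE		16

int main(void)
{
	unsigned int sg_seg_cnt = 64;	/* assumed lpfc_sg_seg_cnt value */
	bool nvme_enabled = true;	/* stands in for cfg_enable_fc4_type & LPFC_ENABLE_NVME */

	/* 1 SGE for cmd, 1 for rsp; NVME reserves one more for the
	 * partial-page boundary case, as the added comment explains. */
	int extra = 2;
	if (nvme_enabled)
		extra++;

	/* Cap the configured count so the reserved SGEs still fit. */
	if (sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
		sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;

	unsigned int dma_buf_size = FCP_CMND_SIZE + FCP_RSP_SIZE +
				    (sg_seg_cnt + extra) * SLI4_SGE_SIZE;
	unsigned int total_seg_cnt = sg_seg_cnt + extra;

	printf("extra=%d total_seg_cnt=%u dma_buf_size=%u\n",
	       extra, total_seg_cnt, dma_buf_size);
	return 0;
}
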
@@ -62,6 +62,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
 static void
 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);

+static struct nvme_fc_port_template lpfc_nvme_template;

 /**
  * lpfc_nvme_create_queue -
@@ -1174,7 +1175,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 		first_data_sgl = sgl;
 		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
-		if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
+		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6058 Too many sg segments from "
 				"NVME Transport. Max %d, "
@@ -2003,7 +2003,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		return NULL;
 	}

-	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 			"6109 NVMET prep FCP wqe: seg cnt err: "
 			"NPORT x%x oxid x%x ste %d cnt %d\n",
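
The host and target checks above now bound a request's sg count by the max_sgl_segments value carried in the transport templates instead of recomputing phba->cfg_nvme_seg_cnt + 1. A minimal sketch of that pattern follows, using a mock template struct rather than the kernel's nvme_fc_port_template / nvmet_fc_target_template definitions.

/* Toy illustration of bounding a request's sg count against the value
 * registered in a transport template, as the nvme/nvmet hunks above now do.
 * The struct below is a stand-in, not a kernel definition.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_port_template {
	unsigned int max_sgl_segments;	/* set once at registration time */
};

static struct mock_port_template lpfc_template_example;

/* Reject requests whose sg count exceeds what was advertised. */
static bool seg_cnt_ok(unsigned int seg_cnt)
{
	return seg_cnt <= lpfc_template_example.max_sgl_segments;
}

int main(void)
{
	/* Value the driver would have computed and registered earlier,
	 * e.g. the configured segment count plus the boundary entry. */
	lpfc_template_example.max_sgl_segments = 65;

	printf("64 segs ok? %d\n", seg_cnt_ok(64));	/* prints 1 */
	printf("66 segs ok? %d\n", seg_cnt_ok(66));	/* prints 0 */
	return 0;
}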