Commit 966bb5b7 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Break up IO ctx list into a separate get and put list

Since the unsolicited receive ISR and the command completion ISR both access
and lock this list, separating it into get and put lists will reduce contention.

Replaced
struct list_head lpfc_nvmet_ctx_list;
with
struct list_head lpfc_nvmet_ctx_get_list;
struct list_head lpfc_nvmet_ctx_put_list;
and all corresponding locks and counters.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 810ffa47
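
For illustration, here is a minimal, self-contained userspace sketch of the split get/put free-list pattern this commit adopts. It is not lpfc code: the names (struct ctx_pool, ctx_get, ctx_put) are assumptions for the example, and pthread spinlocks stand in for the kernel's spin_lock_irqsave/spin_lock primitives. The consumer path (unsolicited receive) normally takes only the get lock, the producer path (command completion) takes only the put lock, and the get list is refilled from the put list only when it runs empty.

/*
 * Minimal sketch of a split get/put free list (illustrative only).
 */
#include <pthread.h>
#include <stddef.h>

struct ctx_buf {
	struct ctx_buf *next;
};

struct ctx_pool {
	pthread_spinlock_t get_lock;	/* protects get_list and get_cnt */
	pthread_spinlock_t put_lock;	/* protects put_list and put_cnt */
	struct ctx_buf *get_list;	/* consumer-side free list */
	struct ctx_buf *put_list;	/* producer-side free list */
	unsigned int get_cnt;
	unsigned int put_cnt;
};

static void ctx_pool_init(struct ctx_pool *p)
{
	pthread_spin_init(&p->get_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&p->put_lock, PTHREAD_PROCESS_PRIVATE);
	p->get_list = p->put_list = NULL;
	p->get_cnt = p->put_cnt = 0;
}

/*
 * Consumer path: normally touches only get_lock.  Only when the get list
 * is empty does it take put_lock and splice the whole put list over.
 */
static struct ctx_buf *ctx_get(struct ctx_pool *p)
{
	struct ctx_buf *cb = NULL;

	pthread_spin_lock(&p->get_lock);
	if (!p->get_cnt) {
		pthread_spin_lock(&p->put_lock);
		p->get_list = p->put_list;
		p->get_cnt = p->put_cnt;
		p->put_list = NULL;
		p->put_cnt = 0;
		pthread_spin_unlock(&p->put_lock);
	}
	if (p->get_cnt) {
		cb = p->get_list;
		p->get_list = cb->next;
		p->get_cnt--;
	}
	pthread_spin_unlock(&p->get_lock);
	return cb;			/* NULL means no free context */
}

/* Producer path: touches only put_lock. */
static void ctx_put(struct ctx_pool *p, struct ctx_buf *cb)
{
	pthread_spin_lock(&p->put_lock);
	cb->next = p->put_list;
	p->put_list = cb;
	p->put_cnt++;
	pthread_spin_unlock(&p->put_lock);
}

As in the driver's debugfs and heartbeat paths below, a reader that wants the total number of free contexts must hold both locks and sum get_cnt + put_cnt.
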
@@ -245,15 +245,18 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				atomic_read(&tgtp->xmt_abort_rsp),
 				atomic_read(&tgtp->xmt_abort_rsp_error));
 
-		spin_lock(&phba->sli4_hba.nvmet_io_lock);
+		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
 		tot = phba->sli4_hba.nvmet_xri_cnt -
-			phba->sli4_hba.nvmet_ctx_cnt;
-		spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+			(phba->sli4_hba.nvmet_ctx_get_cnt +
+			phba->sli4_hba.nvmet_ctx_put_cnt);
+		spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
 
 		len += snprintf(buf + len, PAGE_SIZE - len,
 				"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
 				"CTX Outstanding %08llx\n",
-				phba->sli4_hba.nvmet_ctx_cnt,
+				phba->sli4_hba.nvmet_xri_cnt,
 				phba->sli4_hba.nvmet_io_wait_cnt,
 				phba->sli4_hba.nvmet_io_wait_total,
 				tot);
@@ -848,15 +848,18 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 	}
 
-	spin_lock(&phba->sli4_hba.nvmet_io_lock);
+	spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+	spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
 	tot = phba->sli4_hba.nvmet_xri_cnt -
-		phba->sli4_hba.nvmet_ctx_cnt;
-	spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+		(phba->sli4_hba.nvmet_ctx_get_cnt +
+		phba->sli4_hba.nvmet_ctx_put_cnt);
+	spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
 
 	len += snprintf(buf + len, size - len,
 			"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
 			"CTX Outstanding %08llx\n",
-			phba->sli4_hba.nvmet_ctx_cnt,
+			phba->sli4_hba.nvmet_xri_cnt,
 			phba->sli4_hba.nvmet_io_wait_cnt,
 			phba->sli4_hba.nvmet_io_wait_total,
 			tot);
@@ -1281,10 +1281,13 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	/* Check outstanding IO count */
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		if (phba->nvmet_support) {
-			spin_lock(&phba->sli4_hba.nvmet_io_lock);
+			spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+			spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
 			tot = phba->sli4_hba.nvmet_xri_cnt -
-				phba->sli4_hba.nvmet_ctx_cnt;
-			spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+				(phba->sli4_hba.nvmet_ctx_get_cnt +
+				phba->sli4_hba.nvmet_ctx_put_cnt);
+			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+			spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
 		} else {
 			tot = atomic_read(&phba->fc4NvmeIoCmpls);
 			data1 = atomic_read(
@@ -3487,7 +3490,6 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
 	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-
 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 		/* els xri-sgl expanded */
 		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
@@ -5935,7 +5937,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
 
 	/* Fast-path XRI aborted CQ Event work queue list */
@@ -5944,7 +5947,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* This abort list used by worker thread */
 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
-	spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
 	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
 
 	/*
@@ -267,11 +267,11 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	}
 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
 	list_add_tail(&ctx_buf->list,
-		      &phba->sli4_hba.lpfc_nvmet_ctx_list);
-	phba->sli4_hba.nvmet_ctx_cnt++;
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+		      &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+	phba->sli4_hba.nvmet_ctx_put_cnt++;
+	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
 #endif
 }
@@ -865,28 +865,46 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
 	unsigned long flags;
 
-	list_for_each_entry_safe(
-		ctx_buf, next_ctx_buf,
-		&phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
-		spin_lock_irqsave(
-			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+	spin_lock_irq(&phba->sli4_hba.nvmet_ctx_put_lock);
+	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+			&phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+		spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		list_del_init(&ctx_buf->list);
-		spin_unlock_irqrestore(
-			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+		spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		__lpfc_clear_active_sglq(phba,
 					 ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
 		ctx_buf->sglq->ndlp = NULL;
 
-		spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
 		list_add_tail(&ctx_buf->sglq->list,
 			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
-		spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
-				       flags);
+		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
 
 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
 		kfree(ctx_buf->context);
 	}
+	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+			&phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
+		spin_lock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		list_del_init(&ctx_buf->list);
+		spin_unlock_irq(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		__lpfc_clear_active_sglq(phba,
+					 ctx_buf->sglq->sli4_lxritag);
+		ctx_buf->sglq->state = SGL_FREED;
+		ctx_buf->sglq->ndlp = NULL;
+
+		spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
+		list_add_tail(&ctx_buf->sglq->list,
+			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
+		spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
+
+		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+		kfree(ctx_buf->context);
+	}
+	spin_unlock_irq(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
 }
 
 static int
@@ -958,12 +976,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 					"6407 Ran out of NVMET XRIs\n");
 			return -ENOMEM;
 		}
-		spin_lock(&phba->sli4_hba.nvmet_io_lock);
+		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
 		list_add_tail(&ctx_buf->list,
-			      &phba->sli4_hba.lpfc_nvmet_ctx_list);
-		spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+			      &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
 	}
 
-	phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+	phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
 	return 0;
 }
@@ -1370,13 +1388,31 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 		goto dropit;
 	}
 
-	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
-	if (phba->sli4_hba.nvmet_ctx_cnt) {
-		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+	if (phba->sli4_hba.nvmet_ctx_get_cnt) {
+		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
-		phba->sli4_hba.nvmet_ctx_cnt--;
+		phba->sli4_hba.nvmet_ctx_get_cnt--;
+	} else {
+		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+		if (phba->sli4_hba.nvmet_ctx_put_cnt) {
+			list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
+				    &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+			INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+			phba->sli4_hba.nvmet_ctx_get_cnt =
+				phba->sli4_hba.nvmet_ctx_put_cnt;
+			phba->sli4_hba.nvmet_ctx_put_cnt = 0;
+			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+
+			list_remove_head(
+				&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+				ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+			phba->sli4_hba.nvmet_ctx_get_cnt--;
+		} else {
+			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+		}
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
 
 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -621,7 +621,8 @@ struct lpfc_sli4_hba {
 	uint16_t scsi_xri_start;
 	uint16_t els_xri_cnt;
 	uint16_t nvmet_xri_cnt;
-	uint16_t nvmet_ctx_cnt;
+	uint16_t nvmet_ctx_get_cnt;
+	uint16_t nvmet_ctx_put_cnt;
 	uint16_t nvmet_io_wait_cnt;
 	uint16_t nvmet_io_wait_total;
 	struct list_head lpfc_els_sgl_list;
@@ -630,7 +631,8 @@ struct lpfc_sli4_hba {
 	struct list_head lpfc_abts_nvmet_ctx_list;
 	struct list_head lpfc_abts_scsi_buf_list;
 	struct list_head lpfc_abts_nvme_buf_list;
-	struct list_head lpfc_nvmet_ctx_list;
+	struct list_head lpfc_nvmet_ctx_get_list;
+	struct list_head lpfc_nvmet_ctx_put_list;
 	struct list_head lpfc_nvmet_io_wait_list;
 	struct lpfc_sglq **lpfc_sglq_active_list;
 	struct list_head lpfc_rpi_hdr_list;
@@ -662,7 +664,8 @@ struct lpfc_sli4_hba {
 	spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t sgl_list_lock; /* list of aborted els IOs */
-	spinlock_t nvmet_io_lock;
+	spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
+	spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
 	spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
 	uint32_t physical_port;