Commit 4a8b53be authored by Jens Axboe

Merge branch 'nvme-4.13' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"A few more small fixes - the fc/lpfc update is the biggest by far."
parents d4acf365 a082b426
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 	c.directive.opcode = nvme_admin_directive_recv;
 	c.directive.nsid = cpu_to_le32(nsid);
-	c.directive.numd = cpu_to_le32(sizeof(*s));
+	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
 	c.directive.dtype = NVME_DIR_STREAMS;
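
For context on the hunk above (illustration only, not part of the patch; the helper name below is hypothetical): NUMD in NVMe admin commands is a zero-based count of dwords rather than a byte count, which is exactly the conversion the corrected line performs.

#include <stddef.h>

/*
 * Hypothetical helper mirroring the corrected encoding above:
 * NUMD is a zero-based dword count, so a byte length is encoded
 * as (len / 4) - 1.
 */
static inline unsigned int nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;	/* e.g. a 32-byte buffer encodes as 7 */
}
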
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 	blk_queue_write_cache(q, vwc, vwc);
 }
 
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 {
 	/*
 	 * APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 	 * then don't do anything.
 	 */
 	if (!ctrl->apsta)
-		return;
+		return 0;
 
 	if (ctrl->npss > 31) {
 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
-		return;
+		return 0;
 	}
 
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
-		return;
+		return 0;
 
 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
 		/* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
 
 	kfree(table);
+	return ret;
 }
 
 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		 * In fabrics we need to verify the cntlid matches the
 		 * admin connect
 		 */
-		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
 			ret = -EINVAL;
+			goto out_free;
+		}
 
 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
 			dev_err(ctrl->device,
 				"keep-alive support is mandatory for fabrics\n");
 			ret = -EINVAL;
+			goto out_free;
 		}
 	} else {
 		ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	else if (!ctrl->apst_enabled && prev_apst_enabled)
 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
 
-	nvme_configure_apst(ctrl);
-	nvme_configure_directives(ctrl);
+	ret = nvme_configure_apst(ctrl);
+	if (ret < 0)
+		return ret;
+
+	ret = nvme_configure_directives(ctrl);
+	if (ret < 0)
+		return ret;
 
 	ctrl->identified = true;
 
+	return 0;
+
+out_free:
+	kfree(id);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 
 	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
 		return sprintf(buf, "eui.%8phN\n", ns->eui);
-	while (ctrl->serial[serial_len - 1] == ' ')
+	while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+				  ctrl->serial[serial_len - 1] == '\0'))
 		serial_len--;
-	while (ctrl->model[model_len - 1] == ' ')
+	while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+				 ctrl->model[model_len - 1] == '\0'))
 		model_len--;
 
 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
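
Aside (a minimal sketch, not part of the patch; the helper name is hypothetical): the reworked loops above are a bounds-checked right-trim, so a serial or model field consisting entirely of spaces or NUL bytes ends up with length 0 instead of the loop walking off the front of the buffer.

#include <stddef.h>

/* Right-trim trailing spaces and NUL bytes without underflowing. */
static size_t trim_trailing_len(const char *s, size_t len)
{
	while (len > 0 && (s[len - 1] == ' ' || s[len - 1] == '\0'))
		len--;
	return len;
}
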
......
@@ -1558,12 +1558,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 	if (dev->cmb) {
 		iounmap(dev->cmb);
 		dev->cmb = NULL;
-		if (dev->cmbsz) {
-			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-						     &dev_attr_cmb.attr, NULL);
-			dev->cmbsz = 0;
-		}
+		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+					     &dev_attr_cmb.attr, NULL);
+		dev->cmbsz = 0;
 	}
 }
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	/*
 	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
-	 * populate sysfs if a CMB is implemented. Note that we add the
-	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
-	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
-	 * NULL as final argument to sysfs_add_file_to_group.
+	 * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+	 * has no name we can pass NULL as final argument to
+	 * sysfs_add_file_to_group.
 	 */
 	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
 		dev->cmb = nvme_map_cmb(dev);
-
-		if (dev->cmbsz) {
+		if (dev->cmb) {
 			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
 						    &dev_attr_cmb.attr, NULL))
 				dev_warn(dev->ctrl.device,
......
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
 	struct kref			ref;
 };
 
+struct nvmet_fc_defer_fcp_req {
+	struct list_head		req_list;
+	struct nvmefc_tgt_fcp_req	*fcp_req;
+};
+
 struct nvmet_fc_tgt_queue {
 	bool				ninetypercent;
 	u16				qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
 	struct nvmet_fc_tgt_assoc	*assoc;
 	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
 	struct list_head		fod_list;
+	struct list_head		pending_cmd_list;
+	struct list_head		avail_defer_list;
 	struct workqueue_struct		*work_q;
 	struct kref			ref;
 } __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+					struct nvmet_fc_fcp_iod *fod);
 
 
 /* *********************** FC-NVME DMA Handling **************************** */
@@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
 	static struct nvmet_fc_fcp_iod *fod;
-	unsigned long flags;
 
-	spin_lock_irqsave(&queue->qlock, flags);
+	lockdep_assert_held(&queue->qlock);
+
 	fod = list_first_entry_or_null(&queue->fod_list,
 				struct nvmet_fc_fcp_iod, fcp_list);
 	if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 		 * will "inherit" that reference.
 		 */
 	}
-	spin_unlock_irqrestore(&queue->qlock, flags);
 
 	return fod;
 }
 
+static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+		       struct nvmet_fc_tgt_queue *queue,
+		       struct nvmefc_tgt_fcp_req *fcpreq)
+{
+	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+	/*
+	 * put all admin cmds on hw queue id 0. All io commands go to
+	 * the respective hw queue based on a modulo basis
+	 */
+	fcpreq->hwqid = queue->qid ?
+		((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+		queue_work_on(queue->cpu, queue->work_q, &fod->work);
+	else
+		nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
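
For reference, the hw queue mapping applied by the new nvmet_fc_queue_fcp_req() above reduces to the following (illustrative sketch only; the standalone helper name is hypothetical): the admin queue (qid 0) always maps to hw queue 0, and I/O queues are spread across the LLDD's hardware queues on a modulo basis.

/* e.g. qid 5 with 4 hw queues maps to hwqid (5 - 1) % 4 = 0 */
static unsigned int fc_nvmet_hwqid(unsigned int qid, unsigned int max_hw_queues)
{
	return qid ? (qid - 1) % max_hw_queues : 0;
}
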
 static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 			struct nvmet_fc_fcp_iod *fod)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
 	unsigned long flags;
 
 	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 	fcpreq->nvmet_fc_private = NULL;
 
-	spin_lock_irqsave(&queue->qlock, flags);
-	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
 	fod->active = false;
 	fod->abort = false;
 	fod->aborted = false;
 	fod->writedataactive = false;
 	fod->fcpreq = NULL;
-	spin_unlock_irqrestore(&queue->qlock, flags);
 
-	/*
-	 * release the reference taken at queue lookup and fod allocation
-	 */
-	nvmet_fc_tgt_q_put(queue);
+	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
-	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+	spin_lock_irqsave(&queue->qlock, flags);
+	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+				struct nvmet_fc_defer_fcp_req, req_list);
+	if (!deferfcp) {
+		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		/* Release reference taken at queue lookup and fod allocation */
+		nvmet_fc_tgt_q_put(queue);
+		return;
+	}
+
+	/* Re-use the fod for the next pending cmd that was deferred */
+	list_del(&deferfcp->req_list);
+
+	fcpreq = deferfcp->fcp_req;
+
+	/* deferfcp can be reused for another IO at a later date */
+	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	/* Save NVME CMD IO in fod */
+	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+	/* Setup new fcpreq to be processed */
+	fcpreq->rspaddr = NULL;
+	fcpreq->rsplen = 0;
+	fcpreq->nvmet_fc_private = fod;
+	fod->fcpreq = fcpreq;
+	fod->active = true;
+
+	/* inform LLDD IO is now being processed */
+	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+	/* Submit deferred IO for processing */
+	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+	/*
+	 * Leave the queue lookup get reference taken when
+	 * fod was originally allocated.
+	 */
 }
 
 static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	queue->port = assoc->tgtport->port;
 	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
 	INIT_LIST_HEAD(&queue->fod_list);
+	INIT_LIST_HEAD(&queue->avail_defer_list);
+	INIT_LIST_HEAD(&queue->pending_cmd_list);
 	atomic_set(&queue->connected, 0);
 	atomic_set(&queue->sqtail, 0);
 	atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
 	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
 	unsigned long flags;
 	int i, writedataactive;
 	bool disconnect;
@@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 			}
 		}
 	}
+
+	/* Cleanup defer'ed IOs in queue */
+	list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+		list_del(&deferfcp->req_list);
+		kfree(deferfcp);
+	}
+
+	for (;;) {
+		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+				struct nvmet_fc_defer_fcp_req, req_list);
+		if (!deferfcp)
+			break;
+
+		list_del(&deferfcp->req_list);
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		kfree(deferfcp);
+
+		spin_lock_irqsave(&queue->qlock, flags);
+	}
 	spin_unlock_irqrestore(&queue->qlock, flags);
 
 	flush_workqueue(queue->work_q);
@@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
  * layer for processing.
  *
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing. As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
+ *
+ * However, due to the packetized nature of FC and the api of the FC
+ * LLDD, which may issue a hw command to send the response but not get
+ * the hw completion for that command before a new command is
+ * asynchronously received, it's possible for a command to be received
+ * before the LLDD and nvmet_fc have recycled the job structure. This
+ * gives the appearance of more commands received than fit in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job structure.
+ * In these "overrun" cases, a temporary queue element is allocated, the
+ * LLDD request and CMD IU buffer information is remembered, and the
+ * routine returns a -EOVERFLOW status. Subsequently, when a queue job
+ * structure is freed, it is immediately reallocated for anything on the
+ * pending request list. The LLDD's defer_rcv() callback is called,
+ * informing the LLDD that it may reuse the CMD IU buffer, and the io
+ * is then started normally with the transport.
  *
- * If this routine returns error, the lldd should abort the exchange.
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
  *
  * @target_port: pointer to the (registered) target port the FCP CMD IU
  *               was received on.
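
To make the contract described above concrete, here is a minimal sketch of the LLDD side (hypothetical names: struct my_lldd_cmd, my_lldd_handle_cmd_iu, my_lldd_abort_exchange, and the hold_cmdiu_buffer flag; only nvmet_fc_rcv_fcp_req() and its return codes come from the transport), mirroring what the lpfc changes further down do:

static void my_lldd_handle_cmd_iu(struct nvmet_fc_target_port *tport,
				  struct my_lldd_cmd *cmd,
				  void *cmdiubuf, u32 cmdiubuf_len)
{
	int rc = nvmet_fc_rcv_fcp_req(tport, &cmd->fcp_req,
				      cmdiubuf, cmdiubuf_len);

	if (rc == 0)
		return;		/* accepted; CMD IU buffer may be reused now */

	if (rc == -EOVERFLOW) {
		/* queued by the transport; hold the buffer until .defer_rcv() fires */
		cmd->hold_cmdiu_buffer = true;
		return;
	}

	/* any other error: the transport did not accept the command */
	my_lldd_abort_exchange(cmd);
}
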
@@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
 	struct nvmet_fc_tgt_queue *queue;
 	struct nvmet_fc_fcp_iod *fod;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
+	unsigned long flags;
 
 	/* validate iu, so the connection id can be used to find the queue */
 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
 	 * when the fod is freed.
 	 */
 
+	spin_lock_irqsave(&queue->qlock, flags);
 	fod = nvmet_fc_alloc_fcp_iod(queue);
-	if (!fod) {
+	if (fod) {
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		fcpreq->nvmet_fc_private = fod;
+		fod->fcpreq = fcpreq;
+
+		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+		return 0;
+	}
+
+	if (!tgtport->ops->defer_rcv) {
+		spin_unlock_irqrestore(&queue->qlock, flags);
 		/* release the queue lookup reference */
 		nvmet_fc_tgt_q_put(queue);
 		return -ENOENT;
 	}
 
-	fcpreq->nvmet_fc_private = fod;
-	fod->fcpreq = fcpreq;
-	/*
-	 * put all admin cmds on hw queue id 0. All io commands go to
-	 * the respective hw queue based on a modulo basis
-	 */
-	fcpreq->hwqid = queue->qid ?
-		((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
-	memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
-
-	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
-		queue_work_on(queue->cpu, queue->work_q, &fod->work);
-	else
-		nvmet_fc_handle_fcp_rqst(tgtport, fod);
-
-	return 0;
+	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+			struct nvmet_fc_defer_fcp_req, req_list);
+	if (deferfcp) {
+		/* Just re-use one that was previously allocated */
+		list_del(&deferfcp->req_list);
+	} else {
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		/* Now we need to dynamically allocate one */
+		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+		if (!deferfcp) {
+			/* release the queue lookup reference */
+			nvmet_fc_tgt_q_put(queue);
+			return -ENOMEM;
+		}
+		spin_lock_irqsave(&queue->qlock, flags);
+	}
+
+	/* For now, use rspaddr / rsplen to save payload information */
+	fcpreq->rspaddr = cmdiubuf;
+	fcpreq->rsplen = cmdiubuf_len;
+	deferfcp->fcp_req = fcpreq;
+
+	/* defer processing till a fod becomes available */
+	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+	/* NOTE: the queue lookup reference is still valid */
+
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	return -EOVERFLOW;
 }
 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
......
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				atomic_read(&tgtp->xmt_ls_rsp_error));
 
 		len += snprintf(buf+len, PAGE_SIZE-len,
-				"FCP: Rcv %08x Release %08x Drop %08x\n",
+				"FCP: Rcv %08x Defer %08x Release %08x "
+				"Drop %08x\n",
 				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_defer),
 				atomic_read(&tgtp->xmt_fcp_release),
 				atomic_read(&tgtp->rcv_fcp_cmd_drop));
......
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				atomic_read(&tgtp->xmt_ls_rsp_error));
 
 		len += snprintf(buf + len, size - len,
-				"FCP: Rcv %08x Drop %08x\n",
+				"FCP: Rcv %08x Defer %08x Release %08x "
+				"Drop %08x\n",
 				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_defer),
+				atomic_read(&tgtp->xmt_fcp_release),
 				atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
 		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
......
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+		     struct nvmefc_tgt_fcp_req *rsp)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_nvmet_rcv_ctx *ctxp =
+		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct lpfc_hba *phba = ctxp->phba;
+
+	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+			 ctxp->oxid, ctxp->size, smp_processor_id());
+
+	tgtp = phba->targetport->private;
+	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+	lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
 	.targetport_delete = lpfc_nvmet_targetport_delete,
 	.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
 	.fcp_op = lpfc_nvmet_xmt_fcp_op,
 	.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+	.defer_rcv = lpfc_nvmet_defer_rcv,
 
 	.max_hw_queues = 1,
 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 		return;
 	}
 
+	/* Processing of FCP command is deferred */
+	if (rc == -EOVERFLOW) {
+		lpfc_nvmeio_data(phba,
+				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+				 oxid, size, sid);
+		/* defer reposting rcv buffer till .defer_rcv callback */
+		ctxp->rqb_buffer = nvmebuf;
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		return;
+	}
+
 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
......
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
 	atomic_t rcv_fcp_cmd_in;
 	atomic_t rcv_fcp_cmd_out;
 	atomic_t rcv_fcp_cmd_drop;
+	atomic_t rcv_fcp_cmd_defer;
 	atomic_t xmt_fcp_release;
 
 	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
......
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
  *       indicating an FC transport Aborted status.
  *       Entrypoint is Mandatory.
  *
+ * @defer_rcv:  Called by the transport to signal the LLDD that it has
+ *       begun processing of a previously received NVME CMD IU. The LLDD
+ *       is now free to re-use the rcv buffer associated with the
+ *       nvmefc_tgt_fcp_req.
+ *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
 				struct nvmefc_tgt_fcp_req *fcpreq);
 	void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
 				struct nvmefc_tgt_fcp_req *fcpreq);
+	void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+				struct nvmefc_tgt_fcp_req *fcpreq);
 
 	u32 max_hw_queues;
 	u16 max_sgl_segments;
......