Commit e0d0e045 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes that should go into this series. This contains:

   - Fix from Bart for blk-mq requeue queue running, preventing a
     continued loop of run/restart.

   - Fix for a bio/blk-integrity issue, in two parts. One from
     Christoph, fixing where verification happens, and one from Milan,
     for a NULL profile.

   - NVMe pull request, most of the changes being for nvme-fc, but also
     a few trivial core/pci fixes"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvme: fix directive command numd calculation
  nvme: fix nvme reset command timeout handling
  nvme-pci: fix CMB sysfs file removal in reset path
  lpfc: support nvmet_fc defer_rcv callback
  nvmet_fc: add defer_req callback for deferment of cmd buffer return
  nvme: strip trailing 0-bytes in wwid_show
  block: Make blk_mq_delay_kick_requeue_list() rerun the queue at a quiet time
  bio-integrity: only verify integrity on the lowest stacked driver
  bio-integrity: Fix regression if profile verify_fn is NULL
parents 0993133b 4a8b53be
...@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work) ...@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
*/ */
bool __bio_integrity_endio(struct bio *bio) bool __bio_integrity_endio(struct bio *bio)
{ {
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) { struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_integrity_payload *bip = bio_integrity(bio);
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
(bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work); queue_work(kintegrityd_wq, &bip->bip_work);
return false; return false;
......
...@@ -684,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list); ...@@ -684,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, void blk_mq_delay_kick_requeue_list(struct request_queue *q,
unsigned long msecs) unsigned long msecs)
{ {
kblockd_schedule_delayed_work(&q->requeue_work, kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
msecs_to_jiffies(msecs)); msecs_to_jiffies(msecs));
} }
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
......
...@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl, ...@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
c.directive.opcode = nvme_admin_directive_recv; c.directive.opcode = nvme_admin_directive_recv;
c.directive.nsid = cpu_to_le32(nsid); c.directive.nsid = cpu_to_le32(nsid);
c.directive.numd = cpu_to_le32(sizeof(*s)); c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
c.directive.dtype = NVME_DIR_STREAMS; c.directive.dtype = NVME_DIR_STREAMS;
...@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, ...@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_write_cache(q, vwc, vwc); blk_queue_write_cache(q, vwc, vwc);
} }
static void nvme_configure_apst(struct nvme_ctrl *ctrl) static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{ {
/* /*
* APST (Autonomous Power State Transition) lets us program a * APST (Autonomous Power State Transition) lets us program a
...@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) ...@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
* then don't do anything. * then don't do anything.
*/ */
if (!ctrl->apsta) if (!ctrl->apsta)
return; return 0;
if (ctrl->npss > 31) { if (ctrl->npss > 31) {
dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
return; return 0;
} }
table = kzalloc(sizeof(*table), GFP_KERNEL); table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table) if (!table)
return; return 0;
if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
/* Turn off APST. */ /* Turn off APST. */
...@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) ...@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
kfree(table); kfree(table);
return ret;
} }
static void nvme_set_latency_tolerance(struct device *dev, s32 val) static void nvme_set_latency_tolerance(struct device *dev, s32 val)
...@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ...@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
* In fabrics we need to verify the cntlid matches the * In fabrics we need to verify the cntlid matches the
* admin connect * admin connect
*/ */
if (ctrl->cntlid != le16_to_cpu(id->cntlid)) if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
ret = -EINVAL; ret = -EINVAL;
goto out_free;
}
if (!ctrl->opts->discovery_nqn && !ctrl->kas) { if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
dev_err(ctrl->device, dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n"); "keep-alive support is mandatory for fabrics\n");
ret = -EINVAL; ret = -EINVAL;
goto out_free;
} }
} else { } else {
ctrl->cntlid = le16_to_cpu(id->cntlid); ctrl->cntlid = le16_to_cpu(id->cntlid);
...@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ...@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
else if (!ctrl->apst_enabled && prev_apst_enabled) else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device); dev_pm_qos_hide_latency_tolerance(ctrl->device);
nvme_configure_apst(ctrl); ret = nvme_configure_apst(ctrl);
nvme_configure_directives(ctrl); if (ret < 0)
return ret;
ret = nvme_configure_directives(ctrl);
if (ret < 0)
return ret;
ctrl->identified = true; ctrl->identified = true;
return 0;
out_free:
kfree(id);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(nvme_init_identify); EXPORT_SYMBOL_GPL(nvme_init_identify);
...@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, ...@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
return sprintf(buf, "eui.%8phN\n", ns->eui); return sprintf(buf, "eui.%8phN\n", ns->eui);
while (ctrl->serial[serial_len - 1] == ' ') while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
ctrl->serial[serial_len - 1] == '\0'))
serial_len--; serial_len--;
while (ctrl->model[model_len - 1] == ' ') while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
ctrl->model[model_len - 1] == '\0'))
model_len--; model_len--;
return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
......
...@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev) ...@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
if (dev->cmb) { if (dev->cmb) {
iounmap(dev->cmb); iounmap(dev->cmb);
dev->cmb = NULL; dev->cmb = NULL;
if (dev->cmbsz) { sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
sysfs_remove_file_from_group(&dev->ctrl.device->kobj, &dev_attr_cmb.attr, NULL);
&dev_attr_cmb.attr, NULL); dev->cmbsz = 0;
dev->cmbsz = 0;
}
} }
} }
...@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) ...@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
/* /*
* CMBs can currently only exist on >=1.2 PCIe devices. We only * CMBs can currently only exist on >=1.2 PCIe devices. We only
* populate sysfs if a CMB is implemented. Note that we add the * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
* CMB attribute to the nvme_ctrl kobj which removes the need to remove * has no name we can pass NULL as final argument to
* it on exit. Since nvme_dev_attrs_group has no name we can pass * sysfs_add_file_to_group.
* NULL as final argument to sysfs_add_file_to_group.
*/ */
if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev); dev->cmb = nvme_map_cmb(dev);
if (dev->cmb) {
if (dev->cmbsz) {
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL)) &dev_attr_cmb.attr, NULL))
dev_warn(dev->ctrl.device, dev_warn(dev->ctrl.device,
......
This diff is collapsed.
...@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, ...@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_ls_rsp_error)); atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len, len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rcv %08x Release %08x Drop %08x\n", "FCP: Rcv %08x Defer %08x Release %08x "
"Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_defer),
atomic_read(&tgtp->xmt_fcp_release), atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop)); atomic_read(&tgtp->rcv_fcp_cmd_drop));
......
...@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) ...@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_ls_rsp_error)); atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len, len += snprintf(buf + len, size - len,
"FCP: Rcv %08x Drop %08x\n", "FCP: Rcv %08x Defer %08x Release %08x "
"Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in), atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_defer),
atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop)); atomic_read(&tgtp->rcv_fcp_cmd_drop));
if (atomic_read(&tgtp->rcv_fcp_cmd_in) != if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
......
...@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, ...@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
} }
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *rsp)
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp =
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
struct lpfc_hba *phba = ctxp->phba;
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
ctxp->oxid, ctxp->size, smp_processor_id());
tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
}
static struct nvmet_fc_target_template lpfc_tgttemplate = { static struct nvmet_fc_target_template lpfc_tgttemplate = {
.targetport_delete = lpfc_nvmet_targetport_delete, .targetport_delete = lpfc_nvmet_targetport_delete,
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
.fcp_op = lpfc_nvmet_xmt_fcp_op, .fcp_op = lpfc_nvmet_xmt_fcp_op,
.fcp_abort = lpfc_nvmet_xmt_fcp_abort, .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
.fcp_req_release = lpfc_nvmet_xmt_fcp_release, .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
.defer_rcv = lpfc_nvmet_defer_rcv,
.max_hw_queues = 1, .max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
...@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ...@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
return; return;
} }
/* Processing of FCP command is deferred */
if (rc == -EOVERFLOW) {
lpfc_nvmeio_data(phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid);
/* defer reposting rcv buffer till .defer_rcv callback */
ctxp->rqb_buffer = nvmebuf;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
atomic_inc(&tgtp->rcv_fcp_cmd_drop); atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
......
...@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport { ...@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
atomic_t rcv_fcp_cmd_in; atomic_t rcv_fcp_cmd_in;
atomic_t rcv_fcp_cmd_out; atomic_t rcv_fcp_cmd_out;
atomic_t rcv_fcp_cmd_drop; atomic_t rcv_fcp_cmd_drop;
atomic_t rcv_fcp_cmd_defer;
atomic_t xmt_fcp_release; atomic_t xmt_fcp_release;
/* Stats counters - lpfc_nvmet_xmt_fcp_op */ /* Stats counters - lpfc_nvmet_xmt_fcp_op */
......
...@@ -346,6 +346,11 @@ struct nvme_fc_remote_port { ...@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status. * indicating an FC transport Aborted status.
* Entrypoint is Mandatory. * Entrypoint is Mandatory.
* *
* @defer_rcv: Called by the transport to signal the LLLD that it has
* begun processing of a previously received NVME CMD IU. The LLDD
* is now free to re-use the rcv buffer associated with the
* nvmefc_tgt_fcp_req.
*
* @max_hw_queues: indicates the maximum number of hw queues the LLDD * @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization. * supports for cpu affinitization.
* Value is Mandatory. Must be at least 1. * Value is Mandatory. Must be at least 1.
...@@ -846,6 +851,8 @@ struct nvmet_fc_target_template { ...@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
struct nvmefc_tgt_fcp_req *fcpreq); struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq); struct nvmefc_tgt_fcp_req *fcpreq);
void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues; u32 max_hw_queues;
u16 max_sgl_segments; u16 max_sgl_segments;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment