Commit a39c330d authored by Jens Axboe

Merge branch 'nvme-5.0' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph.

* 'nvme-5.0' of git://git.infradead.org/nvme:
  nvme: don't initialize ctrl->cntlid twice
  nvme: introduce NVME_QUIRK_IGNORE_DEV_SUBNQN
  nvme: pad fake subsys NQN vid and ssvid with zeros
  nvme-multipath: zero out ANA log buffer
  nvme-fabrics: unset write/poll queues for discovery controllers
  nvme-tcp: don't ask if controller is fabrics
  nvme-tcp: remove dead code
  nvme-pci: fix out of bounds access in nvme_cqe_pending
  nvme-pci: rerun irq setup on IO queue init errors
  nvme-pci: use the same attributes when freeing host_mem_desc_bufs.
  nvme-pci: fix the wrong setting of nr_maps
parents 5db470e2 b8a38ea6
drivers/nvme/host/core.c
@@ -2173,6 +2173,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
 	size_t nqnlen;
 	int off;
 
+	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
 	nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
 	if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
 		strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
@@ -2181,10 +2182,11 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
 	if (ctrl->vs >= NVME_VS(1, 2, 1))
 		dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
+	}
 
 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
-			"nqn.2014.08.org.nvmexpress:%4x%4x",
+			"nqn.2014.08.org.nvmexpress:%04x%04x",
 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
 	off += sizeof(id->sn);
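
Note on the %04x change: with "%4x", vid/ssvid values below 0x1000 are padded
with spaces rather than zeros, embedding blanks in the generated NQN. A minimal
userspace sketch (hypothetical values, not driver code) shows the difference:

#include <stdio.h>

int main(void)
{
	char buf[64];

	/* old format: field width 4, space-padded */
	snprintf(buf, sizeof(buf), "nqn.2014.08.org.nvmexpress:%4x%4x", 0x80, 0x80);
	printf("old: '%s'\n", buf);	/* old: 'nqn.2014.08.org.nvmexpress:  80  80' */

	/* new format: field width 4, zero-padded */
	snprintf(buf, sizeof(buf), "nqn.2014.08.org.nvmexpress:%04x%04x", 0x80, 0x80);
	printf("new: '%s'\n", buf);	/* new: 'nqn.2014.08.org.nvmexpress:00800080' */

	return 0;
}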
@@ -2500,7 +2502,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	ctrl->oaes = le32_to_cpu(id->oaes);
 	atomic_set(&ctrl->abort_limit, id->acl + 1);
 	ctrl->vwc = id->vwc;
-	ctrl->cntlid = le16_to_cpup(&id->cntlid);
 	if (id->mdts)
 		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
 	else
drivers/nvme/host/fabrics.c
@@ -874,6 +874,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	if (opts->discovery_nqn) {
 		opts->kato = 0;
 		opts->nr_io_queues = 0;
+		opts->nr_write_queues = 0;
+		opts->nr_poll_queues = 0;
 		opts->duplicate_connect = true;
 	}
 	if (ctrl_loss_tmo < 0)
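
Discovery controllers expose only an admin queue. nr_io_queues was already
cleared here; with the separate write and poll queue counts added for 5.0,
those must be cleared as well, or the transport would try to allocate I/O
queues that a discovery controller cannot provide.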
drivers/nvme/host/multipath.c
@@ -570,6 +570,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	return 0;
 out_free_ana_log_buf:
 	kfree(ctrl->ana_log_buf);
+	ctrl->ana_log_buf = NULL;
 out:
 	return error;
 }
@@ -577,5 +578,6 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
 	kfree(ctrl->ana_log_buf);
+	ctrl->ana_log_buf = NULL;
 }
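
Resetting ana_log_buf to NULL after kfree() makes the teardown idempotent: if
nvme_mpath_init() fails after allocating the buffer and the controller is later
uninitialized, the second free sees a NULL pointer and is a no-op instead of a
double free. A userspace sketch of the idiom (free() standing in for kfree()):

#include <stdlib.h>

struct ctrl {
	char *ana_log_buf;
};

static void ctrl_uninit(struct ctrl *c)
{
	free(c->ana_log_buf);	/* free(NULL) is defined as a no-op */
	c->ana_log_buf = NULL;	/* calling ctrl_uninit() again is now safe */
}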
drivers/nvme/host/nvme.h
@@ -90,6 +90,11 @@ enum nvme_quirks {
 	 * Set MEDIUM priority on SQ creation
 	 */
 	NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
+
+	/*
+	 * Ignore device provided subnqn.
+	 */
+	NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
 };
 
 /*
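
The new quirk bit is honored in nvme_init_subnqn() above: devices carrying it
(the Intel 760p/Pro 7600p entry added below is the first user) report a SUBNQN
that cannot be trusted, so the driver falls back to the generated NQN instead.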
drivers/nvme/host/pci.c
@@ -95,6 +95,7 @@ struct nvme_dev;
 struct nvme_queue;
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
 
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
@@ -1019,9 +1020,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
 {
-	if (++nvmeq->cq_head == nvmeq->q_depth) {
+	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
 		nvmeq->cq_head = 0;
 		nvmeq->cq_phase = !nvmeq->cq_phase;
+	} else {
+		nvmeq->cq_head++;
 	}
 }
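
The rewritten helper closes the out-of-bounds window in the old code: cq_head
was incremented first and reset afterwards, so it transiently held q_depth, and
a concurrent reader indexing cqes[cq_head] (as nvme_cqe_pending() does) could
read one entry past the end of the queue. A minimal sketch of the two patterns
(hypothetical struct, not the driver's types):

struct ring {
	unsigned int head;
	unsigned int depth;
	int phase;
};

/* old pattern: head == depth between the increment and the reset */
void advance_unsafe(struct ring *r)
{
	if (++r->head == r->depth) {
		r->head = 0;
		r->phase = !r->phase;
	}
}

/* new pattern: compare first, then store only in-range values */
void advance_safe(struct ring *r)
{
	if (r->head == r->depth - 1) {
		r->head = 0;
		r->phase = !r->phase;
	} else {
		r->head++;
	}
}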
@@ -1420,6 +1423,14 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	return 0;
 }
 
+static void nvme_suspend_io_queues(struct nvme_dev *dev)
+{
+	int i;
+
+	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+		nvme_suspend_queue(&dev->queues[i]);
+}
+
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = &dev->queues[0];
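
The new helper suspends only the I/O queues: the loop stops at i > 0, so the
admin queue (entry 0) stays live, letting callers tear it down separately once
the I/O queues are gone (see nvme_dev_disable() below).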
@@ -1885,8 +1896,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
 		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
 
-		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
-				le64_to_cpu(desc->addr));
+		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+			       le64_to_cpu(desc->addr),
+			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 	}
 
 	kfree(dev->host_mem_desc_bufs);
@@ -1952,8 +1964,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 	while (--i >= 0) {
 		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
 
-		dma_free_coherent(dev->dev, size, bufs[i],
-				le64_to_cpu(descs[i].addr));
+		dma_free_attrs(dev->dev, size, bufs[i],
+			       le64_to_cpu(descs[i].addr),
+			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 	}
 
 	kfree(bufs);
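
Both hunks fix a mismatched allocation/free pair: the host memory buffers are
allocated with dma_alloc_attrs() using DMA_ATTR_NO_KERNEL_MAPPING and
DMA_ATTR_NO_WARN, so they must be freed with the same attributes; freeing them
through dma_free_coherent() is only correct for dma_alloc_coherent() memory. A
hedged sketch of the pairing rule (hypothetical helpers, not driver code):

#include <linux/dma-mapping.h>

static void *hmb_alloc(struct device *dev, size_t size, dma_addr_t *dma)
{
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}

static void hmb_free(struct device *dev, size_t size, void *buf, dma_addr_t dma)
{
	/* must mirror the attrs used at allocation time */
	dma_free_attrs(dev, size, buf, dma,
		       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}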
@@ -2132,6 +2145,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	return result;
 }
 
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+	if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
+		__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct nvme_queue *adminq = &dev->queues[0];
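
The wrapper preserves the required ordering: submission queues must be deleted
before their completion queues, so the CQ deletes are only issued when the SQ
deletion pass succeeded.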
@@ -2168,6 +2187,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	} while (1);
 	adminq->q_db = dev->dbs;
 
+ retry:
 	/* Deregister the admin queue's interrupt */
 	pci_free_irq(pdev, 0, adminq);
@@ -2185,25 +2205,34 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	result = max(result - 1, 1);
 	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
 
-	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
-					dev->io_queues[HCTX_TYPE_DEFAULT],
-					dev->io_queues[HCTX_TYPE_READ],
-					dev->io_queues[HCTX_TYPE_POLL]);
-
 	/*
 	 * Should investigate if there's a performance win from allocating
 	 * more queues than interrupt vectors; it might allow the submission
 	 * path to scale better, even if the receive path is limited by the
 	 * number of interrupts.
 	 */
 	result = queue_request_irq(adminq);
 	if (result) {
 		adminq->cq_vector = -1;
 		return result;
 	}
 	set_bit(NVMEQ_ENABLED, &adminq->flags);
-	return nvme_create_io_queues(dev);
+
+	result = nvme_create_io_queues(dev);
+	if (result || dev->online_queues < 2)
+		return result;
+
+	if (dev->online_queues - 1 < dev->max_qid) {
+		nr_io_queues = dev->online_queues - 1;
+		nvme_disable_io_queues(dev);
+		nvme_suspend_io_queues(dev);
+		goto retry;
+	}
+	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
+		 dev->io_queues[HCTX_TYPE_DEFAULT],
+		 dev->io_queues[HCTX_TYPE_READ],
+		 dev->io_queues[HCTX_TYPE_POLL]);
+	return 0;
 }
 
 static void nvme_del_queue_end(struct request *req, blk_status_t error)
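
This retry loop is the substance of the "rerun irq setup on IO queue init
errors" fix: if fewer I/O queues come online than the interrupt vectors were
spread for (for example because the controller rejects a queue creation
command), the target nr_io_queues is shrunk to what actually came up, the
queues are deleted and suspended, and control jumps back to the retry label so
nvme_setup_irqs() recomputes the vector spread for the real queue count instead
of failing the probe.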
@@ -2248,7 +2277,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	return 0;
 }
 
-static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
 {
 	int nr_queues = dev->online_queues - 1, sent = 0;
 	unsigned long timeout;
@@ -2294,7 +2323,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		dev->tagset.nr_maps = 2; /* default + read */
 		if (dev->io_queues[HCTX_TYPE_POLL])
 			dev->tagset.nr_maps++;
-		dev->tagset.nr_maps = HCTX_MAX_TYPES;
 		dev->tagset.timeout = NVME_IO_TIMEOUT;
 		dev->tagset.numa_node = dev_to_node(dev->dev);
 		dev->tagset.queue_depth =
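
The deleted line was a leftover that unconditionally overwrote the nr_maps
value computed just above, forcing HCTX_MAX_TYPES maps even when no poll queues
were configured; removing it restores the intended two (default + read) or
three (+ poll) maps.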
@@ -2410,7 +2438,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-	int i;
 	bool dead = true;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
@@ -2437,13 +2464,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	nvme_stop_queues(&dev->ctrl);
 
 	if (!dead && dev->ctrl.queue_count > 0) {
-		if (nvme_disable_io_queues(dev, nvme_admin_delete_sq))
-			nvme_disable_io_queues(dev, nvme_admin_delete_cq);
+		nvme_disable_io_queues(dev);
 		nvme_disable_admin_queue(dev, shutdown);
 	}
-	for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
-		nvme_suspend_queue(&dev->queues[i]);
+	nvme_suspend_io_queues(dev);
+	nvme_suspend_queue(&dev->queues[0]);
 	nvme_pci_disable(dev);
 
 	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2946,6 +2971,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_MEDIUM_PRIO_SQ },
+	{ PCI_VDEVICE(INTEL, 0xf1a6),	/* Intel 760p/Pro 7600p */
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
drivers/nvme/host/tcp.c
@@ -1565,7 +1565,6 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_io_queues(ctrl);
 	if (remove) {
-		if (ctrl->ops->flags & NVME_F_FABRICS)
-			blk_cleanup_queue(ctrl->connect_q);
+		blk_cleanup_queue(ctrl->connect_q);
 		blk_mq_free_tag_set(ctrl->tagset);
 	}
@@ -1587,13 +1586,11 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 			goto out_free_io_queues;
 		}
 
-		if (ctrl->ops->flags & NVME_F_FABRICS) {
-			ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
-			if (IS_ERR(ctrl->connect_q)) {
-				ret = PTR_ERR(ctrl->connect_q);
-				goto out_free_tag_set;
-			}
+		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+		if (IS_ERR(ctrl->connect_q)) {
+			ret = PTR_ERR(ctrl->connect_q);
+			goto out_free_tag_set;
 		}
 	} else {
 		blk_mq_update_nr_hw_queues(ctrl->tagset,
 			ctrl->queue_count - 1);
@@ -1606,7 +1603,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 	return 0;
 
 out_cleanup_connect_q:
-	if (new && (ctrl->ops->flags & NVME_F_FABRICS))
+	if (new)
 		blk_cleanup_queue(ctrl->connect_q);
 out_free_tag_set:
 	if (new)
@@ -1620,7 +1617,6 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_queue(ctrl, 0);
 	if (remove) {
-		free_opal_dev(ctrl->opal_dev);
 		blk_cleanup_queue(ctrl->admin_q);
 		blk_mq_free_tag_set(ctrl->admin_tagset);
 	}
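
The tcp changes remove conditions that can never vary: nvme-tcp is a fabrics
transport by definition, so the NVME_F_FABRICS checks were always true and
connect_q always exists. The free_opal_dev() call was likewise dead code, since
nvme-tcp never sets up an OPAL device in the first place.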