Commit da14f237 authored by Jens Axboe

Merge tag 'nvme-5.19-2022-05-18' of git://git.infradead.org/nvme into for-5.19/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.19

 - tighten the PCI presence check (Stefan Roese)
 - fix a potential NULL pointer dereference in an error path
   (Kyle Miller Smith)
 - fix interpretation of the DMRSL field (Tom Yan)
 - relax the data transfer alignment (Keith Busch)
 - verbose error logging improvements (Max Gurtovoy, Chaitanya Kulkarni)
 - misc cleanups (Chaitanya Kulkarni, me)"

* tag 'nvme-5.19-2022-05-18' of git://git.infradead.org/nvme:
  nvme: split the enum used for various register constants
  nvme-fabrics: add a request timeout helper
  nvme-pci: harden drive presence detect in nvme_dev_disable()
  nvme-pci: fix a NULL pointer dereference in nvme_alloc_admin_tags
  nvme: mark internal passthru request RQF_QUIET
  nvme: remove unneeded include from constants file
  nvme: add missing status values to verbose logging
  nvme: set dma alignment to dword
  nvme: fix interpretation of DMRSL
parents 491bf8f2 e626f37e
drivers/nvme/host/constants.c
@@ -4,7 +4,6 @@
* Copyright (c) 2022, Oracle and/or its affiliates
*/
-#include <linux/blkdev.h>
#include "nvme.h"
#ifdef CONFIG_NVME_VERBOSE_ERRORS
@@ -155,10 +154,13 @@ static const char * const nvme_statuses[] = {
[NVME_SC_COMPARE_FAILED] = "Compare Failure",
[NVME_SC_ACCESS_DENIED] = "Access Denied",
[NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
+[NVME_SC_INTERNAL_PATH_ERROR] = "Internal Pathing Error",
[NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
[NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
[NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
+[NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error",
[NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
[NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
};
const unsigned char *nvme_get_error_status_str(u16 status)
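The hunk ends at the lookup helper whose body is collapsed here. For context, a minimal sketch of how that lookup works, reconstructed from memory of the upstream helper (treat the exact masking and fallback as assumptions):

const unsigned char *nvme_get_error_status_str(u16 status)
{
	/* Strip the DNR/More bits; the remaining SCT+SC value indexes the
	 * nvme_statuses[] table above. */
	status &= 0x7ff;
	if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
		return nvme_statuses[status];
	return "Unknown";
}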
drivers/nvme/host/core.c
@@ -1207,6 +1207,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
+rq->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
}
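RQF_QUIET on this internal keep-alive request (and on the abort and delete-queue requests in the pci.c hunks below) suppresses the driver's verbose error logging for failures that are expected, e.g. during teardown. A hedged sketch of the gate on the completion side, mirroring the upstream nvme_end_req() logic (exact placement may differ):

	/* Only failed commands not flagged RQF_QUIET get a verbose log line. */
	if (nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))
		nvme_log_error(req);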
@@ -1634,6 +1635,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
if (queue->limits.max_discard_sectors)
return;
+if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+	ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
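DMRSL from Identify is denominated in namespace logical blocks, while queue limits use 512-byte sectors; that is what nvme_lba_to_sect() and nvme_sect_to_lba() bridge above, with the UINT_MAX clamp guarding the u32 limit field against overflow. For reference, the conversion helpers as defined in drivers/nvme/host/nvme.h (reproduced from memory; treat as a sketch):

static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}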
@@ -1770,7 +1774,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
-blk_queue_dma_alignment(q, 7);
+blk_queue_dma_alignment(q, 3);
blk_queue_write_cache(q, vwc, vwc);
}
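blk_queue_dma_alignment() takes a mask, not a byte count: 3 requires dword (4-byte) aligned buffers where the old value 7 demanded 8-byte alignment, so more user buffers can be used directly for passthru I/O without bouncing. An illustrative check using the matching getter queue_dma_alignment() (demo function, not upstream code):

static bool demo_addr_is_aligned(struct request_queue *q, unsigned long addr)
{
	/* The address is acceptable when none of the mask bits are set. */
	return (addr & queue_dma_alignment(q)) == 0;
}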
@@ -2893,8 +2897,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
if (id->dmrl)
ctrl->max_discard_segments = id->dmrl;
-if (id->dmrsl)
-	ctrl->max_discard_sectors = le32_to_cpu(id->dmrsl);
+ctrl->dmrsl = le32_to_cpu(id->dmrsl);
if (id->wzsl)
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
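Storing the raw dmrsl instead of converting at controller init is the point of the fix: the field counts logical blocks, whose size is per-namespace, so the conversion has to wait until nvme_config_discard() runs with a namespace in hand. A worked example with hypothetical numbers:

/* dmrsl = 0x10000 logical blocks on a namespace with 4096-byte LBAs
 * (lba_shift = 12): max_discard_sectors = 0x10000 << (12 - 9) = 0x80000.
 * The same dmrsl on a 512-byte-LBA namespace yields only 0x10000 sectors,
 * which is why a single controller-wide conversion would be wrong. */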
drivers/nvme/host/fabrics.h
@@ -187,6 +187,14 @@ static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
return ctrl->subsys->subnqn;
}
+static inline void nvmf_complete_timed_out_request(struct request *rq)
+{
+	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+		blk_mq_complete_request(rq);
+	}
+}
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
drivers/nvme/host/nvme.h
@@ -284,6 +284,7 @@ struct nvme_ctrl {
#endif
u16 crdt[3];
u16 oncs;
+u32 dmrsl;
u16 oacs;
u16 sqsize;
u32 max_namespaces;
drivers/nvme/host/pci.c
@@ -1439,6 +1439,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
nvme_init_request(abort_req, &cmd);
abort_req->end_io_data = NULL;
+abort_req->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(abort_req, false, abort_endio);
/*
@@ -1775,6 +1776,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
if (IS_ERR(dev->ctrl.admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
+dev->ctrl.admin_q = NULL;
return -ENOMEM;
}
if (!blk_get_queue(dev->ctrl.admin_q)) {
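The NULL assignment above matters because of the kernel's ERR_PTR convention: blk_mq_init_queue() returns an encoded error pointer on failure, which is non-NULL, so cleanup paths that test the stored pointer as "queue exists" would dereference garbage unless it is reset. A hedged illustration (not upstream code verbatim):

	struct request_queue *q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		/* q is e.g. (void *)-ENOMEM here: non-NULL but unusable, so
		 * any later `if (q)` check would wrongly treat it as valid
		 * unless the stored pointer is cleared. */
		q = NULL;
	}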
@@ -2486,6 +2488,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
+req->rq_flags |= RQF_QUIET;
blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
nvme_del_cq_end : nvme_del_queue_end);
return 0;
@@ -2675,7 +2678,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
struct pci_dev *pdev = to_pci_dev(dev->dev);
mutex_lock(&dev->shutdown_lock);
-if (pci_is_enabled(pdev)) {
+if (pci_device_is_present(pdev) && pci_is_enabled(pdev)) {
u32 csts = readl(dev->bar + NVME_REG_CSTS);
if (dev->ctrl.state == NVME_CTRL_LIVE ||
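pci_device_is_present() hardens this path because a surprise-removed device returns all-ones for MMIO reads, so the CSTS read above would act on garbage. The helper re-reads the vendor ID over config space; conceptually (simplified demo, the real helper goes through pci_bus_read_dev_vendor_id()):

static bool demo_device_present(struct pci_dev *pdev)
{
	u16 vendor;

	/* A departed device reads back as all-ones in config space. */
	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
		return false;
	return vendor != 0xffff;
}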
drivers/nvme/host/rdma.c
@@ -2010,10 +2010,7 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
struct nvme_rdma_queue *queue = req->queue;
nvme_rdma_stop_queue(queue);
-if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
-	nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
-	blk_mq_complete_request(rq);
-}
+nvmf_complete_timed_out_request(rq);
}
static enum blk_eh_timer_return
drivers/nvme/host/tcp.c
@@ -2318,10 +2318,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
-if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
-	nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
-	blk_mq_complete_request(rq);
-}
+nvmf_complete_timed_out_request(rq);
}
static enum blk_eh_timer_return
include/linux/nvme.h
@@ -204,8 +204,9 @@ enum {
NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
-NVME_CAP_CSS_NVM = 1 << 0,
-NVME_CAP_CSS_CSI = 1 << 6,
+};
+enum {
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
@@ -214,10 +215,18 @@ enum {
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
NVME_CSTS_SHST_MASK = 3 << 2,
+};
+enum {
NVME_CMBMSC_CRE = 1 << 0,
NVME_CMBMSC_CMSE = 1 << 1,
+};
+enum {
+NVME_CAP_CSS_NVM = 1 << 0,
+NVME_CAP_CSS_CSI = 1 << 6,
};
struct nvme_id_power_state {
__le16 max_power; /* centiwatts */
__u8 rsvd2;
@@ -1679,9 +1688,11 @@ enum {
/*
* Path-related Errors:
*/
+NVME_SC_INTERNAL_PATH_ERROR = 0x300,
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
+NVME_SC_CTRL_PATH_ERROR = 0x360,
NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_HOST_ABORTED_CMD = 0x371,
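These constants follow the driver's packing of the NVMe completion Status Field: bits 10:8 carry the Status Code Type and bits 7:0 the Status Code, so 0x300 is SCT 3h (Path Related) with SC 00h, and 0x371 is SCT 3h with SC 71h. Hypothetical helper macros (not part of this header) that make the split explicit:

#define DEMO_NVME_SCT(status)	(((status) >> 8) & 0x7)
#define DEMO_NVME_SC(status)	((status) & 0xff)
/* DEMO_NVME_SCT(0x371) == 0x3, DEMO_NVME_SC(0x371) == 0x71 */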