Commit 6bdf2fbc authored by Jens Axboe

Merge tag 'nvme-5.13-2021-05-13' of git://git.infradead.org/nvme into block-5.13

Pull NVMe fixes from Christoph:

"nvme fix for Linux 5.13

 - correct the check for using the inline bio in nvmet
   (Chaitanya Kulkarni)
 - demote unsupported command warnings (Chaitanya Kulkarni)
 - fix corruption due to double initializing ANA state (me, Hou Pu)
 - reset ns->file when open fails (Daniel Wagner)
 - fix a NULL deref when SEND is completed with error in nvmet-rdma
   (Michal Kalderon)"

* tag 'nvme-5.13-2021-05-13' of git://git.infradead.org/nvme:
  nvmet: use new ana_log_size instead of the old one
  nvmet: reset ns->file when open fails
  nvmet: demote fabrics cmd parse err msg to debug
  nvmet: use helper to remove the duplicate code
  nvmet: demote discovery cmd parse err msg to debug
  nvmet-rdma: Fix NULL deref when SEND is completed with error
  nvmet: fix inline bio check for passthru
  nvmet: fix inline bio check for bdev-ns
  nvme-multipath: fix double initialization of ANA state
parents bedf78c4 e181811b
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2901,7 +2901,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
 		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
 	}
 
-	ret = nvme_mpath_init(ctrl, id);
+	ret = nvme_mpath_init_identify(ctrl, id);
 	if (ret < 0)
 		goto out_free;
 
@@ -4364,6 +4364,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 		min(default_ps_max_latency_us, (unsigned long)S32_MAX));
 
 	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+	nvme_mpath_init_ctrl(ctrl);
 
 	return 0;
 out_free_name:
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -781,9 +781,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	put_disk(head->disk);
 }
 
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
 {
-	int error;
+	mutex_init(&ctrl->ana_lock);
+	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+}
+
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
+	size_t ana_log_size;
+	int error = 0;
 
 	/* check if multipath is enabled and we have the capability */
 	if (!multipath || !ctrl->subsys ||
@@ -795,37 +804,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
 	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
 
-	mutex_init(&ctrl->ana_lock);
-	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
-	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
-		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
-	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
-	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
+	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
+		ctrl->max_namespaces * sizeof(__le32);
+	if (ana_log_size > max_transfer_size) {
 		dev_err(ctrl->device,
-			"ANA log page size (%zd) larger than MDTS (%d).\n",
-			ctrl->ana_log_size,
-			ctrl->max_hw_sectors << SECTOR_SHIFT);
+			"ANA log page size (%zd) larger than MDTS (%zd).\n",
+			ana_log_size, max_transfer_size);
 		dev_err(ctrl->device, "disabling ANA support.\n");
-		return 0;
+		goto out_uninit;
 	}
-
-	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
-	kfree(ctrl->ana_log_buf);
-	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-	if (!ctrl->ana_log_buf) {
-		error = -ENOMEM;
-		goto out;
+	if (ana_log_size > ctrl->ana_log_size) {
+		nvme_mpath_stop(ctrl);
+		kfree(ctrl->ana_log_buf);
+		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+		if (!ctrl->ana_log_buf)
+			return -ENOMEM;
 	}
-
+	ctrl->ana_log_size = ana_log_size;
 	error = nvme_read_ana_log(ctrl);
 	if (error)
-		goto out_free_ana_log_buf;
+		goto out_uninit;
 	return 0;
-
-out_free_ana_log_buf:
-	kfree(ctrl->ana_log_buf);
-	ctrl->ana_log_buf = NULL;
-out:
+out_uninit:
+	nvme_mpath_uninit(ctrl);
 	return error;
 }
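The multipath hunk above is the heart of the double-initialization fix: nvme_mpath_init() was called from nvme_init_identify(), which runs again on every controller reset, so mutex_init(), timer_setup() and INIT_WORK() were re-executed against a mutex that could be held, a timer that could be pending, and a work item that could still be queued. The fix splits the one-time object setup into nvme_mpath_init_ctrl(), called once from nvme_init_ctrl(), and keeps only the per-reset Identify processing in nvme_mpath_init_identify(). A minimal sketch of the pattern, with hypothetical foo_* names rather than the driver's actual code:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo_ctrl {
	struct mutex lock;
	struct work_struct work;
};

static void foo_work_fn(struct work_struct *work)
{
	/* ... background work ... */
}

/* One-time construction: the only safe place for mutex_init()/INIT_WORK(). */
static void foo_init_ctrl(struct foo_ctrl *ctrl)
{
	mutex_init(&ctrl->lock);
	INIT_WORK(&ctrl->work, foo_work_fn);
}

/*
 * Runs again on every reset: must only refresh state, never re-run the
 * initializers above, since ctrl->lock may be held and ctrl->work may be
 * queued or executing at this point.
 */
static int foo_init_identify(struct foo_ctrl *ctrl)
{
	/* re-read controller capabilities, resize buffers, etc. */
	return 0;
}

The same split also lets nvme_mpath_init_identify() keep an already-allocated ANA log buffer when the required size has not grown, instead of unconditionally reallocating it on every reset.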
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -712,7 +712,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -780,7 +781,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
-static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
 		struct nvme_id_ctrl *id)
 {
 	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -975,10 +975,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 	case nvme_admin_keep_alive:
 		req->execute = nvmet_execute_keep_alive;
 		return 0;
+	default:
+		return nvmet_report_invalid_opcode(req);
 	}
-
-	pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
-		 req->sq->qid);
-	req->error_loc = offsetof(struct nvme_common_command, opcode);
-	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 }
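nvmet_report_invalid_opcode() comes from an earlier patch in this series ("nvmet: use helper to remove the duplicate code" in the shortlog); it centralizes exactly what the removed lines open-coded: the debug message, the error location, and the Invalid Opcode status. Reconstructed from the series, the helper should look essentially like this:

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

Note that this copy already used pr_debug(), matching the log-level demotions in the hunks below.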
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -379,7 +379,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 		req->execute = nvmet_execute_disc_identify;
 		return 0;
 	default:
-		pr_err("unhandled cmd %d\n", cmd->common.opcode);
+		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 	}
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -94,7 +94,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
 		req->execute = nvmet_execute_prop_get;
 		break;
 	default:
-		pr_err("received unknown capsule type 0x%x\n",
+		pr_debug("received unknown capsule type 0x%x\n",
 			cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
@@ -284,13 +284,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
 	struct nvme_command *cmd = req->cmd;
 
 	if (!nvme_is_fabrics(cmd)) {
-		pr_err("invalid command 0x%x on unconnected queue.\n",
+		pr_debug("invalid command 0x%x on unconnected queue.\n",
 			cmd->fabrics.opcode);
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 	}
 	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-		pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+		pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
 			cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
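These pr_err() to pr_debug() demotions, here and in the discovery hunk above, share one rationale: the messages are triggered purely by host-supplied input, so a buggy or malicious host could flood the target's kernel log at will. The host still gets the full picture through the returned NVME_SC_INVALID_OPCODE | NVME_SC_DNR status and req->error_loc.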
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 
 	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
 
-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+	if (nvmet_use_inline_bvec(req)) {
 		bio = &req->b.inline_bio;
 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	} else {
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 
 	ns->file = filp_open(ns->device_path, flags, 0);
 	if (IS_ERR(ns->file)) {
-		pr_err("failed to open file %s: (%ld)\n",
-				ns->device_path, PTR_ERR(ns->file));
-		return PTR_ERR(ns->file);
+		ret = PTR_ERR(ns->file);
+		pr_err("failed to open file %s: (%d)\n",
+			ns->device_path, ret);
+		ns->file = NULL;
+		return ret;
 	}
 
 	ret = nvmet_file_ns_revalidate(ns);
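The ns->file fix matters because filp_open() returns an ERR_PTR() on failure, and the old code left that error value in ns->file. Code that treats any non-NULL ns->file as an open file, such as the namespace-disable path, could later operate on the stale error pointer, for example after the user retries enabling the namespace with a different, working backend. Resetting the field to NULL keeps "no file" unambiguous; the %ld to %d format change simply follows the error now being stored in the int ret.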
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -616,4 +616,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
 	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
 }
 
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
 #endif /* _NVMET_H */
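nvmet_use_inline_bvec() is the new gate used by the bdev and passthru hunks, and it checks both dimensions of a request. req->transfer_len bounds the bytes, but a transfer that is small in bytes can still arrive as more discontiguous scatter-gather entries than the fixed req->inline_bvec[] array (NVMET_MAX_INLINE_BIOVEC slots) can describe, in which case adding pages to the inline bio fails part way through the mapping. Checking req->sg_cnt as well closes that hole.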
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
 	if (req->sg_cnt > BIO_MAX_VECS)
 		return -EINVAL;
 
-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+	if (nvmet_use_inline_bvec(req)) {
 		bio = &req->p.inline_bio;
 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	} else {
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
 
 	nvmet_rdma_release_rsp(rsp);
 
@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
 	u16 status;
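Both rdma.c hunks fix the same NULL dereference: since nvmet-rdma moved to shared (pooled) completion queues, a CQ can serve several queues, so cq->cq_context no longer identifies the queue whose SEND or RDMA WRITE completed, and following it in the error-completion path crashes. The owning queue has to be recovered from the completing QP instead, which is per-queue by construction, as the other completion handlers in this file already do. A sketch of the relationship, with simplified names, assuming the QP is created roughly the way nvmet-rdma does it:

/*
 * At queue creation: the QP's context points back at the owning queue,
 * while the CQ may be shared among many queues.
 */
struct ib_qp_init_attr qp_attr = { };

qp_attr.qp_context = queue;	/* per-QP: recovered as wc->qp->qp_context */
qp_attr.send_cq = shared_cq;	/* shared: its cq_context is not this queue */
qp_attr.recv_cq = shared_cq;
ret = rdma_create_qp(cm_id, pd, &qp_attr);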