Commit 9754d6cb authored by Jens Axboe

Merge tag 'nvme-5.9-2020-09-24' of git://git.infradead.org/nvme into block-5.9

Pull NVMe fixes from Christoph:

"nvme fixes for 5.9

  - fix an error during controller probe that causes a double free of
    IRQs (Keith Busch)
  - FC connection establishment fix (James Smart)
  - properly handle completions for invalid tags (Xianting Tian)
  - pass the correct nsid to the command effects and supported log
    (Chaitanya Kulkarni)"

* tag 'nvme-5.9-2020-09-24' of git://git.infradead.org/nvme:
  nvme-core: don't use NVME_NSID_ALL for command effects and supported log
  nvme-fc: fail new connections to a deleted host or remote port
  nvme-pci: fix NULL req in completion handler
  nvme: return errors for hwmon init
parents 4a2dd2c7 46d2613e
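
As a quick, hedged illustration of the "invalid tags" fix listed above (the full nvme-pci hunk appears at the end of this diff): rather than range-checking the completion's command id against the queue depth, the handler now looks the id up and treats a missing request as a spurious completion. The stand-alone C sketch below models only that pattern; every name in it (fake_request, fake_tag_to_rq, handle_cqe) is invented for illustration and is not kernel code.

/*
 * User-space model of "look up the tag, bail out on NULL" completion
 * handling. Invented names; not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define QUEUE_DEPTH 4

struct fake_request {
	uint16_t tag;
	int in_flight;
};

/* Stand-in for a tag-to-request lookup: NULL means no request owns the id. */
static struct fake_request *fake_tag_to_rq(struct fake_request *tags,
					   uint16_t command_id)
{
	if (command_id >= QUEUE_DEPTH || !tags[command_id].in_flight)
		return NULL;
	return &tags[command_id];
}

static void handle_cqe(struct fake_request *tags, uint16_t command_id)
{
	struct fake_request *req = fake_tag_to_rq(tags, command_id);

	if (!req) {
		fprintf(stderr, "invalid id %u completed\n", command_id);
		return;	/* drop the bogus completion instead of crashing */
	}
	printf("completing request with tag %u\n", req->tag);
	req->in_flight = 0;
}

int main(void)
{
	struct fake_request tags[QUEUE_DEPTH] = {
		{ .tag = 0, .in_flight = 1 },
		{ .tag = 1, .in_flight = 0 },
		{ .tag = 2, .in_flight = 1 },
		{ .tag = 3, .in_flight = 0 },
	};

	handle_cqe(tags, 2);	/* valid: request 2 is in flight */
	handle_cqe(tags, 1);	/* invalid: no request in flight for id 1 */
	handle_cqe(tags, 9);	/* invalid: id out of range */
	return 0;
}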
drivers/nvme/host/core.c
@@ -3041,7 +3041,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
 	if (!cel)
 		return -ENOMEM;
 
-	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi,
+	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
 			&cel->log, sizeof(cel->log), 0);
 	if (ret) {
 		kfree(cel);
@@ -3236,8 +3236,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	if (ret < 0)
 		return ret;
 
-	if (!ctrl->identified)
-		nvme_hwmon_init(ctrl);
+	if (!ctrl->identified) {
+		ret = nvme_hwmon_init(ctrl);
+		if (ret < 0)
+			return ret;
+	}
 
 	ctrl->identified = true;
drivers/nvme/host/fc.c
@@ -3671,12 +3671,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
 		if (lport->localport.node_name != laddr.nn ||
-		    lport->localport.port_name != laddr.pn)
+		    lport->localport.port_name != laddr.pn ||
+		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
 			continue;
 
 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
 			if (rport->remoteport.node_name != raddr.nn ||
-			    rport->remoteport.port_name != raddr.pn)
+			    rport->remoteport.port_name != raddr.pn ||
+			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
 				continue;
 
 			/* if fail to get reference fall through. Will error */
drivers/nvme/host/hwmon.c
@@ -59,12 +59,8 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
-	int ret;
-
-	ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
 			   NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
-
-	return ret <= 0 ? ret : -EIO;
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
@@ -225,7 +221,7 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 	.info	= nvme_hwmon_info,
 };
 
-void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	struct device *dev = ctrl->dev;
 	struct nvme_hwmon_data *data;
@@ -234,7 +230,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return;
+		return 0;
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
@@ -244,7 +240,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(ctrl->device,
 			"Failed to read smart log (error %d)\n", err);
 		devm_kfree(dev, data);
-		return;
+		return err;
 	}
 
 	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
@@ -254,4 +250,6 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
 		devm_kfree(dev, data);
 	}
+
+	return 0;
 }
drivers/nvme/host/nvme.h
@@ -827,9 +827,12 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 }
 
 #ifdef CONFIG_NVME_HWMON
-void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
 #else
-static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+	return 0;
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
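
The prototype change above is the other half of the hwmon fix: the real nvme_hwmon_init() can now report failure, while the CONFIG_NVME_HWMON=n stub keeps returning 0 so callers may check the result unconditionally (see the nvme_init_identify() hunk near the top). Below is a minimal stand-alone sketch of that "optional feature returns int, stub returns 0, caller propagates the error" pattern; the names (feature_hwmon_init, controller_probe) and the -5 error value are invented for illustration and assume nothing beyond standard C.

#include <stdio.h>

/* Flip to 0 to model building with the optional feature compiled out. */
#define CONFIG_FEATURE_HWMON 1

#if CONFIG_FEATURE_HWMON
static int feature_hwmon_init(void)
{
	/* Pretend the hardware log read failed with an I/O error. */
	return -5;	/* stand-in for -EIO */
}
#else
/* Feature compiled out: succeed without doing anything. */
static int feature_hwmon_init(void)
{
	return 0;
}
#endif

static int controller_probe(void)
{
	int ret = feature_hwmon_init();

	if (ret < 0)
		return ret;	/* abort the probe instead of limping along */
	return 0;
}

int main(void)
{
	int ret = controller_probe();

	if (ret)
		fprintf(stderr, "probe failed: %d\n", ret);
	else
		printf("probe succeeded\n");
	return ret ? 1 : 0;
}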
drivers/nvme/host/pci.c
@@ -940,13 +940,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
 	struct request *req;
 
-	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
-		dev_warn(nvmeq->dev->ctrl.device,
-			"invalid id %d completed on queue %d\n",
-			cqe->command_id, le16_to_cpu(cqe->sq_id));
-		return;
-	}
-
 	/*
 	 * AEN requests are special as they don't time out and can
 	 * survive any kind of queue freeze and often don't respond to
@@ -960,6 +953,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	}
 
 	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+	if (unlikely(!req)) {
+		dev_warn(nvmeq->dev->ctrl.device,
+			"invalid id %d completed on queue %d\n",
+			cqe->command_id, le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
 	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 		nvme_pci_complete_rq(req);