Commit 9d2fbaef authored by Linus Torvalds

Merge tag 'block-5.9-2020-09-25' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "NVMe pull request from Christoph, and removal of a dead define.

   - fix an error during controller probe that caused a double free of
     IRQs (Keith Busch)

   - FC connection establishment fix (James Smart)

   - properly handle completions for invalid tags (Xianting Tian)

   - pass the correct nsid to the command effects and supported log
     (Chaitanya Kulkarni)"

* tag 'block-5.9-2020-09-25' of git://git.kernel.dk/linux-block:
  block: remove unused BLK_QC_T_EAGAIN flag
  nvme-core: don't use NVME_NSID_ALL for command effects and supported log
  nvme-fc: fail new connections to a deleted host or remote port
  nvme-pci: fix NULL req in completion handler
  nvme: return errors for hwmon init
parents eeddbe68 3aab9177
drivers/nvme/host/core.c

@@ -3041,7 +3041,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
 	if (!cel)
 		return -ENOMEM;
 
-	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi,
+	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
 			&cel->log, sizeof(cel->log), 0);
 	if (ret) {
 		kfree(cel);
@@ -3236,8 +3236,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	if (ret < 0)
 		return ret;
 
-	if (!ctrl->identified)
-		nvme_hwmon_init(ctrl);
+	if (!ctrl->identified) {
+		ret = nvme_hwmon_init(ctrl);
+		if (ret < 0)
+			return ret;
+	}
 
 	ctrl->identified = true;
drivers/nvme/host/fc.c

@@ -3671,12 +3671,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
 		if (lport->localport.node_name != laddr.nn ||
-		    lport->localport.port_name != laddr.pn)
+		    lport->localport.port_name != laddr.pn ||
+		    lport->localport.port_state != FC_OBJSTATE_ONLINE)
 			continue;
 
 		list_for_each_entry(rport, &lport->endp_list, endp_list) {
 			if (rport->remoteport.node_name != raddr.nn ||
-			    rport->remoteport.port_name != raddr.pn)
+			    rport->remoteport.port_name != raddr.pn ||
+			    rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
 				continue;
 
 			/* if fail to get reference fall through. Will error */
drivers/nvme/host/hwmon.c

@@ -59,12 +59,8 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
 
 static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
 {
-	int ret;
-
-	ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+	return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
 			NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
-
-	return ret <= 0 ? ret : -EIO;
 }
 
 static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
@@ -225,7 +221,7 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
 	.info = nvme_hwmon_info,
 };
 
-void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
 {
 	struct device *dev = ctrl->dev;
 	struct nvme_hwmon_data *data;
@@ -234,7 +230,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
-		return;
+		return 0;
 
 	data->ctrl = ctrl;
 	mutex_init(&data->read_lock);
@@ -244,7 +240,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(ctrl->device,
 			"Failed to read smart log (error %d)\n", err);
 		devm_kfree(dev, data);
-		return;
+		return err;
 	}
 
 	hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
@@ -254,4 +250,6 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
 		dev_warn(dev, "Failed to instantiate hwmon device\n");
 		devm_kfree(dev, data);
 	}
+
+	return 0;
 }
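
A condensed sketch of the error policy these hwmon hunks introduce, assuming the surrounding nvme types; nvme_hwmon_init_sketch is a hypothetical name and the body is compressed, not the verbatim kernel function. Allocation and hwmon-registration failures stay non-fatal, while a failed SMART log read is propagated so controller initialization can bail out:

/*
 * Condensed, hypothetical sketch of the new error policy; see the hunks
 * above for the real function.
 */
int nvme_hwmon_init_sketch(struct nvme_ctrl *ctrl)
{
	struct nvme_hwmon_data *data;
	int err;

	data = devm_kzalloc(ctrl->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return 0;	/* no memory: skip hwmon, let probe continue */

	err = nvme_hwmon_get_smart_log(data);
	if (err)
		return err;	/* controller did not answer: abort init */

	/* hwmon registration failure is logged but also treated as non-fatal */
	return 0;
}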
drivers/nvme/host/nvme.h

@@ -827,9 +827,12 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 }
 
 #ifdef CONFIG_NVME_HWMON
-void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
 #else
-static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+	return 0;
+}
 #endif
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
drivers/nvme/host/pci.c

@@ -940,13 +940,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
 	struct request *req;
 
-	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
-		dev_warn(nvmeq->dev->ctrl.device,
-			"invalid id %d completed on queue %d\n",
-			cqe->command_id, le16_to_cpu(cqe->sq_id));
-		return;
-	}
-
 	/*
 	 * AEN requests are special as they don't time out and can
 	 * survive any kind of queue freeze and often don't respond to
@@ -960,6 +953,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	}
 
 	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+	if (unlikely(!req)) {
+		dev_warn(nvmeq->dev->ctrl.device,
+			"invalid id %d completed on queue %d\n",
+			cqe->command_id, le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
 	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 		nvme_pci_complete_rq(req);
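
The two hunks above replace the pre-lookup bound test (command_id >= q_depth) with a NULL check on the request returned by blk_mq_tag_to_rq(), which returns NULL for any id that does not map to a request in the tagset and is therefore more robust than comparing against the queue depth. A minimal user-space sketch of that lookup-then-check pattern follows; fake_rq, fake_tag_to_rq and handle_completion are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

struct fake_rq { unsigned int tag; };

#define FAKE_DEPTH 4

static struct fake_rq *tag_table[FAKE_DEPTH];

/* Stand-in for blk_mq_tag_to_rq(): NULL for any id with no request behind it. */
static struct fake_rq *fake_tag_to_rq(unsigned int tag)
{
	if (tag >= FAKE_DEPTH)
		return NULL;
	return tag_table[tag];	/* may also be NULL if never allocated */
}

/* Mirrors the new flow in nvme_handle_cqe(): look up first, then check. */
static void handle_completion(unsigned int command_id)
{
	struct fake_rq *req = fake_tag_to_rq(command_id);

	if (!req) {
		fprintf(stderr, "invalid id %u completed\n", command_id);
		return;
	}
	printf("completing request with tag %u\n", req->tag);
}

int main(void)
{
	struct fake_rq rq = { .tag = 1 };

	tag_table[1] = &rq;
	handle_completion(1);	/* valid id */
	handle_completion(42);	/* bogus id, e.g. from a corrupted CQE */
	return 0;
}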
include/linux/blk_types.h

@@ -497,13 +497,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
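
With BLK_QC_T_EAGAIN gone, BLK_QC_T_NONE is the only "no cookie" value left, and the remaining constants just pack a queue number and a tag into one 32-bit cookie. Below is a small stand-alone sketch of that layout, assuming the decode helpers keep the shape they have in blk_types.h; make_cookie() is a hypothetical encoder added only for the demo.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_qc_t;

#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

/* Only BLK_QC_T_NONE marks an invalid cookie now. */
static bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

/* Decode helpers modelled on the ones in blk_types.h. */
static unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1U << BLK_QC_T_SHIFT) - 1);
}

/* Hypothetical encoder, the inverse of the decode helpers above. */
static blk_qc_t make_cookie(unsigned int queue_num, unsigned int tag)
{
	return (queue_num << BLK_QC_T_SHIFT) | tag;
}

int main(void)
{
	blk_qc_t cookie = make_cookie(3, 42);

	printf("valid=%d queue=%u tag=%u\n", blk_qc_t_valid(cookie),
	       blk_qc_t_to_queue_num(cookie), blk_qc_t_to_tag(cookie));
	printf("BLK_QC_T_NONE valid=%d\n", blk_qc_t_valid(BLK_QC_T_NONE));
	return 0;
}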