Commit 0217a27e authored by Linus Torvalds

Merge tag 'block-5.13-2021-05-28' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request (Christoph):
      - fix a memory leak in nvme_cdev_add (Guoqing Jiang)
      - fix inline data size comparison in nvmet_tcp_queue_response (Hou
        Pu)
      - fix false keep-alive timeout when a controller is torn down
        (Sagi Grimberg)
      - fix a nvme-tcp Kconfig dependency (Sagi Grimberg)
      - short-circuit reconnect retries for FC (Hannes Reinecke)
      - decode host pathing error for connect (Hannes Reinecke)

 - MD pull request (Song):
      - Fix incorrect chunk boundary assert (Christoph)

 - Fix s390/dasd verification panic (Stefan)

* tag 'block-5.13-2021-05-28' of git://git.kernel.dk/linux-block:
  nvmet: fix false keep-alive timeout when a controller is torn down
  nvmet-tcp: fix inline data size comparison in nvmet_tcp_queue_response
  nvme-tcp: remove incorrect Kconfig dep in BLK_DEV_NVME
  md/raid5: remove an incorrect assert in in_chunk_boundary
  s390/dasd: add missing discipline function
  nvme-fabrics: decode host pathing error for connect
  nvme-fc: short-circuit reconnect retries
  nvme: fix potential memory leaks in nvme_cdev_add
parents b3dbbae6 a4b58f17
...@@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) ...@@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
unsigned int chunk_sectors; unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio); unsigned int bio_sectors = bio_sectors(bio);
WARN_ON_ONCE(bio->bi_bdev->bd_partno);
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >= return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors); ((sector & (chunk_sectors - 1)) + bio_sectors);
......
...@@ -71,7 +71,8 @@ config NVME_FC ...@@ -71,7 +71,8 @@ config NVME_FC
config NVME_TCP config NVME_TCP
tristate "NVM Express over Fabrics TCP host driver" tristate "NVM Express over Fabrics TCP host driver"
depends on INET depends on INET
depends on BLK_DEV_NVME depends on BLOCK
select NVME_CORE
select NVME_FABRICS select NVME_FABRICS
select CRYPTO select CRYPTO
select CRYPTO_CRC32C select CRYPTO_CRC32C
......
...@@ -3485,8 +3485,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, ...@@ -3485,8 +3485,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
cdev_init(cdev, fops); cdev_init(cdev, fops);
cdev->owner = owner; cdev->owner = owner;
ret = cdev_device_add(cdev, cdev_device); ret = cdev_device_add(cdev, cdev_device);
if (ret) if (ret) {
put_device(cdev_device);
ida_simple_remove(&nvme_ns_chr_minor_ida, minor); ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
}
return ret; return ret;
} }
......
...@@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, ...@@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
cmd->connect.recfmt); cmd->connect.recfmt);
break; break;
case NVME_SC_HOST_PATH_ERROR:
dev_err(ctrl->device,
"Connect command failed: host path error\n");
break;
default: default:
dev_err(ctrl->device, dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n", "Connect command failed, error wo/DNR bit: %d\n",
......
...@@ -3107,6 +3107,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) ...@@ -3107,6 +3107,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.icdoff) { if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff); ctrl->ctrl.icdoff);
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue; goto out_disconnect_admin_queue;
} }
...@@ -3114,6 +3115,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) ...@@ -3114,6 +3115,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) { if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
dev_err(ctrl->ctrl.device, dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n"); "Mandatory sgls are not supported!\n");
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue; goto out_disconnect_admin_queue;
} }
...@@ -3280,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) ...@@ -3280,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
return; return;
if (portptr->port_state == FC_OBJSTATE_ONLINE) if (portptr->port_state == FC_OBJSTATE_ONLINE) {
dev_info(ctrl->ctrl.device, dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
ctrl->cnum, status); ctrl->cnum, status);
else if (time_after_eq(jiffies, rport->dev_loss_end)) if (status > 0 && (status & NVME_SC_DNR))
recon = false;
} else if (time_after_eq(jiffies, rport->dev_loss_end))
recon = false; recon = false;
if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
...@@ -3298,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) ...@@ -3298,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else { } else {
if (portptr->port_state == FC_OBJSTATE_ONLINE) if (portptr->port_state == FC_OBJSTATE_ONLINE) {
if (status > 0 && (status & NVME_SC_DNR))
dev_warn(ctrl->ctrl.device, dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) " "NVME-FC{%d}: reconnect failure\n",
"reached.\n", ctrl->cnum);
ctrl->cnum, ctrl->ctrl.nr_reconnects);
else else
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts "
"(%d) reached.\n",
ctrl->cnum, ctrl->ctrl.nr_reconnects);
} else
dev_warn(ctrl->ctrl.device, dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: dev_loss_tmo (%d) expired " "NVME-FC{%d}: dev_loss_tmo (%d) expired "
"while waiting for remoteport connectivity.\n", "while waiting for remoteport connectivity.\n",
......
...@@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work) ...@@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
{ {
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvmet_ctrl, ka_work); struct nvmet_ctrl, ka_work);
bool cmd_seen = ctrl->cmd_seen; bool reset_tbkas = ctrl->reset_tbkas;
ctrl->cmd_seen = false; ctrl->reset_tbkas = false;
if (cmd_seen) { if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid); ctrl->cntlid);
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
...@@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) ...@@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
percpu_ref_exit(&sq->ref); percpu_ref_exit(&sq->ref);
if (ctrl) { if (ctrl) {
/*
* The teardown flow may take some time, and the host may not
* send us keep-alive during this period, hence reset the
* traffic based keep-alive timer so we don't trigger a
* controller teardown as a result of a keep-alive expiration.
*/
ctrl->reset_tbkas = true;
nvmet_ctrl_put(ctrl); nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */ sq->ctrl = NULL; /* allows reusing the queue later */
} }
...@@ -952,7 +959,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, ...@@ -952,7 +959,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
} }
if (sq->ctrl) if (sq->ctrl)
sq->ctrl->cmd_seen = true; sq->ctrl->reset_tbkas = true;
return true; return true;
......
...@@ -167,7 +167,7 @@ struct nvmet_ctrl { ...@@ -167,7 +167,7 @@ struct nvmet_ctrl {
struct nvmet_subsys *subsys; struct nvmet_subsys *subsys;
struct nvmet_sq **sqs; struct nvmet_sq **sqs;
bool cmd_seen; bool reset_tbkas;
struct mutex lock; struct mutex lock;
u64 cap; u64 cap;
......
...@@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) ...@@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
* nvmet_req_init is completed. * nvmet_req_init is completed.
*/ */
if (queue->rcv_state == NVMET_TCP_RECV_PDU && if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
len && len < cmd->req.port->inline_data_size && len && len <= cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd)) nvme_is_write(cmd->req.cmd))
return; return;
} }
......
...@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block) ...@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
blk_queue_segment_boundary(q, PAGE_SIZE - 1); blk_queue_segment_boundary(q, PAGE_SIZE - 1);
} }
/*
 * dasd_diag_pe_handler - path-event handler for the DIAG discipline.
 *
 * Installed as the discipline's .pe_handler (replacing the removed
 * .verify_path callback). Delegates to dasd_generic_verify_path() with
 * only @tbvpm; @fcsecpm is accepted to match the pe_handler signature
 * but deliberately ignored here.
 *
 * NOTE(review): presumably @tbvpm is the to-be-verified path mask and
 * @fcsecpm the FC-security path mask — confirm against dasd_int.h.
 */
static int dasd_diag_pe_handler(struct dasd_device *device,
__u8 tbvpm, __u8 fcsecpm)
{
return dasd_generic_verify_path(device, tbvpm);
}
static struct dasd_discipline dasd_diag_discipline = { static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.name = "DIAG", .name = "DIAG",
.ebcname = "DIAG", .ebcname = "DIAG",
.check_device = dasd_diag_check_device, .check_device = dasd_diag_check_device,
.verify_path = dasd_generic_verify_path, .pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry, .fill_geometry = dasd_diag_fill_geometry,
.setup_blk_queue = dasd_diag_setup_blk_queue, .setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag, .start_IO = dasd_start_diag,
......
...@@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block) ...@@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
} }
/*
 * dasd_fba_pe_handler - path-event handler for the FBA discipline.
 *
 * Installed as the discipline's .pe_handler (replacing the removed
 * .verify_path callback). Delegates to dasd_generic_verify_path() with
 * only @tbvpm; @fcsecpm is accepted to match the pe_handler signature
 * but deliberately ignored here.
 *
 * NOTE(review): presumably @tbvpm is the to-be-verified path mask and
 * @fcsecpm the FC-security path mask — confirm against dasd_int.h.
 */
static int dasd_fba_pe_handler(struct dasd_device *device,
__u8 tbvpm, __u8 fcsecpm)
{
return dasd_generic_verify_path(device, tbvpm);
}
static struct dasd_discipline dasd_fba_discipline = { static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.name = "FBA ", .name = "FBA ",
.ebcname = "FBA ", .ebcname = "FBA ",
.check_device = dasd_fba_check_characteristics, .check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis, .do_analysis = dasd_fba_do_analysis,
.verify_path = dasd_generic_verify_path, .pe_handler = dasd_fba_pe_handler,
.setup_blk_queue = dasd_fba_setup_blk_queue, .setup_blk_queue = dasd_fba_setup_blk_queue,
.fill_geometry = dasd_fba_fill_geometry, .fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO, .start_IO = dasd_start_IO,
......
...@@ -297,7 +297,6 @@ struct dasd_discipline { ...@@ -297,7 +297,6 @@ struct dasd_discipline {
* e.g. verify that new path is compatible with the current * e.g. verify that new path is compatible with the current
* configuration. * configuration.
*/ */
int (*verify_path)(struct dasd_device *, __u8);
int (*pe_handler)(struct dasd_device *, __u8, __u8); int (*pe_handler)(struct dasd_device *, __u8, __u8);
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment