Commit dfdcbf1f authored by Jens Axboe

Merge tag 'nvme-6.1-2022-09-28' of git://git.infradead.org/nvme into for-6.1/block

Pull NVMe updates from Christoph:

"nvme updates for Linux 6.1

 - handle effects after freeing the request (Keith Busch)
 - copy firmware_rev on each init (Keith Busch)
 - restrict management ioctls to admin (Keith Busch)
 - ensure subsystem reset is single threaded (Keith Busch)
 - report the actual number of tagset maps in nvme-pci (Keith Busch)
 - small fabrics authentication fixups (Christoph Hellwig)
 - add common code for tagset allocation and freeing (Christoph Hellwig)
 - stop using the request_queue in nvmet (Christoph Hellwig)
 - set min_align_mask before calculating max_hw_sectors
   (Rishabh Bhatnagar)
 - send a rediscover uevent when a persistent discovery controller
   reconnects (Sagi Grimberg)
 - misc nvmet-tcp fixes (Varun Prakash, zhenwei pi)"

* tag 'nvme-6.1-2022-09-28' of git://git.infradead.org/nvme: (31 commits)
  nvmet: don't look at the request_queue in nvmet_bdev_set_limits
  nvmet: don't look at the request_queue in nvmet_bdev_zone_mgmt_emulate_all
  nvme: remove nvme_ctrl_init_connect_q
  nvme-loop: use the tagset alloc/free helpers
  nvme-loop: store the generic nvme_ctrl in set->driver_data
  nvme-loop: initialize sqsize later
  nvme-fc: use the tagset alloc/free helpers
  nvme-fc: store the generic nvme_ctrl in set->driver_data
  nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  nvme-rdma: use the tagset alloc/free helpers
  nvme-rdma: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: use the tagset alloc/free helpers
  nvme-tcp: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: remove the unused queue_size member in nvme_tcp_queue
  nvme: add common helpers to allocate and free tagsets
  nvme-auth: add a MAINTAINERS entry
  nvmet: add helpers to set the result field for connect commands
  nvme: improve the NVME_CONNECT_AUTHREQ* definitions
  nvmet-auth: don't try to cancel a non-initialized work_struct
  nvmet-tcp: remove nvmet_tcp_finish_cmd
  ...
parents c68f4f4e 84fe64f8
@@ -14542,6 +14542,15 @@ F: drivers/nvme/common/
 F:	include/linux/nvme*
 F:	include/uapi/linux/nvme_ioctl.h
 
+NVM EXPRESS FABRICS AUTHENTICATION
+M:	Hannes Reinecke <hare@suse.de>
+L:	linux-nvme@lists.infradead.org
+S:	Supported
+F:	drivers/nvme/host/auth.c
+F:	drivers/nvme/target/auth.c
+F:	drivers/nvme/target/fabrics-cmd-auth.c
+F:	include/linux/nvme-auth.h
+
 NVM EXPRESS FC TRANSPORT DRIVERS
 M:	James Smart <james.smart@broadcom.com>
 L:	linux-nvme@lists.infradead.org
...
@@ -1111,8 +1111,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	return effects;
 }
 
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
 		struct nvme_command *cmd, int status)
 {
 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
 		nvme_unfreeze(ctrl);
@@ -1148,21 +1148,16 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
 		break;
 	}
 }
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
 
-int nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
 {
 	struct nvme_command *cmd = nvme_req(rq)->cmd;
 	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
 	struct nvme_ns *ns = rq->q->queuedata;
-	u32 effects;
-	int ret;
 
-	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
-	ret = nvme_execute_rq(rq, false);
-	if (effects) /* nothing to be done for zero cmd effects */
-		nvme_passthru_end(ctrl, effects, cmd, ret);
-	return ret;
+	*effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+	return nvme_execute_rq(rq, false);
 }
 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
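
The point of the split is lifetime: effects handling can now run after the request has been freed, which is what the later ioctl.c change relies on. A minimal sketch of the new calling convention (submit_passthru is a hypothetical wrapper; note that cmd must be caller-owned, since the request's own copy dies with the request):

	static int submit_passthru(struct nvme_ctrl *ctrl, struct request *req,
				   struct nvme_command *cmd)
	{
		u32 effects;
		int ret;

		ret = nvme_execute_passthru_rq(req, &effects);
		blk_mq_free_request(req);	/* request gone; effects live on */
		if (effects)	/* nothing to be done for zero cmd effects */
			nvme_passthru_end(ctrl, effects, cmd, ret);
		return ret;
	}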
@@ -2898,7 +2893,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	nvme_init_subnqn(subsys, ctrl, id);
 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
-	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
 	subsys->vendor_id = le16_to_cpu(id->vid);
 	subsys->cmic = id->cmic;
 
@@ -3117,6 +3111,8 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
 			ctrl->quirks |= core_quirks[i].quirks;
 		}
 	}
+	memcpy(ctrl->subsys->firmware_rev, id->fr,
+	       sizeof(ctrl->subsys->firmware_rev));
 
 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -4800,6 +4796,108 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size)
+{
+	int ret;
+
+	memset(set, 0, sizeof(*set));
+	set->ops = ops;
+	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = ctrl->numa_node;
+	set->flags = flags;
+	set->cmd_size = cmd_size;
+	set->driver_data = ctrl;
+	set->nr_hw_queues = 1;
+	set->timeout = NVME_ADMIN_TIMEOUT;
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret)
+		return ret;
+
+	ctrl->admin_q = blk_mq_init_queue(set);
+	if (IS_ERR(ctrl->admin_q)) {
+		ret = PTR_ERR(ctrl->admin_q);
+		goto out_free_tagset;
+	}
+
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		ctrl->fabrics_q = blk_mq_init_queue(set);
+		if (IS_ERR(ctrl->fabrics_q)) {
+			ret = PTR_ERR(ctrl->fabrics_q);
+			goto out_cleanup_admin_q;
+		}
+	}
+
+	ctrl->admin_tagset = set;
+	return 0;
+
+out_cleanup_admin_q:
+	blk_mq_destroy_queue(ctrl->admin_q);
+out_free_tagset:
+	blk_mq_free_tag_set(set);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+	blk_mq_destroy_queue(ctrl->admin_q);
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		blk_mq_destroy_queue(ctrl->fabrics_q);
+	blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size)
+{
+	int ret;
+
+	memset(set, 0, sizeof(*set));
+	set->ops = ops;
+	set->queue_depth = ctrl->sqsize + 1;
+	set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = ctrl->numa_node;
+	set->flags = flags;
+	set->cmd_size = cmd_size;
+	set->driver_data = ctrl;
+	set->nr_hw_queues = ctrl->queue_count - 1;
+	set->timeout = NVME_IO_TIMEOUT;
+	if (ops->map_queues)
+		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret)
+		return ret;
+
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		ctrl->connect_q = blk_mq_init_queue(set);
+		if (IS_ERR(ctrl->connect_q)) {
+			ret = PTR_ERR(ctrl->connect_q);
+			goto out_free_tag_set;
+		}
+	}
+
+	ctrl->tagset = set;
+	return 0;
+
+out_free_tag_set:
+	blk_mq_free_tag_set(set);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		blk_mq_destroy_queue(ctrl->connect_q);
+	blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
 	nvme_mpath_stop(ctrl);
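
Taken together, the four helpers replace the per-transport tag-set boilerplate with one call on setup and one on teardown for each set; the admin variant also creates ctrl->admin_q (plus ctrl->fabrics_q on fabrics controllers) and the I/O variant creates ctrl->connect_q. A sketch of how a fabrics transport might consume them (my_ctrl, my_request and the two ops tables are hypothetical names, not from this patch):

	extern const struct blk_mq_ops my_admin_mq_ops, my_mq_ops;

	struct my_request {
		struct nvme_request	req;	/* generic part first */
		/* transport-private per-command state follows */
	};

	struct my_ctrl {
		struct nvme_ctrl	ctrl;	/* generic ctrl, embedded */
		struct blk_mq_tag_set	admin_tag_set;
		struct blk_mq_tag_set	tag_set;
	};

	static int my_setup_tag_sets(struct my_ctrl *mc)
	{
		int ret;

		ret = nvme_alloc_admin_tag_set(&mc->ctrl, &mc->admin_tag_set,
				&my_admin_mq_ops, BLK_MQ_F_NO_SCHED,
				sizeof(struct my_request));
		if (ret)
			return ret;

		/* sizes itself from ctrl->sqsize and ctrl->queue_count */
		ret = nvme_alloc_io_tag_set(&mc->ctrl, &mc->tag_set,
				&my_mq_ops, BLK_MQ_F_SHOULD_MERGE,
				sizeof(struct my_request));
		if (ret)
			nvme_remove_admin_tag_set(&mc->ctrl);
		return ret;
	}

	static void my_remove_tag_sets(struct my_ctrl *mc)
	{
		nvme_remove_io_tag_set(&mc->ctrl);
		nvme_remove_admin_tag_set(&mc->ctrl);
	}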
@@ -4819,6 +4917,16 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 	nvme_enable_aen(ctrl);
 
+	/*
+	 * persistent discovery controllers need to send indication to userspace
+	 * to re-read the discovery log page to learn about possible changes
+	 * that were missed. We identify persistent discovery controllers by
+	 * checking that they started once before, hence are reconnecting back.
+	 */
+	if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+	    nvme_discovery_ctrl(ctrl))
+		nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+
 	if (ctrl->queue_count > 1) {
 		nvme_queue_scan(ctrl);
 		nvme_start_queues(ctrl);
...
@@ -1829,7 +1829,7 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 
-	return __nvme_fc_exit_request(set->driver_data, op);
+	return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
 }
 
 static int
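
With the common helpers storing the generic struct nvme_ctrl in set->driver_data, every transport callback now converts back to its private controller type. The to_fc_ctrl() used above is presumably the usual container_of() accessor; a sketch of the pattern, assuming struct nvme_fc_ctrl embeds the generic controller in a field named ctrl:

	static inline struct nvme_fc_ctrl *to_fc_ctrl(struct nvme_ctrl *ctrl)
	{
		/* recover the FC controller that wraps the generic one */
		return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
	}

The rdma, tcp and loop counterparts (to_rdma_ctrl(), to_tcp_ctrl(), to_loop_ctrl()) seen later in this series follow the same shape.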
@@ -2135,7 +2135,7 @@ static int
 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
@@ -2206,36 +2206,28 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
 	}
 }
 
-static inline void
-__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
-		unsigned int qidx)
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
 {
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
 
 	hctx->driver_data = queue;
 	queue->hctx = hctx;
+	return 0;
 }
 
 static int
-nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-		unsigned int hctx_idx)
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
 }
 
 static int
 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx);
 }
 
 static void
@@ -2391,10 +2383,8 @@ nvme_fc_ctrl_free(struct kref *ref)
 		container_of(ref, struct nvme_fc_ctrl, ref);
 	unsigned long flags;
 
-	if (ctrl->ctrl.tagset) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(&ctrl->tag_set);
-	}
+	if (ctrl->ctrl.tagset)
+		nvme_remove_io_tag_set(&ctrl->ctrl);
 
 	/* remove from rport list */
 	spin_lock_irqsave(&ctrl->rport->lock, flags);
@@ -2402,9 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
 	nvme_start_admin_queue(&ctrl->ctrl);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 
 	kfree(ctrl->queues);
 
@@ -2862,7 +2850,7 @@ nvme_fc_complete_rq(struct request *rq)
 
 static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	int i;
 
 	for (i = 0; i < set->nr_maps; i++) {
@@ -2914,32 +2902,16 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	nvme_fc_init_io_queues(ctrl);
 
-	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
-	ctrl->tag_set.ops = &nvme_fc_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size =
-		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
-	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+			&nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
 		return ret;
 
-	ctrl->ctrl.tagset = &ctrl->tag_set;
-
-	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-	if (ret)
-		goto out_free_tag_set;
-
 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
-		goto out_cleanup_blk_queue;
+		goto out_cleanup_tagset;
 
 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
@@ -2951,10 +2923,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 
 out_delete_hw_queues:
 	nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
-	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+	nvme_remove_io_tag_set(&ctrl->ctrl);
 	nvme_fc_free_io_queues(ctrl);
 
 	/* force put free routine to ignore io queues */
@@ -3165,15 +3135,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 			"to maxcmd\n",
 			opts->queue_size, ctrl->ctrl.maxcmd);
 		opts->queue_size = ctrl->ctrl.maxcmd;
-	}
-
-	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-		/* warn if sqsize is lower than queue_size */
-		dev_warn(ctrl->ctrl.device,
-			"queue_size %zu > ctrl sqsize %u, reducing "
-			"to sqsize\n",
-			opts->queue_size, ctrl->ctrl.sqsize + 1);
-		opts->queue_size = ctrl->ctrl.sqsize + 1;
+		ctrl->ctrl.sqsize = opts->queue_size - 1;
 	}
 
 	ret = nvme_fc_init_aen_ops(ctrl);
@@ -3546,35 +3508,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	nvme_fc_init_queue(ctrl, 0);
 
-	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->admin_tag_set.cmd_size =
-		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->admin_tag_set.driver_data = ctrl;
-	ctrl->admin_tag_set.nr_hw_queues = 1;
-	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
-	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+			&nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
 		goto out_free_queues;
-	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
-	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-		ret = PTR_ERR(ctrl->ctrl.fabrics_q);
-		goto out_free_admin_tag_set;
-	}
-
-	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.admin_q)) {
-		ret = PTR_ERR(ctrl->ctrl.admin_q);
-		goto out_cleanup_fabrics_q;
-	}
 
 	/*
 	 * Would have been nice to init io queues tag set as well.
@@ -3585,7 +3524,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
 	if (ret)
-		goto out_cleanup_admin_q;
+		goto out_cleanup_tagset;
 
 	/* at this point, teardown path changes to ref counting on nvme ctrl */
@@ -3640,12 +3579,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	return ERR_PTR(-EIO);
 
-out_cleanup_admin_q:
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_admin_tag_set:
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_queues:
 	kfree(ctrl->queues);
 out_free_ida:
...
@@ -136,9 +136,11 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
 {
+	struct nvme_ctrl *ctrl;
 	struct request *req;
 	void *meta = NULL;
 	struct bio *bio;
+	u32 effects;
 	int ret;
 
 	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
@@ -147,8 +149,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 		return PTR_ERR(req);
 
 	bio = req->bio;
+	ctrl = nvme_req(req)->ctrl;
 
-	ret = nvme_execute_passthru_rq(req);
+	ret = nvme_execute_passthru_rq(req, &effects);
 
 	if (result)
 		*result = le64_to_cpu(nvme_req(req)->result.u64);
@@ -158,6 +161,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	if (bio)
 		blk_rq_unmap_user(bio);
 	blk_mq_free_request(req);
+
+	if (effects)
+		nvme_passthru_end(ctrl, effects, cmd, ret);
+
 	return ret;
 }
@@ -757,11 +764,17 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 	case NVME_IOCTL_IO_CMD:
 		return nvme_dev_user_cmd(ctrl, argp);
 	case NVME_IOCTL_RESET:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		dev_warn(ctrl->device, "resetting controller\n");
 		return nvme_reset_ctrl_sync(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return nvme_reset_subsystem(ctrl);
 	case NVME_IOCTL_RESCAN:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		nvme_queue_scan(ctrl);
 		return 0;
 	default:
...
@@ -233,6 +233,12 @@ struct nvme_fault_inject {
 #endif
 };
 
+enum nvme_ctrl_flags {
+	NVME_CTRL_FAILFAST_EXPIRED	= 0,
+	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
+	NVME_CTRL_STARTED_ONCE		= 2,
+};
+
 struct nvme_ctrl {
 	bool comp_seen;
 	enum nvme_ctrl_state state;
@@ -354,8 +360,6 @@ struct nvme_ctrl {
 	u16 maxcmd;
 	int nr_reconnects;
 	unsigned long flags;
-#define NVME_CTRL_FAILFAST_EXPIRED	0
-#define NVME_CTRL_ADMIN_Q_STOPPED	1
 	struct nvmf_ctrl_options *opts;
 
 	struct page *discard_page;
@@ -602,11 +606,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
 static inline void nvme_should_fail(struct request *req) {}
 #endif
 
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
 static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 {
+	int ret;
+
 	if (!ctrl->subsystem)
 		return -ENOTTY;
-	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+	if (!nvme_wait_reset(ctrl))
+		return -EBUSY;
+
+	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+	if (ret)
+		return ret;
+
+	return nvme_try_sched_reset(ctrl);
 }
 
 /*
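
Worth noting in passing: the NSSR magic 0x4E564D65 is simply ASCII "NVMe" packed into the register value, as the NVMe base specification requires for a subsystem reset. A standalone userspace check (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int nssr = 0x4E564D65;	/* NSSR reset value */

		/* 0x4E 'N', 0x56 'V', 0x4D 'M', 0x65 'e' */
		printf("%c%c%c%c\n", nssr >> 24, (nssr >> 16) & 0xff,
		       (nssr >> 8) & 0xff, nssr & 0xff);	/* prints NVMe */
		return 0;
	}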
@@ -712,7 +728,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -722,6 +737,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
 void nvme_start_ctrl(struct nvme_ctrl *ctrl);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size);
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size);
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
@@ -802,7 +825,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 void nvme_queue_scan(struct nvme_ctrl *ctrl);
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
@@ -968,14 +990,6 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 }
 #endif
 
-static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
-{
-	ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
-	if (IS_ERR(ctrl->connect_q))
-		return PTR_ERR(ctrl->connect_q);
-	return 0;
-}
-
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
 	return dev_to_disk(dev)->private_data;
@@ -1023,7 +1037,9 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			 u8 opcode);
-int nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+		       struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
 void nvme_put_ns(struct nvme_ns *ns);
...
@@ -2526,9 +2526,11 @@ static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
 
 	set->ops = &nvme_mq_ops;
 	set->nr_hw_queues = dev->online_queues - 1;
-	set->nr_maps = 2; /* default + read */
+	set->nr_maps = 1;
+	if (dev->io_queues[HCTX_TYPE_READ])
+		set->nr_maps = 2;
 	if (dev->io_queues[HCTX_TYPE_POLL])
-		set->nr_maps++;
+		set->nr_maps = 3;
 	set->timeout = NVME_IO_TIMEOUT;
 	set->numa_node = dev->ctrl.numa_node;
 	set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
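
blk-mq map slots are positional (HCTX_TYPE_DEFAULT = 0, HCTX_TYPE_READ = 1, HCTX_TYPE_POLL = 2), so nr_maps must reach the highest populated slot rather than merely count populated maps; a device with poll queues but no dedicated read queues still needs nr_maps = 3. A small sketch of the invariant the new code enforces (nvme_pci_nr_maps is a hypothetical name; the patch open-codes the same logic):

	/* Sketch: pick nr_maps from which optional queue classes exist. */
	static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
	{
		if (dev->io_queues[HCTX_TYPE_POLL])
			return 3;	/* default + read slot + poll */
		if (dev->io_queues[HCTX_TYPE_READ])
			return 2;	/* default + read */
		return 1;		/* default only */
	}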
@@ -2831,6 +2833,8 @@ static void nvme_reset_work(struct work_struct *work)
 		nvme_start_admin_queue(&dev->ctrl);
 	}
 
+	dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
+
 	/*
 	 * Limit the max command size to prevent iod->sg allocations going
 	 * over a single page.
@@ -2843,7 +2847,6 @@ static void nvme_reset_work(struct work_struct *work)
 	 * Don't limit the IOMMU merged segment size.
 	 */
 	dma_set_max_seg_size(dev->dev, 0xffffffff);
-	dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
 
 	mutex_unlock(&dev->shutdown_lock);
...
@@ -295,7 +295,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
 		struct request *rq, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_rdma_ctrl *ctrl = set->driver_data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
@@ -320,7 +320,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_rdma_ctrl *ctrl = data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
 	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -332,7 +332,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_rdma_ctrl *ctrl = data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
 	struct nvme_rdma_queue *queue = &ctrl->queues[0];
 
 	BUG_ON(hctx_idx != 0);
@@ -788,64 +788,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	return ret;
 }
 
-static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
 {
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
-	int ret;
+	unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
+				NVME_RDMA_DATA_SGL_SIZE;
 
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_admin_mq_ops;
-	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
-			NVME_RDMA_DATA_SGL_SIZE;
-	set->driver_data = ctrl;
-	set->nr_hw_queues = 1;
-	set->timeout = NVME_ADMIN_TIMEOUT;
-	set->flags = BLK_MQ_F_NO_SCHED;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.admin_tagset = set;
-	return ret;
-}
-
-static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->tag_set;
-	int ret;
+	if (ctrl->max_integrity_segments)
+		cmd_size += sizeof(struct nvme_rdma_sgl) +
+			    NVME_RDMA_METADATA_SGL_SIZE;
 
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_mq_ops;
-	set->queue_depth = nctrl->sqsize + 1;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_SHOULD_MERGE;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
-			NVME_RDMA_DATA_SGL_SIZE;
-	if (nctrl->max_integrity_segments)
-		set->cmd_size += sizeof(struct nvme_rdma_sgl) +
-				 NVME_RDMA_METADATA_SGL_SIZE;
-	set->driver_data = ctrl;
-	set->nr_hw_queues = nctrl->queue_count - 1;
-	set->timeout = NVME_IO_TIMEOUT;
-	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.tagset = set;
-	return ret;
+	return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+			&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
 }
 
-static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
 {
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
-	}
 	if (ctrl->async_event_sqe.data) {
 		cancel_work_sync(&ctrl->ctrl.async_event_work);
 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -887,26 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		goto out_free_queue;
 
 	if (new) {
-		error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+		error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+				&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+				BLK_MQ_F_NO_SCHED,
+				sizeof(struct nvme_rdma_request) +
+				NVME_RDMA_DATA_SGL_SIZE);
 		if (error)
 			goto out_free_async_qe;
-
-		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-			error = PTR_ERR(ctrl->ctrl.fabrics_q);
-			goto out_free_tagset;
-		}
-
-		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.admin_q)) {
-			error = PTR_ERR(ctrl->ctrl.admin_q);
-			goto out_cleanup_fabrics_q;
-		}
 	}
 
 	error = nvme_rdma_start_queue(ctrl, 0);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_remove_admin_tag_set;
 
 	error = nvme_enable_ctrl(&ctrl->ctrl);
 	if (error)
@@ -933,15 +883,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 out_stop_queue:
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-out_cleanup_queue:
+out_remove_admin_tag_set:
 	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_async_qe:
 	if (ctrl->async_event_sqe.data) {
 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -953,16 +897,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	return error;
 }
 
-static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
-{
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
-	}
-	nvme_rdma_free_io_queues(ctrl);
-}
-
 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 {
 	int ret, nr_queues;
@@ -975,10 +909,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 		ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
 		if (ret)
 			goto out_free_io_queues;
-
-		ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-		if (ret)
-			goto out_free_tag_set;
 	}
 
 	/*
@@ -989,7 +919,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
 	ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
 	if (ret)
-		goto out_cleanup_connect_q;
+		goto out_cleanup_tagset;
 
 	if (!new) {
 		nvme_start_queues(&ctrl->ctrl);
@@ -1022,13 +952,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	nvme_stop_queues(&ctrl->ctrl);
 	nvme_sync_io_queues(&ctrl->ctrl);
 	nvme_rdma_stop_io_queues(ctrl);
-out_cleanup_connect_q:
+out_cleanup_tagset:
 	nvme_cancel_tagset(&ctrl->ctrl);
 	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
+		nvme_remove_io_tag_set(&ctrl->ctrl);
 out_free_io_queues:
 	nvme_rdma_free_io_queues(ctrl);
 	return ret;
@@ -1041,9 +968,11 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	if (remove)
+	if (remove) {
 		nvme_start_admin_queue(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, remove);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	}
+	nvme_rdma_destroy_admin_queue(ctrl);
 }
 
 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
@@ -1055,9 +984,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
 		nvme_cancel_tagset(&ctrl->ctrl);
-		if (remove)
+		if (remove) {
 			nvme_start_queues(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, remove);
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		}
+		nvme_rdma_free_io_queues(ctrl);
 	}
 }
@@ -1179,14 +1110,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
 		nvme_cancel_tagset(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, new);
+		if (new)
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		nvme_rdma_free_io_queues(ctrl);
 	}
 destroy_admin:
 	nvme_stop_admin_queue(&ctrl->ctrl);
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, new);
+	if (new)
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	nvme_rdma_destroy_admin_queue(ctrl);
 	return ret;
 }
@@ -2206,7 +2141,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
 
 static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_rdma_ctrl *ctrl = set->driver_data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
...
@@ -134,7 +134,6 @@ struct nvme_tcp_queue {
 	/* send state */
 	struct nvme_tcp_request *request;
 
-	int			queue_size;
 	u32			maxh2cdata;
 	size_t			cmnd_capsule_len;
 	struct nvme_tcp_ctrl	*ctrl;
@@ -466,7 +465,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 		struct request *rq, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_tcp_ctrl *ctrl = set->driver_data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_tcp_cmd_pdu *pdu;
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
@@ -490,7 +489,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_tcp_ctrl *ctrl = data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	hctx->driver_data = queue;
@@ -500,7 +499,7 @@ static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_tcp_ctrl *ctrl = data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
 
 	hctx->driver_data = queue;
@@ -1479,8 +1478,7 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
 		queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
 }
 
-static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
-		int qid, size_t queue_size)
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1492,7 +1490,6 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 	INIT_LIST_HEAD(&queue->send_list);
 	mutex_init(&queue->send_mutex);
 	INIT_WORK(&queue->io_work, nvme_tcp_io_work);
-	queue->queue_size = queue_size;
 
 	if (qid > 0)
 		queue->cmnd_capsule_len = nctrl->ioccsz * 16;
@@ -1690,51 +1687,6 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 	return ret;
 }
 
-static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
-	int ret;
-
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_tcp_admin_mq_ops;
-	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_BLOCKING;
-	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = ctrl;
-	set->nr_hw_queues = 1;
-	set->timeout = NVME_ADMIN_TIMEOUT;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		nctrl->admin_tagset = set;
-	return ret;
-}
-
-static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->tag_set;
-	int ret;
-
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_tcp_mq_ops;
-	set->queue_depth = nctrl->sqsize + 1;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = ctrl;
-	set->nr_hw_queues = nctrl->queue_count - 1;
-	set->timeout = NVME_IO_TIMEOUT;
-	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		nctrl->tagset = set;
-	return ret;
-}
-
 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
 {
 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
@@ -1785,7 +1737,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
-	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+	ret = nvme_tcp_alloc_queue(ctrl, 0);
 	if (ret)
 		return ret;
 
@@ -1805,7 +1757,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
+		ret = nvme_tcp_alloc_queue(ctrl, i);
 		if (ret)
 			goto out_free_queues;
 	}
@@ -1893,10 +1845,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_io_queues(ctrl);
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->connect_q);
-		blk_mq_free_tag_set(ctrl->tagset);
-	}
+	if (remove)
+		nvme_remove_io_tag_set(ctrl);
 	nvme_tcp_free_io_queues(ctrl);
 }
@@ -1909,13 +1859,12 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 		return ret;
 
 	if (new) {
-		ret = nvme_tcp_alloc_tag_set(ctrl);
+		ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+				&nvme_tcp_mq_ops,
+				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+				sizeof(struct nvme_tcp_request));
 		if (ret)
 			goto out_free_io_queues;
-
-		ret = nvme_ctrl_init_connect_q(ctrl);
-		if (ret)
-			goto out_free_tag_set;
 	}
 
 	/*
@@ -1962,10 +1911,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 out_cleanup_connect_q:
 	nvme_cancel_tagset(ctrl);
 	if (new)
-		blk_mq_destroy_queue(ctrl->connect_q);
-out_free_tag_set:
-	if (new)
-		blk_mq_free_tag_set(ctrl->tagset);
+		nvme_remove_io_tag_set(ctrl);
 out_free_io_queues:
 	nvme_tcp_free_io_queues(ctrl);
 	return ret;
@@ -1974,11 +1920,8 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_queue(ctrl, 0);
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->admin_q);
-		blk_mq_destroy_queue(ctrl->fabrics_q);
-		blk_mq_free_tag_set(ctrl->admin_tagset);
-	}
+	if (remove)
+		nvme_remove_admin_tag_set(ctrl);
 	nvme_tcp_free_admin_queue(ctrl);
 }
@@ -1991,26 +1934,17 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 		return error;
 
 	if (new) {
-		error = nvme_tcp_alloc_admin_tag_set(ctrl);
+		error = nvme_alloc_admin_tag_set(ctrl,
+				&to_tcp_ctrl(ctrl)->admin_tag_set,
+				&nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
+				sizeof(struct nvme_tcp_request));
 		if (error)
 			goto out_free_queue;
-
-		ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
-		if (IS_ERR(ctrl->fabrics_q)) {
-			error = PTR_ERR(ctrl->fabrics_q);
-			goto out_free_tagset;
-		}
-
-		ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
-		if (IS_ERR(ctrl->admin_q)) {
-			error = PTR_ERR(ctrl->admin_q);
-			goto out_cleanup_fabrics_q;
-		}
 	}
 
 	error = nvme_tcp_start_queue(ctrl, 0);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	error = nvme_enable_ctrl(ctrl);
 	if (error)
@@ -2030,15 +1964,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 out_stop_queue:
 	nvme_tcp_stop_queue(ctrl, 0);
 	nvme_cancel_admin_tagset(ctrl);
-out_cleanup_queue:
-	if (new)
-		blk_mq_destroy_queue(ctrl->admin_q);
-out_cleanup_fabrics_q:
-	if (new)
-		blk_mq_destroy_queue(ctrl->fabrics_q);
-out_free_tagset:
+out_cleanup_tagset:
 	if (new)
-		blk_mq_free_tag_set(ctrl->admin_tagset);
+		nvme_remove_admin_tag_set(ctrl);
 out_free_queue:
 	nvme_tcp_free_admin_queue(ctrl);
 	return error;
@@ -2489,7 +2417,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_tcp_ctrl *ctrl = set->driver_data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
...
@@ -830,6 +830,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
 	}
 	init_completion(&sq->free_done);
 	init_completion(&sq->confirm_done);
+	nvmet_auth_sq_init(sq);
 
 	return 0;
 }
...
@@ -23,17 +23,12 @@ static void nvmet_auth_expired_work(struct work_struct *work)
 	sq->dhchap_tid = -1;
 }
 
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+void nvmet_auth_sq_init(struct nvmet_sq *sq)
 {
-	u32 result = le32_to_cpu(req->cqe->result.u32);
-
 	/* Initialize in-band authentication */
-	INIT_DELAYED_WORK(&req->sq->auth_expired_work,
-			  nvmet_auth_expired_work);
-	req->sq->authenticated = false;
-	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
-	result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
-	req->cqe->result.u32 = cpu_to_le32(result);
+	INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
+	sq->authenticated = false;
+	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
 }
 
 static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
...
@@ -198,6 +198,12 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	return ret;
 }
 
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+{
+	return (u32)ctrl->cntlid |
+		(nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+}
+
 static void nvmet_execute_admin_connect(struct nvmet_req *req)
 {
 	struct nvmf_connect_command *c = &req->cmd->connect;
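
The fabrics connect response packs the controller ID into the low 16 bits of the 32-bit result, with the authentication-required flags above it. The reworked NVME_CONNECT_AUTHREQ_* values are full 32-bit masks, which is what lets nvmet_connect_result() OR them in directly instead of shifting by 16 at each call site. A rough illustration of the layout (the bit position used here is an assumption; the authoritative definitions live in include/linux/nvme.h):

	/* Hypothetical mirror of the connect-response result layout. */
	#define MY_AUTHREQ_ATR	(1u << 17)	/* assumed to match upstream */

	static unsigned int make_connect_result(unsigned short cntlid,
						int needs_auth)
	{
		/* bits 0..15: CNTLID; higher bits: AUTHREQ flags */
		return (unsigned int)cntlid | (needs_auth ? MY_AUTHREQ_ATR : 0);
	}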
@@ -269,10 +275,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
 		ctrl->pi_support ? " T10-PI is enabled" : "",
 		nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
-	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
-	if (nvmet_has_auth(ctrl))
-		nvmet_init_auth(ctrl, req);
+	req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
 out:
 	kfree(d);
 complete:
@@ -328,13 +331,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (status)
 		goto out_ctrl_put;
 
-	/* pass back cntlid for successful completion */
-	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
 	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
-
-	if (nvmet_has_auth(ctrl))
-		nvmet_init_auth(ctrl, req);
+	req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
 
 out:
 	kfree(d);
 complete:
...
...@@ -12,11 +12,9 @@ ...@@ -12,11 +12,9 @@
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{ {
const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
/* Number of logical blocks per physical block. */
const u32 lpp = ql->physical_block_size / ql->logical_block_size;
/* Logical blocks per physical block, 0's based. */ /* Logical blocks per physical block, 0's based. */
const __le16 lpp0b = to0based(lpp); const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
bdev_logical_block_size(bdev));
/* /*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN, * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
...@@ -42,11 +40,12 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) ...@@ -42,11 +40,12 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
/* NPWA = Namespace Preferred Write Alignment. 0's based */ /* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg; id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */ /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
id->npdg = to0based(ql->discard_granularity / ql->logical_block_size); id->npdg = to0based(bdev_discard_granularity(bdev) /
bdev_logical_block_size(bdev));
/* NPDG = Namespace Preferred Deallocate Alignment */ /* NPDG = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg; id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */ /* NOWS = Namespace Optimal Write Size */
id->nows = to0based(ql->io_opt / ql->logical_block_size); id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
} }
void nvmet_bdev_ns_disable(struct nvmet_ns *ns) void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
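
Note on the nvmet_bdev_set_limits() change: every limit is now read through the bdev_*() accessors instead of caching a pointer to bdev_get_queue(bdev)->limits. A worked example of the 0's based conversion, with assumed geometry (512-byte logical, 4096-byte physical blocks); to0based() is the nvmet.h helper that clamps its argument to the 1..0xffff range and subtracts one:

    u32 lpp = bdev_physical_block_size(bdev) /
              bdev_logical_block_size(bdev);    /* 4096 / 512 = 8 */
    __le16 lpp0b = to0based(lpp);               /* reported 0's based: cpu_to_le16(7) */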
...
@@ -204,7 +204,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
 		struct request *req, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_loop_ctrl *ctrl = set->driver_data;
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
 	nvme_req(req)->ctrl = &ctrl->ctrl;
@@ -218,7 +218,7 @@ static struct lock_class_key loop_hctx_fq_lock_key;
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_loop_ctrl *ctrl = data;
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
 	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -238,7 +238,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_loop_ctrl *ctrl = data;
+	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
 	struct nvme_loop_queue *queue = &ctrl->queues[0];
 
 	BUG_ON(hctx_idx != 0);
@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
 		return;
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
 	list_del(&ctrl->list);
 	mutex_unlock(&nvme_loop_ctrl_mutex);
 
-	if (nctrl->tagset) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(&ctrl->tag_set);
-	}
+	if (nctrl->tagset)
+		nvme_remove_io_tag_set(nctrl);
 	kfree(ctrl->queues);
 	nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
 	int error;
 
-	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
-	ctrl->admin_tag_set.driver_data = ctrl;
-	ctrl->admin_tag_set.nr_hw_queues = 1;
-	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
-	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
 	ctrl->queues[0].ctrl = ctrl;
 	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
 	if (error)
 		return error;
 	ctrl->ctrl.queue_count = 1;
 
-	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+			&nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+			sizeof(struct nvme_loop_iod) +
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
 	if (error)
 		goto out_free_sq;
-	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
-	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-		error = PTR_ERR(ctrl->ctrl.fabrics_q);
-		goto out_free_tagset;
-	}
-
-	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.admin_q)) {
-		error = PTR_ERR(ctrl->ctrl.admin_q);
-		goto out_cleanup_fabrics_q;
-	}
 
 	/* reset stopped state for the fresh admin queue */
 	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
 
 	error = nvmf_connect_admin_queue(&ctrl->ctrl);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	ctrl->ctrl.max_hw_sectors =
 		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 	error = nvme_init_ctrl_finish(&ctrl->ctrl);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_cleanup_tagset;
 
 	return 0;
 
-out_cleanup_queue:
+out_cleanup_tagset:
 	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	return error;
@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 	if (ret)
 		return ret;
 
-	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
-	ctrl->tag_set.ops = &nvme_loop_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
-	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
-	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-	ctrl->ctrl.tagset = &ctrl->tag_set;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+			&nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+			sizeof(struct nvme_loop_iod) +
+			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
 	if (ret)
 		goto out_destroy_queues;
 
-	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-	if (ret)
-		goto out_free_tagset;
-
 	ret = nvme_loop_connect_io_queues(ctrl);
 	if (ret)
-		goto out_cleanup_connect_q;
+		goto out_cleanup_tagset;
 
 	return 0;
 
-out_cleanup_connect_q:
-	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tagset:
-	blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+	nvme_remove_io_tag_set(&ctrl->ctrl);
 out_destroy_queues:
 	nvme_loop_destroy_io_queues(ctrl);
 	return ret;
@@ -601,7 +556,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 	ret = -ENOMEM;
 
-	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
 
@@ -621,6 +575,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 			opts->queue_size, ctrl->ctrl.maxcmd);
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
 
 	if (opts->nr_io_queues) {
 		ret = nvme_loop_create_io_queues(ctrl);
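
Note on the loop.c conversion: the hand-rolled tag_set setup, the blk_mq_init_queue() calls for the admin/fabrics queues, and nvme_ctrl_init_connect_q() all move into the new common helpers. Their signatures, as implied by the call sites above (the canonical declarations live in the core nvme host code):

    int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
                    const struct blk_mq_ops *ops, unsigned int flags,
                    unsigned int cmd_size);
    void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
    int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
                    const struct blk_mq_ops *ops, unsigned int flags,
                    unsigned int cmd_size);
    void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

The helpers store the generic struct nvme_ctrl in set->driver_data, which is why every driver_data consumer above now converts back with to_loop_ctrl(), the driver's existing container_of() wrapper. The sqsize assignment also moves below the maxcmd clamp so that it is computed from the final queue_size.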
...
@@ -704,7 +704,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 		bool set_ctrl);
 int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
 int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_auth_sq_init(struct nvmet_sq *sq);
 void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
 void nvmet_auth_sq_free(struct nvmet_sq *sq);
 int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
@@ -726,8 +726,9 @@ static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
 {
 	return 0;
 }
-static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
-		struct nvmet_req *req) {};
+static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+}
 static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
 static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
 static inline bool nvmet_check_auth_status(struct nvmet_req *req)
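
Note on the nvmet.h change: with the connect result built by nvmet_connect_result(), the auth hook no longer needs the controller or the request, so nvmet_init_auth(ctrl, req) becomes nvmet_auth_sq_init(sq); the rewritten stub also drops the stray semicolon after the empty body. The declarations follow the file's usual Kconfig stub pattern, sketched here under the CONFIG_NVME_TARGET_AUTH guard this file uses:

    #ifdef CONFIG_NVME_TARGET_AUTH
    void nvmet_auth_sq_init(struct nvmet_sq *sq);
    #else
    static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
    {
    }
    #endif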
...
@@ -215,9 +215,11 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 {
 	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
 	struct request *rq = req->p.rq;
+	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+	u32 effects;
 	int status;
 
-	status = nvme_execute_passthru_rq(rq);
+	status = nvme_execute_passthru_rq(rq, &effects);
 
 	if (status == NVME_SC_SUCCESS &&
 	    req->cmd->common.opcode == nvme_admin_identify) {
@@ -238,6 +240,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 	req->cqe->result = nvme_req(rq)->result;
 	nvmet_req_complete(req, status);
 	blk_mq_free_request(rq);
+
+	if (effects)
+		nvme_passthru_end(ctrl, effects, req->cmd, status);
 }
 
 static void nvmet_passthru_req_done(struct request *rq,
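
Note on the passthru.c change: nvme_execute_passthru_rq() now reports the command effects to the caller instead of acting on them internally, so effects handling can run after the request and its tag have been released. Reduced to its essentials (this mirrors the hunk above; the deadlock rationale is our reading of the "handle effects after freeing the request" fix):

    struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;    /* capture while rq is alive */
    u32 effects;
    int status = nvme_execute_passthru_rq(rq, &effects);

    /* ... propagate the result and complete the nvmet request ... */
    blk_mq_free_request(rq);

    /* Effects handling may flush scan work or unfreeze queues, so it
     * must not run while this request still holds a tag. */
    if (effects)
            nvme_passthru_end(ctrl, effects, req->cmd, status);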
...
@@ -164,7 +164,6 @@ static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
 static struct workqueue_struct *nvmet_tcp_wq;
 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
 
 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
@@ -920,10 +919,17 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
 	struct nvmet_tcp_cmd *cmd;
 
-	if (likely(queue->nr_cmds))
+	if (likely(queue->nr_cmds)) {
+		if (unlikely(data->ttag >= queue->nr_cmds)) {
+			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+				queue->idx, data->ttag, queue->nr_cmds);
+			nvmet_tcp_fatal_error(queue);
+			return -EPROTO;
+		}
 		cmd = &queue->cmds[data->ttag];
-	else
+	} else {
 		cmd = &queue->connect;
+	}
 
 	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
 		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
@@ -961,6 +967,13 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 		return nvmet_tcp_handle_icreq(queue);
 	}
 
+	if (unlikely(hdr->type == nvme_tcp_icreq)) {
+		pr_err("queue %d: received icreq pdu in state %d\n",
+			queue->idx, queue->state);
+		nvmet_tcp_fatal_error(queue);
+		return -EPROTO;
+	}
+
 	if (hdr->type == nvme_tcp_h2c_data) {
 		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
 		if (unlikely(ret))
@@ -1163,7 +1176,8 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 			queue->idx, cmd->req.cmd->common.command_id,
 			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
 			le32_to_cpu(cmd->exp_ddgst));
-		nvmet_tcp_finish_cmd(cmd);
+		nvmet_req_uninit(&cmd->req);
+		nvmet_tcp_free_cmd_buffers(cmd);
 		nvmet_tcp_fatal_error(queue);
 		ret = -EPROTO;
 		goto out;
@@ -1392,12 +1406,6 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 }
 
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
-{
-	nvmet_req_uninit(&cmd->req);
-	nvmet_tcp_free_cmd_buffers(cmd);
-}
-
 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
 {
 	struct nvmet_tcp_cmd *cmd = queue->cmds;
@@ -1406,16 +1414,28 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
 	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
 		if (nvmet_tcp_need_data_in(cmd))
 			nvmet_req_uninit(&cmd->req);
-
-		nvmet_tcp_free_cmd_buffers(cmd);
 	}
 
 	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
 		/* failed in connect */
-		nvmet_tcp_finish_cmd(&queue->connect);
+		nvmet_req_uninit(&queue->connect.req);
 	}
 }
 
+static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+{
+	struct nvmet_tcp_cmd *cmd = queue->cmds;
+	int i;
+
+	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+		if (nvmet_tcp_need_data_in(cmd))
+			nvmet_tcp_free_cmd_buffers(cmd);
+	}
+
+	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+		nvmet_tcp_free_cmd_buffers(&queue->connect);
+}
+
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
 	struct page *page;
@@ -1434,6 +1454,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	nvmet_tcp_uninit_data_in_cmds(queue);
 	nvmet_sq_destroy(&queue->nvme_sq);
 	cancel_work_sync(&queue->io_work);
+	nvmet_tcp_free_cmd_data_in_buffers(queue);
 	sock_release(queue->sock);
 	nvmet_tcp_free_cmds(queue);
 	if (queue->hdr_digest || queue->data_digest)
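
Note on the nvmet-tcp fixes: the first hunk rejects an H2CData PDU whose transfer tag would index past queue->cmds[] (previously a misbehaving peer could drive an out-of-bounds access); the second rejects an icreq that arrives after ICReq negotiation has already completed; the remaining hunks split the old nvmet_tcp_finish_cmd() so that nvmet_req_uninit() still runs during teardown, while the per-command buffers are only freed after io_work has been cancelled, closing a window where in-flight work could touch freed buffers. The PDU fields consulted by the bounds check, abridged from the nvme-tcp protocol definitions (layout quoted from memory; verify against include/linux/nvme-tcp.h):

    struct nvme_tcp_data_pdu {
            struct nvme_tcp_hdr hdr;
            __u16               command_id;
            __u16               ttag;           /* indexes queue->cmds[]; now bounds-checked */
            __le32              data_offset;    /* must equal cmd->rbytes_done */
            __le32              data_length;
            __u8                rsvd[4];
    };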
...
@@ -387,7 +387,6 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
 {
 	struct block_device *bdev = req->ns->bdev;
 	unsigned int nr_zones = bdev_nr_zones(bdev);
-	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = NULL;
 	sector_t sector = 0;
 	int ret;
@@ -396,7 +395,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
 	};
 
 	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
-				 GFP_NOIO, q->node);
+				 GFP_NOIO, bdev->bd_disk->node_id);
 	if (!d.zbitmap) {
 		ret = -ENOMEM;
 		goto out;
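
Note on the zns.c change: the last request_queue dereference in this path goes away; the NUMA hint for the zone bitmap now comes from the gendisk, which records the node it was allocated on in node_id. A standalone sketch of the allocation (nr_zones and bdev assumed in scope):

    /* One bit per zone, rounded up to longs, allocated NUMA-local to
     * the disk; allocation failure is handled by the caller's error path. */
    unsigned long *zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones),
                                          sizeof(*zbitmap), GFP_NOIO,
                                          bdev->bd_disk->node_id);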
...
@@ -1482,8 +1482,8 @@ struct nvmf_connect_command {
 };
 
 enum {
-	NVME_CONNECT_AUTHREQ_ASCR	= (1 << 2),
-	NVME_CONNECT_AUTHREQ_ATR	= (1 << 1),
+	NVME_CONNECT_AUTHREQ_ASCR	= (1U << 18),
+	NVME_CONNECT_AUTHREQ_ATR	= (1U << 17),
 };
 
 struct nvmf_connect_data {
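
Note on the nvme.h change: the old field-relative values (bits 1 and 2 of the AUTHREQ field) needed a separate 16-bit shift when building the response dword; the new absolute values can be OR'ed directly into the 32-bit connect response, which is exactly what nvmet_connect_result() above relies on. The layout implied by the constants (field meanings per our reading of the NVMe-oF spec):

    /*
     * Connect response dword 0:
     *   bits 15:0   CNTLID
     *   bits 31:16  AUTHREQ
     *     bit 17    ATR  - in-band authentication required
     *     bit 18    ASCR - authentication over a secure channel required
     */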
...