Commit 762d6bd2 authored by Jens Axboe

Merge tag 'nvme-5.13-2021-04-06' of git://git.infradead.org/nvme into for-5.13/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.13

 - fix handling of very large MDTS values (Bart Van Assche)
 - retrigger ANA log update if group descriptor isn't found
   (Hannes Reinecke)
 - fix locking contexts in nvme-tcp and nvmet-tcp (Sagi Grimberg)
 - return proper error code from discovery ctrl (Hou Pu)
 - verify the SGLS field in nvmet-tcp and nvmet-fc (Max Gurtovoy)
 - disallow passthru cmd from targeting a nsid != nsid of the block dev
   (Niklas Cassel)
 - do not allow model_number to exceed 40 bytes in nvmet (Noam Gottlieb)
 - enable optional queue idle period tracking in nvmet-tcp
   (Mark Wunderlich)
 - various cleanups and optimizations (Chaitanya Kulkarni, Kanchan Joshi)
 - expose fast_io_fail_tmo in sysfs (Daniel Wagner)
 - implement non-MDTS command limits (Keith Busch)
 - reduce warnings for unhandled command effects (Keith Busch)
 - allocate storage for the SQE as part of the nvme_request (Keith Busch)"
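A recurring theme in the transport patches below is moving the submission queue entry (SQE) out of on-stack or transport-private storage and into the per-request driver PDU, so that nvme_setup_cmd() no longer needs a command pointer passed in. A minimal sketch of the pattern, with simplified types; the real structures appear in the diffs below:

```c
/*
 * Simplified sketch, not the actual driver code: each transport embeds
 * a struct nvme_command in its per-request PDU and points
 * nvme_req(rq)->cmd at it once, in .init_request.  nvme_setup_cmd()
 * can then reach the SQE through the request itself.
 */
struct my_transport_iod {
	struct nvme_request	req;	/* must come first */
	struct nvme_command	cmd;	/* SQE now lives with the request */
	/* ... transport-specific fields ... */
};

static int my_transport_init_request(struct blk_mq_tag_set *set,
		struct request *rq, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct my_transport_iod *iod = blk_mq_rq_to_pdu(rq);

	nvme_req(rq)->cmd = &iod->cmd;
	return 0;
}
```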

* tag 'nvme-5.13-2021-04-06' of git://git.infradead.org/nvme: (33 commits)
  nvme: fix handling of large MDTS values
  nvme: implement non-mdts command limits
  nvme: disallow passthru cmd from targeting a nsid != nsid of the block dev
  nvme: retrigger ANA log update if group descriptor isn't found
  nvme: export fast_io_fail_tmo to sysfs
  nvme: remove superfluous else in nvme_ctrl_loss_tmo_store
  nvme: use sysfs_emit instead of sprintf
  nvme-fc: check sgl supported by target
  nvme-tcp: check sgl supported by target
  nvmet-tcp: enable optional queue idle period tracking
  nvmet-tcp: fix incorrect locking in state_change sk callback
  nvme-tcp: block BH in sk state_change sk callback
  nvmet: return proper error code from discovery ctrl
  nvme: warn of unhandled effects only once
  nvme: use driver pdu command for passthrough
  nvme-pci: allocate nvme_command within driver pdu
  nvmet: do not allow model_number exceed 40 bytes
  nvmet: remove unnecessary ctrl parameter
  nvmet-fc: update function documentation
  nvme-fc: fix the function documentation comment
  ...
parents 80755855 8609c63f
@@ -1708,7 +1708,7 @@ nvme_fc_handle_ls_rqst_work(struct work_struct *work)
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
- * @remoteport: pointer to the (registered) remote port that the LS
+ * @portptr:    pointer to the (registered) remote port that the LS
 *              was received from. The remoteport is associated with
 *              a specific localport.
 * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
@@ -2128,6 +2128,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	op->op.fcp_req.first_sgl = op->sgl;
 	op->op.fcp_req.private = &op->priv[0];
 	nvme_req(rq)->ctrl = &ctrl->ctrl;
+	nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
 	return res;
 }
@@ -2759,8 +2760,6 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_fc_ctrl *ctrl = queue->ctrl;
 	struct request *rq = bd->rq;
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
-	struct nvme_command *sqe = &cmdiu->sqe;
 	enum nvmefc_fcp_datadir io_dir;
 	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
 	u32 data_len;
@@ -2770,7 +2769,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
-	ret = nvme_setup_cmd(ns, rq, sqe);
+	ret = nvme_setup_cmd(ns, rq);
 	if (ret)
 		return ret;
@@ -3086,7 +3085,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
-	ret = nvme_init_identify(&ctrl->ctrl);
+	ret = nvme_init_ctrl_finish(&ctrl->ctrl);
 	if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
 		goto out_disconnect_admin_queue;
@@ -3100,6 +3099,11 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	}
 
 	/* FC-NVME supports normal SGL Data Block Descriptors */
+	if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
+		dev_err(ctrl->ctrl.device,
+			"Mandatory sgls are not supported!\n");
+		goto out_disconnect_admin_queue;
+	}
 
 	if (opts->queue_size > ctrl->ctrl.maxcmd) {
 		/* warn if maxcmd is lower than queue_size */
...
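For reference, the new checks in nvme-fc (above) and nvme-tcp (below) test bits 1:0 of the Identify Controller SGLS field, which advertise whether the controller supports SGL Data Block descriptors at all; fabrics hosts cannot operate without them. The same test in isolation, as a sketch whose helper name is illustrative rather than taken from the driver:

```c
/*
 * Illustrative helper only: SGLS bits 1:0 != 0 means the controller
 * supports SGL Data Block descriptors, which fabrics hosts rely on.
 */
static inline bool nvme_ctrl_sgls_supported(u32 sgls)
{
	return sgls & ((1 << 0) | (1 << 1));
}
```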
@@ -602,8 +602,8 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
 	struct nvme_subsystem *subsys =
 		container_of(dev, struct nvme_subsystem, dev);
 
-	return sprintf(buf, "%s\n",
+	return sysfs_emit(buf, "%s\n",
 			nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
 }
 
 static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
@@ -628,7 +628,7 @@ SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
 static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
-	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
+	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
 }
 
 DEVICE_ATTR_RO(ana_grpid);
@@ -637,7 +637,7 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
 {
 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
-	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
+	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
 }
 
 DEVICE_ATTR_RO(ana_state);
@@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 		if (desc.state) {
 			/* found the group desc: update */
 			nvme_update_ns_ana_state(&desc, ns);
+		} else {
+			/* group desc not found: trigger a re-read */
+			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+			queue_work(nvme_wq, &ns->ctrl->ana_work);
 		}
 	} else {
 		ns->ana_state = NVME_ANA_OPTIMIZED;
...
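sysfs_emit() is the preferred replacement for sprintf() in sysfs ->show() callbacks: it knows the buffer is a full page and refuses to write past it, whereas sprintf() has no bound at all. A minimal sketch of a show callback using it; the attribute name and value here are made up for illustration:

```c
static ssize_t example_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds the output to PAGE_SIZE internally */
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);
```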
@@ -276,6 +276,9 @@ struct nvme_ctrl {
 	u32 max_hw_sectors;
 	u32 max_segments;
 	u32 max_integrity_segments;
+	u32 max_discard_sectors;
+	u32 max_discard_segments;
+	u32 max_zeroes_sectors;
 #ifdef CONFIG_BLK_DEV_ZONED
 	u32 max_zone_append;
 #endif
@@ -599,7 +602,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
 void nvme_start_ctrl(struct nvme_ctrl *ctrl);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
-int nvme_init_identify(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
@@ -623,8 +626,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags);
 void nvme_cleanup_cmd(struct request *req);
-blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
-		struct nvme_command *cmd);
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -745,7 +747,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
 		struct nvme_id_ctrl *id)
 {
-	if (ctrl->subsys->cmic & (1 << 3))
+	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
 		dev_warn(ctrl->device,
 			"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
 	return 0;
...
@@ -224,6 +224,7 @@ struct nvme_queue {
  */
 struct nvme_iod {
 	struct nvme_request req;
+	struct nvme_command cmd;
 	struct nvme_queue *nvmeq;
 	bool use_sgl;
 	int aborted;
@@ -429,6 +430,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 	iod->nvmeq = nvmeq;
 	nvme_req(req)->ctrl = &dev->ctrl;
+	nvme_req(req)->cmd = &iod->cmd;
 	return 0;
 }
@@ -917,7 +919,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_command cmnd;
+	struct nvme_command *cmnd = &iod->cmd;
 	blk_status_t ret;
 
 	iod->aborted = 0;
@@ -931,24 +933,24 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
 		return BLK_STS_IOERR;
 
-	ret = nvme_setup_cmd(ns, req, &cmnd);
+	ret = nvme_setup_cmd(ns, req);
 	if (ret)
 		return ret;
 
 	if (blk_rq_nr_phys_segments(req)) {
-		ret = nvme_map_data(dev, req, &cmnd);
+		ret = nvme_map_data(dev, req, cmnd);
 		if (ret)
 			goto out_free_cmd;
 	}
 
 	if (blk_integrity_rq(req)) {
-		ret = nvme_map_metadata(dev, req, &cmnd);
+		ret = nvme_map_metadata(dev, req, cmnd);
 		if (ret)
 			goto out_unmap_data;
 	}
 
 	blk_mq_start_request(req);
-	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
+	nvme_submit_cmd(nvmeq, cmnd, bd->last);
 	return BLK_STS_OK;
 out_unmap_data:
 	nvme_unmap_data(dev, req);
@@ -1060,18 +1062,10 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	struct nvme_queue *nvmeq = data;
-	irqreturn_t ret = IRQ_NONE;
-
-	/*
-	 * The rmb/wmb pair ensures we see all updates from a previous run of
-	 * the irq handler, even if that was on another CPU.
-	 */
-	rmb();
 	if (nvme_process_cq(nvmeq))
-		ret = IRQ_HANDLED;
-	wmb();
-	return ret;
+		return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 
 static irqreturn_t nvme_irq_check(int irq, void *data)
@@ -2653,7 +2647,7 @@ static void nvme_reset_work(struct work_struct *work)
 	 */
 	dev->ctrl.max_integrity_segments = 1;
 
-	result = nvme_init_identify(&dev->ctrl);
+	result = nvme_init_ctrl_finish(&dev->ctrl);
 	if (result)
 		goto out;
...
@@ -314,6 +314,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
 			NVME_RDMA_DATA_SGL_SIZE;
 
 	req->queue = queue;
+	nvme_req(rq)->cmd = req->sqe.data;
 
 	return 0;
 }
@@ -917,7 +918,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
-	error = nvme_init_identify(&ctrl->ctrl);
+	error = nvme_init_ctrl_finish(&ctrl->ctrl);
 	if (error)
 		goto out_quiesce_queue;
@@ -2038,7 +2039,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request *rq = bd->rq;
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_qe *sqe = &req->sqe;
-	struct nvme_command *c = sqe->data;
+	struct nvme_command *c = nvme_req(rq)->cmd;
 	struct ib_device *dev;
 	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
 	blk_status_t ret;
@@ -2061,7 +2062,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-	ret = nvme_setup_cmd(ns, rq, c);
+	ret = nvme_setup_cmd(ns, rq);
 	if (ret)
 		goto unmap_qe;
...
@@ -417,6 +417,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 {
 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_tcp_cmd_pdu *pdu;
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
@@ -427,8 +428,10 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 	if (!req->pdu)
 		return -ENOMEM;
 
+	pdu = req->pdu;
 	req->queue = queue;
 	nvme_req(rq)->ctrl = &ctrl->ctrl;
+	nvme_req(rq)->cmd = &pdu->cmd;
 
 	return 0;
 }
@@ -867,7 +870,7 @@ static void nvme_tcp_state_change(struct sock *sk)
 {
 	struct nvme_tcp_queue *queue;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	queue = sk->sk_user_data;
 	if (!queue)
 		goto done;
@@ -888,7 +891,7 @@ static void nvme_tcp_state_change(struct sock *sk)
 	queue->state_change(sk);
 done:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
@@ -1875,7 +1878,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 	blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	error = nvme_init_identify(ctrl);
+	error = nvme_init_ctrl_finish(ctrl);
 	if (error)
 		goto out_quiesce_queue;
@@ -1963,6 +1966,11 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 		goto destroy_admin;
 	}
 
+	if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) {
+		dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
+		goto destroy_admin;
+	}
+
 	if (opts->queue_size > ctrl->sqsize + 1)
 		dev_warn(ctrl->device,
 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
@@ -2259,7 +2267,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
 	blk_status_t ret;
 
-	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
+	ret = nvme_setup_cmd(ns, rq);
 	if (ret)
 		return ret;
...
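The locking change above reflects that socket callbacks such as sk_state_change can be invoked from both softirq and process context, so a reader of sk_callback_lock must block bottom halves to avoid deadlocking against a BH-context caller on the same CPU. A sketch of the safe pattern, with a hypothetical queue type and trimmed-down body:

```c
/*
 * Sketch of the pattern: sk callbacks may run in both process and
 * softirq context, so the read side must disable bottom halves.
 */
static void example_state_change(struct sock *sk)
{
	struct example_queue *queue;	/* hypothetical queue type */

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (queue)
		pr_debug("socket state %d\n", sk->sk_state);
	read_unlock_bh(&sk->sk_callback_lock);
}
```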
@@ -513,7 +513,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	default:
 		id->nuse = id->nsze;
 		break;
-        }
+	}
 
 	if (req->ns->bdev)
 		nvmet_bdev_set_limits(req->ns->bdev, id);
@@ -940,7 +940,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 	if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
 		return nvmet_parse_discovery_cmd(req);
 
-	ret = nvmet_check_ctrl_status(req, cmd);
+	ret = nvmet_check_ctrl_status(req);
 	if (unlikely(ret))
 		return ret;
...
@@ -1149,6 +1149,12 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
 	if (!len)
 		return -EINVAL;
 
+	if (len > NVMET_MN_MAX_SIZE) {
+		pr_err("Model number size can not exceed %d Bytes\n",
+		       NVMET_MN_MAX_SIZE);
+		return -EINVAL;
+	}
+
 	for (pos = 0; pos < len; pos++) {
 		if (!nvmet_is_ascii(page[pos]))
 			return -EINVAL;
...
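NVMe defines the model number (MN) as a fixed 40-byte, space-padded ASCII field in the Identify Controller data structure, which is why the store handler above rejects longer strings and non-ASCII characters. When the target later fills the identify payload, the usual kernel idiom for such fields is memcpy_and_pad(); a sketch under that assumption (the surrounding function is illustrative, id->mn is the real 40-byte field in struct nvme_id_ctrl):

```c
static void example_fill_model_number(struct nvme_id_ctrl *id,
		const char *model)
{
	/* id->mn is a fixed 40-byte field; pad the tail with spaces */
	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
}
```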
@@ -864,10 +864,9 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 {
-	struct nvme_command *cmd = req->cmd;
 	u16 ret;
 
-	ret = nvmet_check_ctrl_status(req, cmd);
+	ret = nvmet_check_ctrl_status(req);
 	if (unlikely(ret))
 		return ret;
@@ -1179,19 +1178,19 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
 	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
 }
 
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
-		struct nvmet_req *req, struct nvmet_ctrl **ret)
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+				       const char *hostnqn, u16 cntlid,
+				       struct nvmet_req *req)
 {
+	struct nvmet_ctrl *ctrl = NULL;
 	struct nvmet_subsys *subsys;
-	struct nvmet_ctrl *ctrl;
-	u16 status = 0;
 
 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
-		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		goto out;
 	}
 
 	mutex_lock(&subsys->lock);
@@ -1204,33 +1203,34 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 			if (!kref_get_unless_zero(&ctrl->ref))
 				continue;
 
-			*ret = ctrl;
-			goto out;
+			/* ctrl found */
+			goto found;
 		}
 	}
 
+	ctrl = NULL; /* ctrl not found */
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
-	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
-out:
+found:
 	mutex_unlock(&subsys->lock);
 	nvmet_subsys_put(subsys);
-	return status;
+out:
+	return ctrl;
 }
 
-u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
+u16 nvmet_check_ctrl_status(struct nvmet_req *req)
 {
 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
-		       cmd->common.opcode, req->sq->qid);
+		       req->cmd->common.opcode, req->sq->qid);
 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 	}
 
 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
-		       cmd->common.opcode, req->sq->qid);
+		       req->cmd->common.opcode, req->sq->qid);
 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 	}
 	return 0;
@@ -1311,10 +1311,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		goto out;
 	}
 
-	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	down_read(&nvmet_config_sem);
 	if (!nvmet_host_allowed(subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
@@ -1322,6 +1322,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		goto out_put_subsystem;
 	}
 	up_read(&nvmet_config_sem);
...
@@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
 		req->error_loc =
 			offsetof(struct nvme_get_log_page_command, lid);
-		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto out;
 	}
 
 	/* Spec requires dword aligned offsets */
 	if (offset & 0x3) {
+		req->error_loc =
+			offsetof(struct nvme_get_log_page_command, lpo);
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto out;
 	}
@@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
 	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
 		req->error_loc = offsetof(struct nvme_identify, cns);
-		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto out;
 	}
...
@@ -190,12 +190,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
 				  le32_to_cpu(c->kato), &ctrl);
-	if (status) {
-		if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
-			req->error_loc =
-				offsetof(struct nvme_common_command, opcode);
+	if (status)
 		goto out;
-	}
 
 	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
@@ -222,7 +218,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 {
 	struct nvmf_connect_command *c = &req->cmd->connect;
 	struct nvmf_connect_data *d;
-	struct nvmet_ctrl *ctrl = NULL;
+	struct nvmet_ctrl *ctrl;
 	u16 qid = le16_to_cpu(c->qid);
 	u16 status = 0;
@@ -249,11 +245,12 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 		goto out;
 	}
 
-	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
-				     le16_to_cpu(d->cntlid),
-				     req, &ctrl);
-	if (status)
+	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+				   le16_to_cpu(d->cntlid), req);
+	if (!ctrl) {
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 		goto out;
+	}
 
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
...
@@ -1996,6 +1996,7 @@ nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
+ * @hosthandle:  pointer to the host specific data, gets stored in iod.
 * @lsrsp:       pointer to a lsrsp structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
...
@@ -141,7 +141,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
-	ret = nvme_setup_cmd(ns, req, &iod->cmd);
+	ret = nvme_setup_cmd(ns, req);
 	if (ret)
 		return ret;
@@ -205,8 +205,10 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
 		unsigned int numa_node)
 {
 	struct nvme_loop_ctrl *ctrl = set->driver_data;
+	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
 	nvme_req(req)->ctrl = &ctrl->ctrl;
+	nvme_req(req)->cmd = &iod->cmd;
 	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
 			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
 }
@@ -396,7 +398,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
-	error = nvme_init_identify(&ctrl->ctrl);
+	error = nvme_init_ctrl_finish(&ctrl->ctrl);
 	if (error)
 		goto out_cleanup_queue;
...
@@ -27,6 +27,7 @@
 #define NVMET_ERROR_LOG_SLOTS		128
 #define NVMET_NO_ERROR_LOC		((u16)-1)
 #define NVMET_DEFAULT_CTRL_MODEL	"Linux"
+#define NVMET_MN_MAX_SIZE		40
 
 /*
  * Supported optional AENs:
@@ -428,10 +429,11 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
-		struct nvmet_req *req, struct nvmet_ctrl **ret);
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+				       const char *hostnqn, u16 cntlid,
+				       struct nvmet_req *req);
 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
-u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
+u16 nvmet_check_ctrl_status(struct nvmet_req *req);
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		enum nvme_subsys_type type);
...
@@ -29,6 +29,16 @@ static int so_priority;
 module_param(so_priority, int, 0644);
 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
 
+/* Define a time period (in usecs) that io_work() shall sample an activated
+ * queue before determining it to be idle. This optional module behavior
+ * can enable NIC solutions that support socket optimized packet processing
+ * using advanced interrupt moderation techniques.
+ */
+static int idle_poll_period_usecs;
+module_param(idle_poll_period_usecs, int, 0644);
+MODULE_PARM_DESC(idle_poll_period_usecs,
+		"nvmet tcp io_work poll till idle time period in usecs");
+
 #define NVMET_TCP_RECV_BUDGET		8
 #define NVMET_TCP_SEND_BUDGET		8
 #define NVMET_TCP_IO_WORK_BUDGET	64
@@ -119,6 +129,8 @@ struct nvmet_tcp_queue {
 	struct ahash_request	*snd_hash;
 	struct ahash_request	*rcv_hash;
 
+	unsigned long		poll_end;
+
 	spinlock_t		state_lock;
 	enum nvmet_tcp_queue_state state;
@@ -1216,6 +1228,23 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
 	spin_unlock(&queue->state_lock);
 }
 
+static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
+{
+	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
+}
+
+static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
+		int ops)
+{
+	if (!idle_poll_period_usecs)
+		return false;
+
+	if (ops)
+		nvmet_tcp_arm_queue_deadline(queue);
+
+	return !time_after(jiffies, queue->poll_end);
+}
+
 static void nvmet_tcp_io_work(struct work_struct *w)
 {
 	struct nvmet_tcp_queue *queue =
@@ -1241,9 +1270,10 @@ static void nvmet_tcp_io_work(struct work_struct *w)
 	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
 
 	/*
-	 * We exahusted our budget, requeue our selves
+	 * Requeue the worker if idle deadline period is in progress or any
+	 * ops activity was recorded during the do-while loop above.
 	 */
-	if (pending)
+	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
 		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
 }
@@ -1434,7 +1464,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
 {
 	struct nvmet_tcp_queue *queue;
 
-	write_lock_bh(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	queue = sk->sk_user_data;
 	if (!queue)
 		goto done;
@@ -1452,7 +1482,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
 			queue->idx, sk->sk_state);
 	}
 done:
-	write_unlock_bh(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
@@ -1501,6 +1531,8 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
 		sock->sk->sk_state_change = nvmet_tcp_state_change;
 		queue->write_space = sock->sk->sk_write_space;
 		sock->sk->sk_write_space = nvmet_tcp_write_space;
+		if (idle_poll_period_usecs)
+			nvmet_tcp_arm_queue_deadline(queue);
 		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
 	}
 	write_unlock_bh(&sock->sk->sk_callback_lock);
...
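The deadline logic above keeps io_work re-queueing itself for up to idle_poll_period_usecs after the last observed activity, instead of dropping out as soon as one pass finds nothing to do; that keeps the worker polling the socket while NIC interrupt moderation batches packets. The shape of the pattern, stripped of the nvmet specifics (every name in this sketch is illustrative, not from the driver):

```c
/* Illustrative skeleton of a budgeted work loop with an idle deadline. */
static void example_io_work(struct work_struct *w)
{
	struct example_queue *queue = container_of(w, struct example_queue,
						   io_work);
	bool pending;
	int ops = 0;

	do {
		pending = example_try_recv_and_send(queue, &ops);
	} while (pending && ops < EXAMPLE_WORK_BUDGET);

	/* recent activity pushes the idle deadline forward */
	if (example_check_deadline(queue, ops) || pending)
		queue_work(example_wq, &queue->io_work);
}
```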
@@ -405,6 +405,16 @@ struct nvme_id_ctrl_zns {
 	__u8	rsvd1[4095];
 };
 
+struct nvme_id_ctrl_nvm {
+	__u8	vsl;
+	__u8	wzsl;
+	__u8	wusl;
+	__u8	dmrl;
+	__le32	dmrsl;
+	__le64	dmsl;
+	__u8	rsvd16[4080];
+};
+
 enum {
 	NVME_ID_CNS_NS			= 0x00,
 	NVME_ID_CNS_CTRL		= 0x01,
...
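The new nvme_id_ctrl_nvm fields carry the non-MDTS limits mentioned in the summary: per-command maximums (e.g. wzsl for Write Zeroes, dmrl/dmrsl/dmsl for Dataset Management) that the host turns into the max_*_sectors values added to struct nvme_ctrl above. Several of them are expressed as a power of two of the controller's minimum memory page size; a hedged sketch of that conversion, with an illustrative helper name (the driver has its own equivalent):

```c
/*
 * Illustrative only: convert a limit expressed as a power of two of
 * the controller's minimum page size (CAP.MPSMIN, i.e. 2^(12 + MPSMIN)
 * bytes) into 512-byte sectors.
 */
static u32 example_units_to_sectors(u64 cap, u32 units)
{
	u32 page_shift = NVME_CAP_MPSMIN(cap) + 12;

	return 1 << (units + page_shift - SECTOR_SHIFT);
}
```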