Commit eb71f435 authored by Christoph Hellwig, committed by Jens Axboe

nvme: Modify and export sync command submission for fabrics

NVMe over Fabrics will use __nvme_submit_sync_cmd in the transport
drivers and requires a few tweaks to it.  For that we export it and
add a few more parameters (a usage sketch follows the list below):

1. allow passing a queue ID to the block layer

   For the NVMe over Fabrics connect command we need to be able to
   specify a queue ID that we want to send the command on.  Add a qid
   parameter to the relevant functions to enable this behavior.

2. allow submitting at_head commands

   When we want to (re)connect to a controller that still has
   inflight queued commands, we want to issue the connect first
   and only then allow the other queued commands to be kicked off.
   This prevents failures during controller resets and reconnects.

3. allow passing flags to blk_mq_alloc_request

   Both for the Fabrics connect command and the keep-alive feature in
   NVMe 1.2.1 we want to be able to use reserved requests.
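
For illustration, here is a minimal sketch of how a transport might
combine the three new parameters.  Everything below is hypothetical
except the __nvme_submit_sync_cmd() signature and the
BLK_MQ_REQ_RESERVED flag:

#include <linux/blk-mq.h>
#include "nvme.h"

/*
 * Hypothetical caller, not part of this patch: connect I/O queue
 * 'qid' before any other queued commands are dispatched.
 */
static int example_connect_io_queue(struct request_queue *q, u16 qid)
{
	struct nvme_command cmd = { };
	struct nvme_completion cqe;

	cmd.common.opcode = 0x7f;	/* placeholder opcode */

	/*
	 * qid pins the request to the hardware context of the queue
	 * being connected, at_head = 1 dispatches it ahead of any
	 * requeued I/O, and BLK_MQ_REQ_RESERVED allocates from the
	 * reserved tag pool so the connect cannot starve behind
	 * normal commands.
	 */
	return __nvme_submit_sync_cmd(q, &cmd, &cqe, NULL, 0, 0,
			qid, 1, BLK_MQ_REQ_RESERVED);
}

Note the qid mapping in nvme_alloc_request() below: NVME_QID_ANY keeps
the old behavior of letting blk-mq pick a hardware context, while an
explicit qid is translated with "qid ? qid - 1 : 0" because NVMe I/O
queue IDs are 1-based while blk-mq hardware contexts are 0-indexed.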
Reviewed-by: Jay Freyensee <james.p.freyensee@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Tested-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 7d2e8008
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -192,11 +192,16 @@ void nvme_requeue_req(struct request *req)
 EXPORT_SYMBOL_GPL(nvme_requeue_req);
 
 struct request *nvme_alloc_request(struct request_queue *q,
-		struct nvme_command *cmd, unsigned int flags)
+		struct nvme_command *cmd, unsigned int flags, int qid)
 {
 	struct request *req;
 
-	req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+	if (qid == NVME_QID_ANY) {
+		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+	} else {
+		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+				qid ? qid - 1 : 0);
+	}
 	if (IS_ERR(req))
 		return req;
@@ -324,12 +329,12 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
-		unsigned timeout)
+		unsigned timeout, int qid, int at_head, int flags)
 {
 	struct request *req;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, 0);
+	req = nvme_alloc_request(q, cmd, flags, qid);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
@@ -342,17 +347,19 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		goto out;
 	}
 
-	blk_execute_rq(req->q, NULL, req, 0);
+	blk_execute_rq(req->q, NULL, req, at_head);
 	ret = req->errors;
  out:
 	blk_mq_free_request(req);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
 
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buffer, unsigned bufflen)
 {
-	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
+	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+			NVME_QID_ANY, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -370,7 +377,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 	void *meta = NULL;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, 0);
+	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
@@ -520,7 +527,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+			NVME_QID_ANY, 0, 0);
 	if (ret >= 0)
 		*result = le32_to_cpu(cqe.result);
 	return ret;
@@ -539,7 +547,8 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+			NVME_QID_ANY, 0, 0);
 	if (ret >= 0)
 		*result = le32_to_cpu(cqe.result);
 	return ret;
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -231,8 +231,9 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
 void nvme_start_queues(struct nvme_ctrl *ctrl);
 void nvme_kill_queues(struct nvme_ctrl *ctrl);
 
+#define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
-		struct nvme_command *cmd, unsigned int flags);
+		struct nvme_command *cmd, unsigned int flags, int qid);
 void nvme_requeue_req(struct request *req);
 int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
@@ -240,7 +241,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
-		unsigned timeout);
+		unsigned timeout, int qid, int at_head, int flags);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void __user *ubuffer, unsigned bufflen, u32 *result,
 		unsigned timeout);
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -901,7 +901,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 			req->tag, nvmeq->qid);
 
 	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
-			BLK_MQ_REQ_NOWAIT);
+			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
 	if (IS_ERR(abort_req)) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
@@ -1512,7 +1512,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	cmd.delete_queue.opcode = opcode;
 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
 	if (IS_ERR(req))
 		return PTR_ERR(req);