Commit f705f837 authored by Christoph Hellwig, committed by Jens Axboe

nvme: consolidate synchronous command submission helpers

Note that we keep the unused timeout argument, but allow callers to
pass 0 instead of a timeout if they want the default.  This will allow
adding a timeout to the pass through path later on.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6a927007
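
In outline, the patch replaces three near-identical synchronous helpers (nvme_submit_sync_cmd on a caller-allocated request, nvme_submit_admin_cmd, and nvme_submit_io_cmd) with a single internal workhorse that allocates its own request, plus one thin exported wrapper. The sketch below restates that shape in isolation; it is a summary of the hunks that follow, not extra driver code, and per the message above the timeout argument is carried but not yet acted on (callers pass 0 for the default):

	/* One internal helper: allocates a request on @q, issues @cmd
	 * synchronously, and optionally returns the completion dword in
	 * @result.  @timeout is kept for later use; 0 means "default". */
	static int __nvme_submit_sync_cmd(struct request_queue *q,
			struct nvme_command *cmd, u32 *result, unsigned timeout);

	/* Thin exported wrapper for the common case: no result, default
	 * timeout. */
	int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd)
	{
		return __nvme_submit_sync_cmd(q, cmd, NULL, 0);
	}

	/* Call sites now differ only in the queue they pass:
	 *   admin commands:  nvme_submit_sync_cmd(dev->admin_q, &c);
	 *   I/O commands:    nvme_submit_sync_cmd(ns->queue, &c);
	 *   result needed:   __nvme_submit_sync_cmd(dev->admin_q, &c, &result, 0);
	 */
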
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -991,27 +991,40 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
  * Returns 0 on success. If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
-						u32 *result, unsigned timeout)
+static int __nvme_submit_sync_cmd(struct request_queue *q,
+		struct nvme_command *cmd, u32 *result, unsigned timeout)
 {
 	struct sync_cmd_info cmdinfo;
-	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-	struct nvme_queue *nvmeq = cmd_rq->nvmeq;
+	struct nvme_cmd_info *cmd_rq;
+	struct request *req;
+	int res;
+
+	req = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
 	cmdinfo.task = current;
 	cmdinfo.status = -EINTR;
 
 	cmd->common.command_id = req->tag;
 
+	cmd_rq = blk_mq_rq_to_pdu(req);
 	nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
 
 	set_current_state(TASK_UNINTERRUPTIBLE);
-	nvme_submit_cmd(nvmeq, cmd);
+	nvme_submit_cmd(cmd_rq->nvmeq, cmd);
 	schedule();
 
 	if (result)
 		*result = cmdinfo.result;
-	return cmdinfo.status;
+	res = cmdinfo.status;
+	blk_mq_free_request(req);
+	return res;
+}
+
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd)
+{
+	return __nvme_submit_sync_cmd(q, cmd, NULL, 0);
 }
 
 static int nvme_submit_async_admin_req(struct nvme_dev *dev)
@@ -1060,41 +1073,6 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
 	return nvme_submit_cmd(nvmeq, cmd);
 }
 
-static int __nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
-						u32 *result, unsigned timeout)
-{
-	int res;
-	struct request *req;
-
-	req = blk_mq_alloc_request(dev->admin_q, WRITE, GFP_KERNEL, false);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-	res = nvme_submit_sync_cmd(req, cmd, result, timeout);
-	blk_mq_free_request(req);
-	return res;
-}
-
-int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
-								u32 *result)
-{
-	return __nvme_submit_admin_cmd(dev, cmd, result, ADMIN_TIMEOUT);
-}
-
-int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
-		struct nvme_command *cmd, u32 *result)
-{
-	int res;
-	struct request *req;
-
-	req = blk_mq_alloc_request(ns->queue, WRITE, (GFP_KERNEL|__GFP_WAIT),
-							false);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-	res = nvme_submit_sync_cmd(req, cmd, result, NVME_IO_TIMEOUT);
-	blk_mq_free_request(req);
-	return res;
-}
-
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 {
 	struct nvme_command c;
@@ -1103,7 +1081,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 	c.delete_queue.opcode = opcode;
 	c.delete_queue.qid = cpu_to_le16(id);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	return nvme_submit_sync_cmd(dev->admin_q, &c);
 }
 
 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
@@ -1120,7 +1098,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
 	c.create_cq.cq_flags = cpu_to_le16(flags);
 	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	return nvme_submit_sync_cmd(dev->admin_q, &c);
 }
 
 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
@@ -1137,7 +1115,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
 	c.create_sq.sq_flags = cpu_to_le16(flags);
 	c.create_sq.cqid = cpu_to_le16(qid);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	return nvme_submit_sync_cmd(dev->admin_q, &c);
 }
 
 static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
@@ -1161,7 +1139,7 @@ int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 	c.identify.prp1 = cpu_to_le64(dma_addr);
 	c.identify.cns = cpu_to_le32(cns);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	return nvme_submit_sync_cmd(dev->admin_q, &c);
 }
 
 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
@@ -1175,7 +1153,7 @@ int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	return nvme_submit_admin_cmd(dev, &c, result);
+	return __nvme_submit_sync_cmd(dev->admin_q, &c, result, 0);
 }
 
 int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
@@ -1189,7 +1167,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	return nvme_submit_admin_cmd(dev, &c, result);
+	return __nvme_submit_sync_cmd(dev->admin_q, &c, result, 0);
 }
 
 /**
@@ -1813,7 +1791,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
 	c.rw.prp2 = cpu_to_le64(iod->first_dma);
 	c.rw.metadata = cpu_to_le64(meta_dma);
-	status = nvme_submit_io_cmd(dev, ns, &c, NULL);
+	status = nvme_submit_sync_cmd(ns->queue, &c);
  unmap:
 	nvme_unmap_user_pages(dev, write, iod);
 	nvme_free_iod(dev, iod);
@@ -1869,23 +1847,15 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
 	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
 						ADMIN_TIMEOUT;
 
-	if (length != cmd.data_len)
+	if (length != cmd.data_len) {
 		status = -ENOMEM;
-	else if (ns) {
-		struct request *req;
-
-		req = blk_mq_alloc_request(ns->queue, WRITE,
-				(GFP_KERNEL|__GFP_WAIT), false);
-		if (IS_ERR(req))
-			status = PTR_ERR(req);
-		else {
-			status = nvme_submit_sync_cmd(req, &c, &cmd.result,
-								timeout);
-			blk_mq_free_request(req);
-		}
-	} else
-		status = __nvme_submit_admin_cmd(dev, &c, &cmd.result, timeout);
+		goto out;
+	}
+
+	status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
+			&cmd.result, timeout);
+ out:
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
 		nvme_free_iod(dev, iod);
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -1053,7 +1053,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
 	c.common.prp1 = cpu_to_le64(dma_addr);
 	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
 			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
-	res = nvme_submit_admin_cmd(dev, &c, NULL);
+	res = nvme_submit_sync_cmd(dev->admin_q, &c);
 	if (res != NVME_SC_SUCCESS) {
 		temp_c = LOG_TEMP_UNKNOWN;
 	} else {
@@ -1121,7 +1121,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.common.prp1 = cpu_to_le64(dma_addr);
 	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
 			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
-	res = nvme_submit_admin_cmd(dev, &c, NULL);
+	res = nvme_submit_sync_cmd(dev->admin_q, &c);
 	if (res != NVME_SC_SUCCESS) {
 		temp_c_cur = LOG_TEMP_UNKNOWN;
 	} else {
@@ -1609,7 +1609,7 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		c.common.cdw10[0] = cpu_to_le32(cdw10);
 	}
 
-	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+	nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
 		goto out_unmap;
@@ -1971,7 +1971,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.format.nsid = cpu_to_le32(ns->ns_id);
 	c.format.cdw10 = cpu_to_le32(cdw10);
 
-	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+	nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
 		goto out_dma;
@@ -2139,7 +2139,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
 		nvme_offset += unit_num_blocks;
 
-		nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
+		nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
 		if (nvme_sc != NVME_SC_SUCCESS) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2696,7 +2696,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		c.common.opcode = nvme_cmd_flush;
 		c.common.nsid = cpu_to_le32(ns->ns_id);
 
-		nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
+		nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
 			goto out;
@@ -2724,8 +2724,7 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 	c.common.opcode = nvme_cmd_flush;
 	c.common.nsid = cpu_to_le32(ns->ns_id);
 
-	nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
-
+	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
 		goto out;
@@ -2932,7 +2931,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
+	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
 	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -158,11 +158,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 			unsigned long addr, unsigned length);
 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod);
-int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
-						struct nvme_command *, u32 *);
-int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
-int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
-							u32 *result);
+int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd);
 int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
 			dma_addr_t dma_addr);
 int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
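
A note for callers of the consolidated helper, following the comment kept at the top of the first hunk: the return value spans two error domains, negative Linux error codes (e.g. what PTR_ERR() yields when request allocation fails) and positive NVM Express status codes from the device. A minimal sketch of how a caller distinguishes them; handle_errno() and handle_nvme_status() are hypothetical names, not driver functions:

	int ret = nvme_submit_sync_cmd(dev->admin_q, &c);

	if (ret < 0)
		handle_errno(ret);		/* Linux error code, e.g. -ENOMEM */
	else if (ret > 0)
		handle_nvme_status(ret);	/* NVM Express status code */
	/* ret == 0: command completed successfully */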