Commit 499aeb45 authored by Jens Axboe

Merge branch 'nvme-4.21' of git://git.infradead.org/nvme into for-4.21/block

Pull last batch of NVMe updates for 4.21 from Christoph:

"This contains a series from Sagi to restore poll support for nvme-rdma,
 a new tracepoint from yupeng and various fixes."

* 'nvme-4.21' of git://git.infradead.org/nvme:
  nvme-pci: trace SQ status on completions
  nvme-rdma: implement polling queue map
  nvme-fabrics: allow user to pass in nr_poll_queues
  nvme-fabrics: allow nvmf_connect_io_queue to poll
  nvme-core: optionally poll sync commands
  block: make request_to_qc_t public
  nvme-tcp: fix spelling mistake "attepmpt" -> "attempt"
  nvme-tcp: fix endianess annotations
  nvmet-tcp: fix endianess annotations
  nvme-pci: refactor nvme_poll_irqdisable to make sparse happy
  nvme-pci: only set nr_maps to 2 if poll queues are supported
  nvmet: use a macro for default error location
  nvmet: fix comparison of a u16 with -1
parents cd19181b 604c01d5
@@ -1749,14 +1749,6 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 	blk_account_io_start(rq, true);
 }
 
-static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
-{
-	if (rq->tag != -1)
-		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
-
-	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
-}
-
 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 						struct request *rq,
 						blk_qc_t *cookie, bool last)
...
@@ -724,6 +724,31 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
 
+static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
+{
+	struct completion *waiting = rq->end_io_data;
+
+	rq->end_io_data = NULL;
+	complete(waiting);
+}
+
+static void nvme_execute_rq_polled(struct request_queue *q,
+		struct gendisk *bd_disk, struct request *rq, int at_head)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
+
+	rq->cmd_flags |= REQ_HIPRI;
+	rq->end_io_data = &wait;
+	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
+
+	while (!completion_done(&wait)) {
+		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
+		cond_resched();
+	}
+}
+
 /*
  * Returns 0 on success. If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
@@ -731,7 +756,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head,
-		blk_mq_req_flags_t flags)
+		blk_mq_req_flags_t flags, bool poll)
 {
 	struct request *req;
 	int ret;
@@ -748,6 +773,9 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		goto out;
 	}
 
-	blk_execute_rq(req->q, NULL, req, at_head);
+	if (poll)
+		nvme_execute_rq_polled(req->q, NULL, req, at_head);
+	else
+		blk_execute_rq(req->q, NULL, req, at_head);
 	if (result)
 		*result = nvme_req(req)->result;
@@ -765,7 +793,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buffer, unsigned bufflen)
 {
 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0, 0, false);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1084,7 +1112,7 @@ static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword
 	c.features.dword11 = cpu_to_le32(dword11);
 
 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
 	if (ret >= 0 && result)
 		*result = le32_to_cpu(res.u32);
 	return ret;
@@ -1727,7 +1755,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 	cmd.common.cdw11 = cpu_to_le32(len);
 
 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
-			ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
+			ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
 }
 EXPORT_SYMBOL_GPL(nvme_sec_submit);
 #endif /* CONFIG_BLK_SED_OPAL */
...
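
For reference, a minimal caller-side sketch of the new signature (illustrative only, not part of the diff; in this series the only caller that passes poll = true is nvmf_connect_io_queue(), shown further down):

    /* Issue a synchronous command and busy-poll for its completion
     * instead of sleeping until the interrupt-driven end_io fires. */
    ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
            data, sizeof(*data), 0, qid, 1,
            BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, true);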
@@ -159,7 +159,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	cmd.prop_get.offset = cpu_to_le32(off);
 
 	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0, 0, false);
 
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
@@ -206,7 +206,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.offset = cpu_to_le32(off);
 
 	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0, 0, false);
 
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
@@ -252,7 +252,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 	cmd.prop_set.value = cpu_to_le64(val);
 
 	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0, 0, false);
 	if (unlikely(ret))
 		dev_err(ctrl->device,
 			"Property Set error: %d, offset %#x\n",
@@ -406,7 +406,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
 			data, sizeof(*data), 0, NVME_QID_ANY, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
  *	> 0: NVMe error status code
  *	< 0: Linux errno error code
  */
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
 {
 	struct nvme_command cmd;
 	struct nvmf_connect_data *data;
@@ -468,7 +468,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
 			data, sizeof(*data), 0, qid, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
@@ -617,6 +617,7 @@ static const match_table_t opt_tokens = {
 	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
 	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
 	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
+	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
 	{ NVMF_OPT_ERR,			NULL			}
 };
@@ -850,6 +851,18 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			}
 			opts->nr_write_queues = token;
 			break;
+		case NVMF_OPT_NR_POLL_QUEUES:
+			if (match_int(args, &token)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			if (token <= 0) {
+				pr_err("Invalid nr_poll_queues %d\n", token);
+				ret = -EINVAL;
+				goto out;
+			}
+			opts->nr_poll_queues = token;
+			break;
 		default:
 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
 				p);
...
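
As a usage note (not part of the diff): nvmf_parse_options() consumes the comma-separated option string that userspace (for example nvme-cli's connect path) writes to /dev/nvme-fabrics when creating a controller, so the new token simply rides along with the existing ones. The address and NQN below are placeholders:

    transport=rdma,traddr=192.168.10.1,trsvcid=4420,nqn=testnqn,nr_poll_queues=4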
@@ -62,6 +62,7 @@ enum {
 	NVMF_OPT_HDR_DIGEST	= 1 << 15,
 	NVMF_OPT_DATA_DIGEST	= 1 << 16,
 	NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
+	NVMF_OPT_NR_POLL_QUEUES	= 1 << 18,
 };
 
 /**
@@ -93,6 +94,7 @@
  * @hdr_digest: generate/verify header digest (TCP)
  * @data_digest: generate/verify data digest (TCP)
  * @nr_write_queues: number of queues for write I/O
+ * @nr_poll_queues: number of queues for polling I/O
  */
 struct nvmf_ctrl_options {
 	unsigned		mask;
@@ -113,6 +115,7 @@ struct nvmf_ctrl_options {
 	bool			hdr_digest;
 	bool			data_digest;
 	unsigned int		nr_write_queues;
+	unsigned int		nr_poll_queues;
 };
 
 /*
@@ -168,7 +171,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
 int nvmf_register_transport(struct nvmf_transport_ops *ops);
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
...
@@ -1975,7 +1975,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 					(qsize / 5));
 		if (ret)
 			break;
-		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
 		if (ret)
 			break;
...
@@ -447,7 +447,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head,
-		blk_mq_req_flags_t flags);
+		blk_mq_req_flags_t flags, bool poll);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
...
@@ -32,6 +32,7 @@
 #include <linux/sed-opal.h>
 #include <linux/pci-p2pdma.h>
 
+#include "trace.h"
 #include "nvme.h"
 
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
@@ -1003,6 +1004,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	}
 
 	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
 	nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -1089,15 +1091,15 @@ static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
 	 * using the CQ lock. For normal interrupt driven threads we have
 	 * to disable the interrupt to avoid racing with it.
 	 */
-	if (nvmeq->cq_vector == -1)
+	if (nvmeq->cq_vector == -1) {
 		spin_lock(&nvmeq->cq_poll_lock);
-	else
-		disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
-
-	found = nvme_process_cq(nvmeq, &start, &end, tag);
-
-	if (nvmeq->cq_vector == -1)
+		found = nvme_process_cq(nvmeq, &start, &end, tag);
 		spin_unlock(&nvmeq->cq_poll_lock);
-	else
+	} else {
+		disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+		found = nvme_process_cq(nvmeq, &start, &end, tag);
 		enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+	}
 
 	nvme_complete_cqes(nvmeq, start, end);
 	return found;
@@ -2289,6 +2291,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	if (!dev->ctrl.tagset) {
 		dev->tagset.ops = &nvme_mq_ops;
 		dev->tagset.nr_hw_queues = dev->online_queues - 1;
-		dev->tagset.nr_maps = HCTX_MAX_TYPES;
+		dev->tagset.nr_maps = 2; /* default + read */
+		if (dev->io_queues[HCTX_TYPE_POLL])
+			dev->tagset.nr_maps++;
 		dev->tagset.timeout = NVME_IO_TIMEOUT;
 		dev->tagset.numa_node = dev_to_node(dev->dev);
...
@@ -162,6 +162,13 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
 	return queue - queue->ctrl->queues;
 }
 
+static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
+{
+	return nvme_rdma_queue_idx(queue) >
+		queue->ctrl->ctrl.opts->nr_io_queues +
+		queue->ctrl->ctrl.opts->nr_write_queues;
+}
+
 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
 {
 	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
@@ -440,6 +447,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 	const int send_wr_factor = 3;			/* MR, SEND, INV */
 	const int cq_factor = send_wr_factor + 1;	/* + RECV */
 	int comp_vector, idx = nvme_rdma_queue_idx(queue);
+	enum ib_poll_context poll_ctx;
 	int ret;
 
 	queue->device = nvme_rdma_find_get_device(queue->cm_id);
@@ -456,10 +464,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 	 */
 	comp_vector = idx == 0 ? idx : idx - 1;
 
+	/* Polling queues need direct cq polling context */
+	if (nvme_rdma_poll_queue(queue))
+		poll_ctx = IB_POLL_DIRECT;
+	else
+		poll_ctx = IB_POLL_SOFTIRQ;
+
 	/* +1 for ib_stop_cq */
 	queue->ib_cq = ib_alloc_cq(ibdev, queue,
 				cq_factor * queue->queue_size + 1,
-				comp_vector, IB_POLL_SOFTIRQ);
+				comp_vector, poll_ctx);
 	if (IS_ERR(queue->ib_cq)) {
 		ret = PTR_ERR(queue->ib_cq);
 		goto out_put_dev;
@@ -595,15 +609,17 @@ static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
 {
+	struct nvme_rdma_queue *queue = &ctrl->queues[idx];
+	bool poll = nvme_rdma_poll_queue(queue);
 	int ret;
 
 	if (idx)
-		ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
+		ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
 	else
 		ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 
 	if (!ret)
-		set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
 	else
 		dev_info(ctrl->ctrl.device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
@@ -646,6 +662,7 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 				ibdev->num_comp_vectors);
 
 	nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
+	nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());
 
 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret)
@@ -716,7 +733,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 		set->driver_data = ctrl;
 		set->nr_hw_queues = nctrl->queue_count - 1;
 		set->timeout = NVME_IO_TIMEOUT;
-		set->nr_maps = 2 /* default + read */;
+		set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
 	}
 
 	ret = blk_mq_alloc_tag_set(set);
@@ -1742,6 +1759,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_IOERR;
 }
 
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
+{
+	struct nvme_rdma_queue *queue = hctx->driver_data;
+
+	return ib_process_cq_direct(queue->ib_cq, -1);
+}
+
 static void nvme_rdma_complete_rq(struct request *rq)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
@@ -1772,6 +1796,17 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 			ctrl->device->dev, 0);
 	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
 			ctrl->device->dev, 0);
+	if (ctrl->ctrl.opts->nr_poll_queues) {
+		set->map[HCTX_TYPE_POLL].nr_queues =
+				ctrl->ctrl.opts->nr_poll_queues;
+		set->map[HCTX_TYPE_POLL].queue_offset =
+				ctrl->ctrl.opts->nr_io_queues;
+		if (ctrl->ctrl.opts->nr_write_queues)
+			set->map[HCTX_TYPE_POLL].queue_offset +=
+				ctrl->ctrl.opts->nr_write_queues;
+		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+	}
 	return 0;
 }
@@ -1783,6 +1818,7 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
 	.init_hctx	= nvme_rdma_init_hctx,
 	.timeout	= nvme_rdma_timeout,
 	.map_queues	= nvme_rdma_map_queues,
+	.poll		= nvme_rdma_poll,
 };
 
 static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
@@ -1927,7 +1963,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
-	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
+	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
+				opts->nr_poll_queues + 1;
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
@@ -1979,7 +2016,7 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
 	.required_opts	= NVMF_OPT_TRADDR,
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
-			  NVMF_OPT_NR_WRITE_QUEUES,
+			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES,
 	.create_ctrl	= nvme_rdma_create_ctrl,
 };
...
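
A worked example of the resulting queue layout (the numbers are illustrative, not from the commit): with nr_io_queues=4, nr_write_queues=2 and nr_poll_queues=2, nvme_rdma_create_ctrl() sets queue_count = 4 + 2 + 2 + 1 = 9 (one admin queue plus eight I/O queues), and nvme_rdma_map_queues() gives HCTX_TYPE_POLL nr_queues = 2 with queue_offset = 4 + 2 = 6. The poll map therefore covers hardware contexts 6 and 7, i.e. nvme_rdma_queue indices 7 and 8, which is exactly the range nvme_rdma_poll_queue() reports as polling queues and which nvme_rdma_create_queue_ib() sets up with IB_POLL_DIRECT completion queues.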
@@ -35,7 +35,7 @@ struct nvme_tcp_request {
 	u32			pdu_sent;
 	u16			ttag;
 	struct list_head	entry;
-	u32			ddgst;
+	__le32			ddgst;
 
 	struct bio		*curr_bio;
 	struct iov_iter		iter;
@@ -272,7 +272,8 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
 	return req;
 }
 
-static inline void nvme_tcp_ddgst_final(struct ahash_request *hash, u32 *dgst)
+static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
+		__le32 *dgst)
 {
 	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
 	crypto_ahash_final(hash);
@@ -817,7 +818,7 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 	union nvme_result res = {};
 
 	nvme_end_request(blk_mq_rq_from_pdu(req),
-		NVME_SC_DATA_XFER_ERROR, res);
+		cpu_to_le16(NVME_SC_DATA_XFER_ERROR), res);
 }
 
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
@@ -1393,7 +1394,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 	int ret;
 
 	if (idx)
-		ret = nvmf_connect_io_queue(nctrl, idx);
+		ret = nvmf_connect_io_queue(nctrl, idx, false);
 	else
 		ret = nvmf_connect_admin_queue(nctrl);
@@ -1789,7 +1790,7 @@ static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
 	if (nvme_tcp_setup_ctrl(ctrl, false))
 		goto requeue;
 
-	dev_info(ctrl->device, "Successfully reconnected (%d attepmpt)\n",
+	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
 			ctrl->nr_reconnects);
 
 	ctrl->nr_reconnects = 0;
@@ -1960,7 +1961,7 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
 	union nvme_result res = {};
 
 	nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
-	nvme_end_request(rq, NVME_SC_ABORT_REQ, res);
+	nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
 	return BLK_EH_DONE;
 }
...
@@ -139,3 +139,6 @@ const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_trace_disk_name);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(nvme_sq);
@@ -184,6 +184,29 @@ TRACE_EVENT(nvme_async_event,
 
 #undef aer_name
 
+TRACE_EVENT(nvme_sq,
+	TP_PROTO(struct request *req, __le16 sq_head, int sq_tail),
+	TP_ARGS(req, sq_head, sq_tail),
+	TP_STRUCT__entry(
+		__field(int, ctrl_id)
+		__array(char, disk, DISK_NAME_LEN)
+		__field(int, qid)
+		__field(u16, sq_head)
+		__field(u16, sq_tail)
+	),
+	TP_fast_assign(
+		__entry->ctrl_id = nvme_req(req)->ctrl->instance;
+		__assign_disk_name(__entry->disk, req->rq_disk);
+		__entry->qid = nvme_req_qid(req);
+		__entry->sq_head = le16_to_cpu(sq_head);
+		__entry->sq_tail = sq_tail;
+	),
+	TP_printk("nvme%d: %sqid=%d, head=%u, tail=%u",
+		__entry->ctrl_id, __print_disk_name(__entry->disk),
+		__entry->qid, __entry->sq_head, __entry->sq_tail
+	)
+);
+
 #endif /* _TRACE_NVME_H */
 
 #undef TRACE_INCLUDE_PATH
...
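
For illustration (the device name and values below are made up), a line emitted by the new tracepoint through trace_nvme_sq() in nvme_handle_cqe() renders via the TP_printk() format above roughly as:

    nvme0: disk=nvme0n1, qid=1, head=24, tail=27

which lets the trace buffer show how far the controller has consumed each submission queue relative to the driver's tail pointer at completion time.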
@@ -663,7 +663,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
 
 	req->rsp->status = cpu_to_le16(status << 1);
 
-	if (!ctrl || req->error_loc == -1)
+	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
 		return;
 
 	spin_lock_irqsave(&ctrl->error_lock, flags);
@@ -849,7 +849,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	req->rsp->status = 0;
 	req->rsp->sq_head = 0;
 	req->ns = NULL;
-	req->error_loc = -1;
+	req->error_loc = NVMET_NO_ERROR_LOC;
 	req->error_slba = 0;
 
 	/* no support for fused commands yet */
...
@@ -345,7 +345,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
-		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
 		if (ret)
 			return ret;
 		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
...
@@ -30,6 +30,7 @@
 
 #define NVMET_ASYNC_EVENTS		4
 #define NVMET_ERROR_LOG_SLOTS		128
+#define NVMET_NO_ERROR_LOC		((u16)-1)
 
 /*
  * Supported optional AENs:
...
@@ -758,7 +758,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 
 	if (icreq->maxr2t != 0) {
 		pr_err("queue %d: unsupported maxr2t %d\n", queue->idx,
-			le16_to_cpu(icreq->maxr2t) + 1);
+			le32_to_cpu(icreq->maxr2t) + 1);
 		return -EPROTO;
 	}
@@ -776,7 +776,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 	icresp->hdr.pdo = 0;
 	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
 	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
-	icresp->maxdata = 0xffff; /* FIXME: support r2t */
+	icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */
 	icresp->cpda = 0;
 	if (queue->hdr_digest)
 		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
...
@@ -357,4 +357,14 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
 	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
+static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
+		struct request *rq)
+{
+	if (rq->tag != -1)
+		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+
+	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
+			BLK_QC_T_INTERNAL;
+}
+
 #endif
@@ -425,17 +425,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie)
 	return cookie != BLK_QC_T_NONE;
 }
 
-static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
-				       bool internal)
-{
-	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
-
-	if (internal)
-		ret |= BLK_QC_T_INTERNAL;
-
-	return ret;
-}
-
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
 {
 	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
...
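
To make the cookie layout that the now-public request_to_qc_t() produces concrete, here is a small standalone sketch (not kernel code); the shift and internal-flag values mirror what BLK_QC_T_SHIFT and BLK_QC_T_INTERNAL are defined as in blk_types.h of this era (16 and bit 31), and the tag and queue numbers are made up:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QC_T_SHIFT    16          /* assumed value of BLK_QC_T_SHIFT */
    #define QC_T_INTERNAL (1U << 31)  /* assumed value of BLK_QC_T_INTERNAL */

    /* Pack a (driver or scheduler-internal) tag and hw queue number into a
     * poll cookie, the same way request_to_qc_t() does for blk_poll(). */
    static uint32_t pack_cookie(unsigned int tag, unsigned int queue_num,
                                int internal)
    {
        uint32_t cookie = tag | (queue_num << QC_T_SHIFT);

        return internal ? cookie | QC_T_INTERNAL : cookie;
    }

    int main(void)
    {
        uint32_t cookie = pack_cookie(42, 3, 0);

        /* blk_qc_t_to_queue_num() and blk_qc_t_to_tag() invert the packing. */
        assert(((cookie & ~QC_T_INTERNAL) >> QC_T_SHIFT) == 3);
        assert((cookie & ((1U << QC_T_SHIFT) - 1)) == 42);
        printf("cookie=0x%08x\n", cookie);
        return 0;
    }

Making the helper public is what lets nvme_execute_rq_polled() hand blk_poll() a cookie for a request it submitted itself, instead of relying on the cookie returned by the submission path.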