Commit 1ba2e507 authored by Daniel Wagner, committed by Christoph Hellwig

nvme-tcp: Do not reset transport on data digest errors

The spec says

  7.4.6.1 Digest Error handling

  When a host detects a data digest error in a C2HData PDU, that host
  shall continue processing C2HData PDUs associated with the command and
  when the command processing has completed, if a successful status was
  returned by the controller, the host shall fail the command with a
  non-fatal transport error.

Currently the transport is reset when a data digest error is
detected. Instead, when a digest error is detected, mark the final
status as NVME_SC_DATA_XFER_ERROR and let the upper layer handle
the error.

In order to keep track of the final result, maintain a status field in
the nvme_tcp_request object and use it to overwrite the completion
queue status (which might be successful even though a digest error has
been detected) when completing the request.
Signed-off-by: Daniel Wagner <dwagner@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent f0406481
@@ -45,6 +45,7 @@ struct nvme_tcp_request {
 	u32			pdu_len;
 	u32			pdu_sent;
 	u16			ttag;
+	__le16			status;
 	struct list_head	entry;
 	struct llist_node	lentry;
 	__le32			ddgst;
@@ -485,6 +486,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 		struct nvme_completion *cqe)
 {
+	struct nvme_tcp_request *req;
 	struct request *rq;
 
 	rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
@@ -496,7 +498,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 		return -EINVAL;
 	}
 
-	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+	req = blk_mq_rq_to_pdu(rq);
+	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
+		req->status = cqe->status;
+
+	if (!nvme_try_complete_req(rq, req->status, cqe->result))
 		nvme_complete_rq(rq);
 	queue->nr_cqe++;
@@ -758,7 +764,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
 	} else {
 		if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
-			nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+			nvme_tcp_end_request(rq,
+					le16_to_cpu(req->status));
 			queue->nr_cqe++;
 		}
 		nvme_tcp_init_recv_ctx(queue);
@@ -788,18 +795,24 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
 		return 0;
 
 	if (queue->recv_ddgst != queue->exp_ddgst) {
+		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+					pdu->command_id);
+		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+
 		dev_err(queue->ctrl->ctrl.device,
 			"data digest error: recv %#x expected %#x\n",
 			le32_to_cpu(queue->recv_ddgst),
 			le32_to_cpu(queue->exp_ddgst));
-		return -EIO;
 	}
 
 	if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
 		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
 						pdu->command_id);
+		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 
-		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+		nvme_tcp_end_request(rq, le16_to_cpu(req->status));
 		queue->nr_cqe++;
 	}
@@ -2293,6 +2306,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 		return ret;
 
 	req->state = NVME_TCP_SEND_CMD_PDU;
+	req->status = cpu_to_le16(NVME_SC_SUCCESS);
 	req->offset = 0;
 	req->data_sent = 0;
 	req->pdu_len = 0;