Commit abf87d5e authored by Sagi Grimberg, committed by Christoph Hellwig

nvme-rdma: rework rdma connection establishment error path

Instead of introducing a flag indicating whether the queue is allocated,
simply free the rdma resources when we get the error.

We allocate the queue rdma resources upon successful address
resolution; there we also allocate (or take a reference on) our device,
so we should free it when we get an error after address resolution,
namely on:
1. route resolution error
2. connect reject
3. connect error
4. peer unreachable error
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent ca6e95bb
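
The control flow this patch lands in nvme_rdma_cm_handler() is easier to see outside patch form. Below is a stand-alone C sketch of that switch; the EV_* constants, destroy_queue_ib() and handle_cm_error() are illustrative stubs, not driver code. The post-address-resolution errors (route error, connect error, unreachable, reject) free the queue's rdma resources first; an address resolution error fires before those resources exist, so it falls into the shared error handling without the free.

#include <stdio.h>

/* Simplified stand-ins for the RDMA CM events handled in the patch. */
enum cm_event {
	EV_ADDR_ERROR,		/* fires before the queue's IB resources exist */
	EV_ROUTE_ERROR,		/* these three fire after allocation, */
	EV_CONNECT_ERROR,	/* so the resources must be freed on error */
	EV_UNREACHABLE,
	EV_REJECTED,		/* also post-allocation */
};

/* Stub standing in for nvme_rdma_destroy_queue_ib(). */
static void destroy_queue_ib(void)
{
	printf("  freeing queue rdma resources\n");
}

static int handle_cm_error(enum cm_event ev)
{
	switch (ev) {
	case EV_REJECTED:
		destroy_queue_ib();
		/* the real handler then decodes the reject status */
		break;
	case EV_ROUTE_ERROR:
	case EV_CONNECT_ERROR:
	case EV_UNREACHABLE:
		destroy_queue_ib();
		/* fall through to the shared error path */
	case EV_ADDR_ERROR:
		printf("  CM error event %d\n", ev);
		break;
	}
	return -1;	/* stands in for -ECONNRESET */
}

int main(void)
{
	printf("route error (post-allocation):\n");
	handle_cm_error(EV_ROUTE_ERROR);
	printf("addr error (nothing allocated yet):\n");
	handle_cm_error(EV_ADDR_ERROR);
	return 0;
}

The fall-through from the three transport errors into the address-error label mirrors the patch: one dev_dbg() plus cm_error = -ECONNRESET serves all four cases, with the free prepended only where resources were actually allocated.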
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -81,9 +81,8 @@ struct nvme_rdma_request {
 
 enum nvme_rdma_queue_flags {
 	NVME_RDMA_Q_CONNECTED = 0,
-	NVME_RDMA_IB_QUEUE_ALLOCATED = 1,
-	NVME_RDMA_Q_DELETING = 2,
-	NVME_RDMA_Q_LIVE = 3,
+	NVME_RDMA_Q_DELETING = 1,
+	NVME_RDMA_Q_LIVE = 2,
 };
 
 struct nvme_rdma_queue {
@@ -466,9 +465,6 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 	struct nvme_rdma_device *dev;
 	struct ib_device *ibdev;
 
-	if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
-		return;
-
 	dev = queue->device;
 	ibdev = dev->dev;
 	rdma_destroy_qp(queue->cm_id);
@@ -525,7 +521,6 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 		ret = -ENOMEM;
 		goto out_destroy_qp;
 	}
-	set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
 
 	return 0;
 
@@ -590,7 +585,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 	return 0;
 
 out_destroy_cm_id:
-	nvme_rdma_destroy_queue_ib(queue);
 	rdma_destroy_id(queue->cm_id);
 	return ret;
 }
@@ -1374,12 +1368,14 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		complete(&queue->cm_done);
 		return 0;
 	case RDMA_CM_EVENT_REJECTED:
+		nvme_rdma_destroy_queue_ib(queue);
 		cm_error = nvme_rdma_conn_rejected(queue, ev);
 		break;
-	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
+		nvme_rdma_destroy_queue_ib(queue);
+	case RDMA_CM_EVENT_ADDR_ERROR:
 		dev_dbg(queue->ctrl->ctrl.device,
 			"CM error event %d\n", ev->event);
 		cm_error = -ECONNRESET;