Commit e1a2ee24 authored by Israel Rukshin, committed by Jens Axboe

nvmet-rdma: Fix use after free in nvmet_rdma_cm_handler()

We may free nvmet rdma queues while rdma_cm events on them are still
being handled, which leads to a use after free. To avoid this, destroy
the qp and the queue only after destroying the cm_id, which guarantees
that all rdma_cm events are done.
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent be9bddeb
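
For reference, the queue teardown after this patch looks as follows: a
sketch of nvmet_rdma_destroy_queue_ib() assembled from the diff below,
with our reading of why the ordering is safe added as comments.

	static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
	{
		struct ib_qp *qp = queue->cm_id->qp;

		ib_drain_qp(qp);		/* flush outstanding work requests */
		rdma_destroy_id(queue->cm_id);	/* after this, no further rdma_cm
						 * events can be delivered */
		ib_destroy_qp(qp);		/* now safe: no event handler can
						 * touch the qp anymore */
		ib_free_cq(queue->cq);
	}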
drivers/nvme/target/rdma.c
@@ -913,8 +913,11 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 
 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 {
-	ib_drain_qp(queue->cm_id->qp);
-	rdma_destroy_qp(queue->cm_id);
+	struct ib_qp *qp = queue->cm_id->qp;
+
+	ib_drain_qp(qp);
+	rdma_destroy_id(queue->cm_id);
+	ib_destroy_qp(qp);
 	ib_free_cq(queue->cq);
 }
 
@@ -939,13 +942,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
 {
 	struct nvmet_rdma_queue *queue =
 		container_of(w, struct nvmet_rdma_queue, release_work);
-	struct rdma_cm_id *cm_id = queue->cm_id;
 	struct nvmet_rdma_device *dev = queue->dev;
 
 	nvmet_rdma_free_queue(queue);
 
-	rdma_destroy_id(cm_id);
-
 	kref_put(&dev->ref, nvmet_rdma_free_dev);
 }
 
@@ -1150,8 +1150,11 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
-	if (ret)
-		goto release_queue;
+	if (ret) {
+		schedule_work(&queue->release_work);
+		/* Destroying rdma_cm id is not needed here */
+		return 0;
+	}
 
 	mutex_lock(&nvmet_rdma_queue_mutex);
 	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
@@ -1159,8 +1162,6 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	return 0;
 
-release_queue:
-	nvmet_rdma_free_queue(queue);
 put_device:
 	kref_put(&ndev->ref, nvmet_rdma_free_dev);
 
@@ -1318,12 +1319,6 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_ADDR_CHANGE:
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-		/*
-		 * We might end up here when we already freed the qp
-		 * which means queue release sequence is in progress,
-		 * so don't get in the way...
-		 */
-		if (queue)
-			nvmet_rdma_queue_disconnect(queue);
+		nvmet_rdma_queue_disconnect(queue);
 		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
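
A note on the nvmet_rdma_queue_connect() error path: the sketch below
restates the changed hunk with our reading of the rationale as comments.
The interpretation of the return value rests on the rdma_cm convention
that a non-zero return from a cm event handler makes the core destroy
the id itself, which is why returning 0 matters here.

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret) {
		/* release_work frees the queue and, with this patch, also
		 * destroys the cm_id via nvmet_rdma_destroy_queue_ib(). */
		schedule_work(&queue->release_work);
		/* Return 0 so the rdma_cm core does not destroy the id
		 * itself, which would race with the scheduled release. */
		return 0;
	}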