Commit 8b878ee2 authored by Jens Axboe

Merge branch 'nvme-4.20' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph.

* 'nvme-4.20' of git://git.infradead.org/nvme:
  nvmet-rdma: fix response use after free
  nvme: validate controller state before rescheduling keep alive
parents c616cbee d7dcdf9d
drivers/nvme/host/core.c
@@ -831,6 +831,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
+	unsigned long flags;
+	bool startka = false;
 
 	blk_mq_free_request(rq);
 
@@ -841,7 +843,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 		return;
 	}
 
-	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+	spin_lock_irqsave(&ctrl->lock, flags);
+	if (ctrl->state == NVME_CTRL_LIVE ||
+	    ctrl->state == NVME_CTRL_CONNECTING)
+		startka = true;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+	if (startka)
+		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
...
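The keep-alive change above closes a race on controller teardown: the completion handler used to re-arm ka_work unconditionally, so a keep-alive could be queued against a controller that was already resetting or deleting. The fix samples ctrl->state under ctrl->lock and only reschedules while the controller is LIVE or CONNECTING. Below is a minimal userspace sketch of that pattern; the ctrl type, the pthread mutex, and the function names are stand-ins for the kernel's spinlock and delayed work, not the actual driver code.

/* keep_alive_sketch.c: illustrative only; all types and names are made up. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_DELETING };

struct ctrl {
	pthread_mutex_t lock;
	enum ctrl_state state;
};

/* Stand-in for schedule_delayed_work(&ctrl->ka_work, ...). */
static void schedule_keep_alive(struct ctrl *ctrl)
{
	(void)ctrl;
	printf("keep-alive rescheduled\n");
}

static void keep_alive_end_io(struct ctrl *ctrl)
{
	bool startka = false;

	/* Sample the state under the lock so a concurrent teardown
	 * cannot race with the decision to re-arm the work. */
	pthread_mutex_lock(&ctrl->lock);
	if (ctrl->state == CTRL_LIVE || ctrl->state == CTRL_CONNECTING)
		startka = true;
	pthread_mutex_unlock(&ctrl->lock);

	if (startka)
		schedule_keep_alive(ctrl);
}

int main(void)
{
	struct ctrl ctrl = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.state = CTRL_DELETING,
	};

	keep_alive_end_io(&ctrl);	/* prints nothing: controller is going away */
	return 0;
}

Built with cc -pthread, this prints nothing because the controller is in the DELETING state, mirroring how the fixed handler declines to re-arm the work.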
drivers/nvme/target/rdma.c
@@ -529,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+	struct nvmet_rdma_queue *queue = cq->cq_context;
 
 	nvmet_rdma_release_rsp(rsp);
 
@@ -536,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 			wc->status != IB_WC_WR_FLUSH_ERR)) {
 		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
 			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
-		nvmet_rdma_error_comp(rsp->queue);
+		nvmet_rdma_error_comp(queue);
 	}
 }
...
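The use-after-free fixed above is in nvmet_rdma_send_done(): nvmet_rdma_release_rsp(rsp) may free or recycle the response, yet the error path then dereferenced rsp->queue. The fix caches the queue pointer from cq->cq_context, which outlives the response, before the release. A minimal userspace sketch of the same capture-before-release pattern follows; the types and names are hypothetical, not the kernel code.

/* send_done_sketch.c: illustrative only; all types and names are made up. */
#include <stdio.h>
#include <stdlib.h>

struct queue { int id; };

struct rsp {
	struct queue *queue;	/* dangling once the rsp is released */
};

/* Stand-in for nvmet_rdma_release_rsp(): may free/recycle the rsp. */
static void release_rsp(struct rsp *rsp)
{
	free(rsp);
}

/* The fix's pattern: take the queue from a source that outlives the
 * rsp (in the kernel, cq->cq_context) and never touch rsp afterwards. */
static void send_done(struct rsp *rsp, struct queue *cq_context, int error)
{
	struct queue *queue = cq_context;

	release_rsp(rsp);

	if (error)	/* reading rsp->queue here would be a use-after-free */
		printf("error completion on queue %d\n", queue->id);
}

int main(void)
{
	static struct queue q = { .id = 1 };
	struct rsp *rsp = malloc(sizeof(*rsp));

	if (!rsp)
		return 1;
	rsp->queue = &q;
	send_done(rsp, &q, 1);
	return 0;
}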